repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
OCA/carrier-delivery | delivery_auto_refresh/tests/__init__.py | Python | agpl-3.0 | 131 | 0 | # | -*- coding: utf-8 -*-
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
from . import test_delivery_auto_refresh
| |
AHAAAAAAA/HackCU2016 | sentiment.py | Python | mit | 2,307 | 0.010837 | """
Sentiment prediction module
"""
import nltk
import numpy as np
from cPickle import load
def get_word_index_array(words, word2index):
u"""
Transform the words into list of int(word index)
Note: Unknown words are dropped
>>> words = [u"I", u"love", u"you", u"RANDOM STUFF"]
>>> word2index = {u"I": 0, u"love": 1, u"you": 2}
>>> get_word_index_array(words, word2index)
[0, 1, 2]
"""
return [word2index[w]
for w in words
if word2index.get(w) is not No | ne # filter out those unknown
]
def pad_sents(sents, padding_token_index):
"""
Pad the sents(in word index form) into same length so they can form a matrix
# 15447
>>> sents = [[1,2,3], [1,2], [1,2,3,4,5]]
>>> pad | _sents(sents, padding_token_index = -1)
[[1, 2, 3, -1, -1], [1, 2, -1, -1, -1], [1, 2, 3, 4, 5]]
"""
max_len_sent = max(sents,
key = lambda sent: len(sent))
max_len = len(max_len_sent)
get_padding = lambda sent: [padding_token_index] * (max_len - len(sent))
padded_sents = [(sent + get_padding(sent))
for sent in sents]
return padded_sents
WORD2INDEX = load(open("data/twitter.pkl"))[3]
PADDING_INDEX = WORD2INDEX[u"<PADDING>"]
from param_util import load_dcnn_model_params
from dcnn import DCNN
params = load_dcnn_model_params("models/filter_widths=8,6,,batch_size=10,,ks=20,8,,fold=1,1,,conv_layer_n=2,,ebd_dm=48,,l2_regs=1e-06,1e-06,1e-06,0.0001,,dr=0.5,0.5,,nkerns=7,12.pkl")
MODEL = DCNN(params)
def sentiment_scores_of_sents(sents):
"""
Predict the sentiment positive scores for a bunch of sentences
>>> sentiment_scores_of_sents([u'simultaneously heart breaking and very funny , the last kiss is really all about performances .', u'( u ) stupid .'])
array([ 0.78528505, 0.0455901 ])
"""
word_indices = [get_word_index_array(nltk.word_tokenize(sent), WORD2INDEX)
for sent in sents]
x = np.asarray(
pad_sents(word_indices, PADDING_INDEX),
dtype = np.int32
)
scores = MODEL._p_y_given_x(x)
return scores[:, 1] # return `positiveness`
def sentiment_score(sent):
"""simple wrapper around the more general case"""
return sentiment_scores_of_sents([sent])[0] |
lutcheti/webtext | src/request/backends/BackendWiki.py | Python | gpl-3.0 | 9,114 | 0.004182 | # -- coding: utf-8 --
###########################################################################
# #
# WebText #
# #
# Lucca Hirschi #
# <lucca.hirschi@ens-lyon.fr> #
# #
# Copyright 2014 Lucca Hirschi #
# #
# This file is part of OwnShare. #
# OwnShare is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published by #
# the Free Software Foundation, either version 3 of the License, or #
# (at your option) any later version. #
# #
# OwnShare is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with OwnShare. If not, see <http://www.gnu.org/licenses/>. #
# #
###########################################################################
from __future__ import unicode_literals # implicitly declaring all strings as unicode strings
import logging
import wikipedia
from mainClass import *
from static import *
# -- Setup Logging --
logging = logging.getLogger(__name__)
CHOSENNUMBER = "numero"
LIST = "liste"
RECHERCHE = "recherche"
FR = "fr"
EN = "en"
FOUND = "Voici tous les titres d'articles que l'on a trouvé: "
NOT_FOUND_LIST = "Nous n'avons pas réussi à trouver l'article le plus pertinent parmis cette liste: "
NOT_FOUND_LIST_CHOOSE = "Vous pouvez maintenant affiner votre recherche."
NOT_FOUND = "Aucun article ne correspond à votre requête. Essayez avec une requête plus petite."
FOUND_LIST_DESCR = "%d articles répondent à votre requête. Voici la liste des %d premiers: "
def wikiSummary(request):
"""Fetch the summary of Wikipadia's articles. """
wikipedia.set_lang("fr") # in French by default
# -- PARSING --
searchText = request.argsList[0]
detailsList = None
chosenNumber = None
onlyResearch = None
# 'LIST' (ask for the list of matched articles)
if LIST in map(lambda s: s.strip().lower(), request.argsList[1:]):
detailsList = True
# 'CHOSENNUMBER i' (ask for the i-nth article of the matched articles)
if CHOSENNUMBER in map(lambda s: s.strip().lower().split()[0], request.argsList[1:]):
chosenNumber = int(filter(lambda s: s.strip().lower().split()[0] == CHOSENNUMBER, request.argsList[1:])[0].split()[1])
#'RECHERCHE' (make a research instead of looking for the summary)
if RECHERCHE in map(lambda s: s.strip().lower(), request.argsList[1:]):
onlyResearch = True
# languages: "fr" or "en" for the moment
if EN in map(lambda s: s.strip().lower(), request.argsList[1:]):
wikipedia.set_lang(EN)
if FR in map(lambda s: s.strip().lower(), request.argsList[1:]):
wikipedia.set_lang(FR)
# -- FECTHING --
max_nb_results = 10
max_nb_searchs = 20
options = None
failSuggest = None
answ = ""
if onlyResearch:
searchs = wikipedia.search(searchText, results = max_nb_searchs)
answ += (FOUND + "[" +
",".join(searchs) + "].")
return(answ)
# safe access to the Wiki'API
try:
# fails if there is an ambiguity
try:
# this does not fail if ther is no ambiguity on the required article
summary = wikipedia.summary(searchText, auto_suggest = False)
suggest = wikipedia.suggest(searchText)
title = suggest if suggest else searchText
except wikipedia.exceptions.DisambiguationError as e:
nbOptions = len(e.options)
options = e.options[:(min(len(e.options), max_nb_results))]
# there is an ambiguity -> choose the number-nth
number = chosenNumber if chosenNumber else 1
if len(options) > number - 1:
try:
newSearchText = options[number - 1]
if newSearchText.strip() == searchText.strip() and not(chosenNumber) and len(options) > 1:
newSearchText = options[1]
summary = wikipedia.summary(newSearchText, auto_suggest = False)
title = newSearchText
# In that case, we failed to disambiguate the request
except wikipedia.exceptions.DisambiguationError as e:
failSuggest = True
# results = wikipedia.search(searchtext, results=max_nb_results)
except IOError as e:
logging.error("wikiSummary > wikipedia.search | I/O error({0}): {1}".format(e.errno, e.strerror))
return(MESS_BUG())
# -- ANSWER --
# Fail to resolve ambiguity
if failSuggest:
answ += (NOT_FOUND_LIST +
"[" + ", ".join(options) + "]. " +
NOT_FOUND_LIST_CHOOSE)
return(answ)
# No articles matched the request
if options and (len(options) == 0 or not(len(options) > number - 1)):
answ += NOT_FOUND
return(answ)
# Strictly more than 1 article matches the request
if options and len(options) > 1:
if detailsList:
answ += ((FOUND_LIST_DESCR % (nbOptions, max_nb_results))
+ "[" + ", ".join(options) + "]. ")
if chosenNumber:
answ += ("Voici le %d-ème: " % chosenNumber)
else:
answ += ("Voici le premier: ")
else:
if chosenNumber:
answ += ("%d articles répondent à votre requête. Voici le %d-ème: " % (nbOptions, chosenNumber))
else:
answ += ("%d articles répondent à votre requête. Voici le premier: " % nbOptions)
# Exactly one article matched the request
else:
answ += "Voici le seul article qui répond à votre requête: "
answ += ("[%s] -- " % title) + summary
return(answ)
def likelyCorrect(a):
retur | n(("Amboise" in a or "libertin" in a or "commune" in a) or # Bussy
("Git" in a and "Ruby" in a) or # Github
("Battle" in a or "Sunweb-Napoleon" in a) or # Napoleon
("cathédrale" in a)) | # Sully
class BackendWiki(Backend):
backendName = WIKI # defined in static.py
def answer(self, request, config):
if len(request.argsList) > 0:
return(wikiSummary(request))
else:
return("Vous avez mal tapé votre requête. Rappel: " + self.help())
def test(self, user):
r1 = Request(user, "wiki", ["Bussy"], [], "")
r2 = Request(user, "wiki", ["Bussy", "liste"], [], "")
r2 = Request(user, "wiki", ["Bussy", "recherche"], [], "")
r3 = Request(user, "wiki", ["Bussy", "liste", "numero 3"], [], "")
r4 = Request(user, "wiki", ["Bussy", "numero 3"], [], "")
r5 = Request(user, "wiki", ["Bussy", "en"], [], "")
r6 = Request(user, "wiki", ["Napoleon", "en", "liste"], [], "")
r7 = Request(user, "wiki", ["Napoleon", "recherche"], [], "")
r8 = Request(user, "wiki", ["Github", "fr"], [], "")
r9 = Request(user, "wiki", ["Sully"], [], "")
for r in [r1,r2,r3,r4,r5, r6, r7, r8, r9]:
logging.info("Checking a request [%s]" % r)
a = self.answer(r, {} |
geometalab/geoconverter | GeoConverter/database.py | Python | mit | 4,858 | 0.00597 | class DatabaseRouter(object):
'''
These functions are called when Django accesses the database.
Returns the name of the database to use depending on the app and model.
Returning None means use default.
'''
def db_for_read(self, model, **hints):
return self.__db_for_read_and_write(model, **hints)
def db_for_write(self, model, **hints):
return self.__db_for_read_and_write(model, **hints)
def allow_relation(self, obj1, obj2, **hints):
return None
def allow_syncdb(self, db, model):
'''
Makes sure the correct databases are used when "python manage.py syncdb" is called.
Returning True means "model" should be synchronised with "db".
'''
allow = False
if db == 'default':
allow = model._meta.app_label != 'OGRgeoConverter'
allow = allow and model._meta.app_label != 'sessions'
elif db == 'sessions_db':
allow = model._meta.app_label == 'sessions'
elif db == 'ogrgeoconverter_db':
allow = model._meta.db_table != 'ogrgeoconverter_log_entries'
allow = allow and model._meta.db_table != 'ogrgeoconverter_ogr_log_entries'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_jobs'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_folders'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_file_matches'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_files'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_file_id_tracking'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_urls'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_shell_parameters'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_download_items'
allow = allow and model._meta.db_table != 'ogrgeoconverter_conversion_job_identification'
allow = allow and model._meta.app_label == 'OGRgeoConverter'
elif db == 'ogrgeoconverter_conversion_jobs_db':
allow = model._meta.db_table == 'ogrgeoconverter_conversion_jobs'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_folders'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_matches'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_files'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_id_tracking'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_urls'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_shell_parameters'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_download_items'
allow = allow or model._meta.db_table == 'ogrgeoconverter_conversion_job_identification'
allow = allow and model._meta.app_label == 'OGRgeoConverter'
elif db == 'ogrgeoconverter_log_db':
allow = model._meta.db_table == 'ogrgeoconverter_log_entries'
allow = allow or model._meta.db_table == 'ogrgeoconverter_ogr_log_entries'
allow = allow and model._meta.app_label == 'OGRgeoConverter'
else:
allow = None
return allow
def __db_for_read_and_write(self, model, **hints):
if model._meta.app_label == 'sessions':
return 'sessions_db'
elif model._meta.app_label == 'OGRgeoConverter':
if model._meta.db_table == 'ogrgeoconverter_log_entries' \
or model._meta.db_table == 'ogrgeoconverter_ogr_log_entries':
return 'ogrgeoconverter_log_db'
elif model._meta.db_table == 'ogrgeoconverter_conversion_jobs' \
or model._m | eta.db_table == 'ogrgeoconverter_conversion_job_folders' \
or model._meta.db_table == | 'ogrgeoconverter_conversion_job_file_matches' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_files' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_file_id_tracking' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_urls' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_shell_parameters' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_download_items' \
or model._meta.db_table == 'ogrgeoconverter_conversion_job_identification':
return 'ogrgeoconverter_conversion_jobs_db'
else:
return 'ogrgeoconverter_db'
return None
|
browseinfo/odoo_saas3_nicolas | addons/email_template/wizard/mail_compose_message.py | Python | agpl-3.0 | 12,835 | 0.004753 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010-Today OpenERP SA (<http://www.openerp.com>)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>
#
##############################################################################
from openerp import tools, SUPERUSER_ID
from openerp.osv import osv, fields
def _reopen(self, res_id, model):
return {'type': 'ir.actions.act_window',
'view_mode': 'form',
'view_type': 'form',
'res_id': res_id,
'res_model': self._name,
'target': 'new',
# save original model in context, because selecting the list of available
# templates requires a model in context
'context': {
'default_model': model,
},
}
class mail_compose_message(osv.TransientModel):
_inherit = 'mail.compose.message'
def default_get(self, cr, uid, fields, context=None):
""" Override to pre-fill the data when having a template in single-email mode """
if context is None:
context = {}
res = super(mail_compose_message, self).default_get(cr, uid, fields, context=context)
if res.get('composition_mode') != 'mass_mail' and context.get('default_template_id') and res.get('model') and res.get('res_id'):
res.update(
self.onchange_template_id(
cr, uid, [], context['default_template_id'], res.get('composition_mode'),
res.get('model'), res.get('res_id', context.get('active_id')), context=context
)['value']
)
return res
_columns = {
'template_id': fields.many2one('email.template', 'Use template', select=True),
'partner_to': fields.char('To (Partner IDs)',
help="Comma-separated list of recipient partners ids (placeholders may be used here)"),
'email_to': fields.char('To (Emails)',
help="Comma-separated recipient addresses (placeholders may be used here)",),
'email_cc': fields.char('Cc (Emails)',
help="Carbon copy recipients (placeholders may be used here)"),
}
def send_mail(self, cr, uid, ids, context=None):
""" Override of send_mail to duplicate attachments linked to the email.template.
Indeed, basic mail.compose.message wizard duplicates attachments in mass
mailing mode. But in 'single post' mod | e, attachments of an email template
also have to be duplicated to avoid changing their ownership. """
if context is None:
context = {}
wizard_context = dict(context)
| for wizard in self.browse(cr, uid, ids, context=context):
if wizard.template_id:
wizard_context['mail_notify_user_signature'] = False # template user_signature is added when generating body_html
wizard_context['mail_auto_delete'] = wizard.template_id.auto_delete # mass mailing: use template auto_delete value -> note, for emails mass mailing only
if not wizard.attachment_ids or wizard.composition_mode == 'mass_mail' or not wizard.template_id:
continue
new_attachment_ids = []
for attachment in wizard.attachment_ids:
if attachment in wizard.template_id.attachment_ids:
new_attachment_ids.append(self.pool.get('ir.attachment').copy(cr, uid, attachment.id, {'res_model': 'mail.compose.message', 'res_id': wizard.id}, context=context))
else:
new_attachment_ids.append(attachment.id)
self.write(cr, uid, wizard.id, {'attachment_ids': [(6, 0, new_attachment_ids)]}, context=context)
return super(mail_compose_message, self).send_mail(cr, uid, ids, context=wizard_context)
def onchange_template_id(self, cr, uid, ids, template_id, composition_mode, model, res_id, context=None):
""" - mass_mailing: we cannot render, so return the template values
- normal mode: return rendered values """
if template_id and composition_mode == 'mass_mail':
fields = ['subject', 'body_html', 'email_from', 'email_to', 'partner_to', 'email_cc', 'reply_to']
template = self.pool['email.template'].browse(cr, uid, template_id, context=context)
values = dict((field, getattr(template, field)) for field in fields if getattr(template, field))
if template.attachment_ids:
values['attachment_ids'] = [att.id for att in template.attachment_ids]
if template.mail_server_id:
values['mail_server_id'] = template.mail_server_id.id
if template.user_signature and 'body_html' in values:
signature = self.pool.get('res.users').browse(cr, uid, uid, context).signature
values['body_html'] = tools.append_content_to_html(values['body_html'], signature)
elif template_id:
values = self.generate_email_for_composer_batch(cr, uid, template_id, [res_id], context=context)[res_id]
# transform attachments into attachment_ids; not attached to the document because this will
# be done further in the posting process, allowing to clean database if email not send
values['attachment_ids'] = values.pop('attachment_ids', [])
ir_attach_obj = self.pool.get('ir.attachment')
for attach_fname, attach_datas in values.pop('attachments', []):
data_attach = {
'name': attach_fname,
'datas': attach_datas,
'datas_fname': attach_fname,
'res_model': 'mail.compose.message',
'res_id': 0,
'type': 'binary', # override default_type from context, possibly meant for another model!
}
values['attachment_ids'].append(ir_attach_obj.create(cr, uid, data_attach, context=context))
else:
values = self.default_get(cr, uid, ['subject', 'body', 'email_from', 'email_to', 'email_cc', 'partner_to', 'reply_to', 'attachment_ids', 'mail_server_id'], context=context)
if values.get('body_html'):
values['body'] = values.pop('body_html')
return {'value': values}
def save_as_template(self, cr, uid, ids, context=None):
""" hit save as template button: current form value will be a new
template attached to the current document. """
email_template = self.pool.get('email.template')
ir_model_pool = self.pool.get('ir.model')
for record in self.browse(cr, uid, ids, context=context):
model_ids = ir_model_pool.search(cr, uid, [('model', '=', record.model)], context=context)
model_id = model_ids and model_ids[0] or False
model_name = ''
if model_id:
model_name = ir_model_pool.browse(cr, uid, model_id, context=context).name
template_name = "%s: %s" % (model_name, tools.ustr(record.subject))
values = {
'name': template_name,
'subject': record.subject or False,
'body_html': record.body or False,
'model_id': model_id or False,
'attachment_ids': [(6, 0, [att.id for att in record.attachment_ids])],
}
template_id = email_template.create(cr, uid, values, context=context)
|
it-projects-llc/pos-addons | pos_wechat/__manifest__.py | Python | mit | 1,032 | 0 | # Copyright 2018 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
{
"name": """WeChat Payments in POS""",
"summary": """Support WeChat QR-based payments (scan and show)""",
"category": "Point of Sale",
# "live_test_url": "",
"images": ["images/main.jpg"],
"version": "12.0.1.0.0",
"application": False,
"author": "IT-Projects LLC, Ivan Yelizariev",
"support": "pos@it-projects.info",
"website": "https://apps.odoo.com/apps/modules/12.0/pos_payment_wechat/",
"license": "O | ther OSI approved licence", # MIT
"price": 165.00,
"currency": "EUR",
"depends": [
"wechat",
"pos_qr_scan",
"po | s_qr_show",
"pos_qr_payments",
"pos_longpolling",
],
"external_dependencies": {"python": [], "bin": []},
"data": ["views/assets.xml", "wizard/pos_payment_views.xml"],
"demo": [],
"qweb": ["static/src/xml/pos.xml"],
"auto_install": False,
"installable": False,
}
|
blazek/lrs | lrs/lrs/lrslayer.py | Python | gpl-2.0 | 3,504 | 0.001998 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
LrsPlugin
A QGIS plugin
Linear reference system builder and editor
-------------------
begin : 2017-5-29
copyright : (C) 2017 by Radim Blažek
email : radim.blazek@gmail.com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from time import sleep
from .error.lrserror import *
from .lrsbase import LrsBase
from .lrslayerpart import LrsLayerPart
from .lrslayerroute import LrsLayerRoute
# The class representing existing layer with measures
class LrsLayer(LrsBase):
def __init__(self, layer, **kwargs):
super(LrsLayer, self).__init__(**kwargs)
self.layer = layer
self.crs = layer.crs()
self.routeFieldName = None # field name
def setRouteFieldName(self, routeField):
self.routeFieldName = routeField
# load from layer
def load(self, progressFunction=None):
#debug("load %s %s" % (self.layer.name(), self.routeFieldName))
self.reset()
if not self.routeFieldName:
return
total = self.layer.featureCount()
count = 0
for feature in self.layer.getFeatures():
| # sleep(1) # progress debug
geo = feature.geometry()
# if geo:
# if self.lineTransform:
# geo.transform(self.lineTransform)
routeId = feature[self.routeFieldName]
route | = self.getRoute(routeId)
#line = LrsLine(feature.id(), routeId, geo)
#self.lines[feature.id()] = line
if geo:
for g in geo.asGeometryCollection():
part = LrsLayerPart(g)
route.addPart(part)
count += 1
percent = 100 * count / total;
if progressFunction:
progressFunction(percent)
for route in self.routes.values():
route.checkPartOverlaps()
# get route by id, create it if does not exist
# routeId does not have to be normalized
def getRoute(self, routeId):
normalId = normalizeRouteId(routeId)
# debug ( 'normalId = %s orig type = %s' % (normalId, type(routeId) ) )
if normalId not in self.routes:
self.routes[normalId] = LrsLayerRoute(routeId, parallelMode='error')
return self.routes[normalId]
def getRouteIds(self):
#debug("getRouteIds routeFieldName = %s" % self.routeFieldName)
if not self.layer or not self.routeFieldName:
return []
ids = set()
for feature in self.layer.getFeatures():
ids.add(feature[self.routeFieldName])
ids = list(ids)
ids.sort()
return ids
|
emergence/django-simple-history | simple_history/templatetags/simple_history_compare.py | Python | bsd-3-clause | 1,074 | 0.001862 | from __future__ import unicode_literals
import difflib
from django import template
register = template.Library()
@register.simple_tag
def diff_table(a, b, line_sp | lit="\n"):
differ = difflib.HtmlDiff(wrapcolumn=80)
try:
return differ.make_table(a.split(line_split), b.split(line_split))
except AttributeError:
if a != b:
a = '<span class="diff_sub">{a}</span>'.format(a=a)
b = '<span class="diff_add">{b}</span>'.format(b=b)
return """<table class="diff" id="difflib_chg_to0__top" cellspacing="0" cellpadding="0" rules="groups">
<colgroup></colgroup> <colgroup></colgrou | p> <colgroup></colgroup>
<colgroup></colgroup> <colgroup></colgroup> <colgroup></colgroup>
<tbody>
<tr><td class="diff_next"><a href="#difflib_chg_to0__top">t</a></td><td class="diff_header" id="from0_1">1</td><td nowrap="nowrap">{a}</td><td class="diff_next"><a href="#difflib_chg_to0__top">t</a></td><td class="diff_header" id="to0_1">1</td><td nowrap="nowrap">{b}</td></tr>
</tbody>
</table>
""".format(a=a, b=b)
|
paulharter/fam | src/fam/database/caching.py | Python | mit | 56 | 0.017857 | from | fam.buffer import buffered_db
cache = buffered_ | db
|
wyliew/AutoPokeDraw | index.py | Python | mit | 3,166 | 0.04801 | """
Auto-drawer for http://pokedraw.net/
All coordinates assume a screen resolution of 1366x768, and Chrome
maximized with the Bookmarks Toolbar enabled.
Down key has been hit 4 times to center play area in browser, and Let's Draw has been clicked.
"""
import math
import win32api, win32con, win32gui
import ImageGrab
import os
import time
# Globals
# ------------------
ref_x_pad = 148
ref_y_pad = 111
draw_x_pad = 774
draw_y_pad = 111
colors_ | x_pad= 804
colors_y_pad = 548
color_coords = [(829,571), (891,571), (955,571), (1017,571), (1079,571), (1145,571),
(829,629), (891,629), (955,629), (1017,629), (1079,629), (1145,629)]
color_rgb = [(244,42,53), (255,162,0), (255,213,0), (168,191,18), (46,181,47), (0,170,181),
(50,90, | 197), (250,208,222), (148,109,155), (135,94,55), (142,150,155), (0,0,0), (255,255,255)]
pixelMap = {}
def leftClick():
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
time.sleep(.001)
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
def leftDown():
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTDOWN, 0, 0)
def leftUp():
win32api.mouse_event(win32con.MOUSEEVENTF_LEFTUP, 0, 0)
def grabRef():
refBox = (ref_x_pad + 1,ref_y_pad + 1,ref_x_pad + 425,ref_y_pad + 427)
return ImageGrab.grab(refBox)
def grabDraw():
drawBox = (draw_x_pad + 1,draw_y_pad + 1,draw_x_pad + 425,draw_y_pad + 427)
return ImageGrab.grab(drawBox)
def grabColors():
colorsBox = (colors_x_pad + 1,colors_y_pad + 1,colors_x_pad + 365,colors_y_pad + 110)
return ImageGrab.grab(colorsBox)
def colorDiff(pixel1, pixel2):
r = pixel1[0] - pixel2[0]
g = pixel1[1] - pixel2[1]
b = pixel1[2] - pixel2[2]
return (r * .30) ** 2 + (g * .59) ** 2 + (b * .11) ** 2
#Returns closest index actually
def getClosestPixel(pixel):
if pixel in pixelMap:
return pixelMap[pixel]
else:
closestPixel = color_rgb[0]
closestIndex = 0
diff = colorDiff(closestPixel, pixel)
for index, pix in enumerate(color_rgb):
tempDiff = colorDiff(pix, pixel)
if (tempDiff <= diff):
diff = tempDiff
closestPixel = pix
closestIndex = index
pixelMap[pixel] = closestIndex
return closestIndex
def drawImage():
ref = grabRef()
colors = grabColors()
draw = grabDraw()
for x in range (0, ref.size[0], 8):
for y in range (0, ref.size[1], 8):
refPixel = ref.getpixel((x,y))
if refPixel != (255,255,255):
closestPixel = getClosestPixel(refPixel)
if (closestPixel != len(color_rgb) - 1):
coord = color_coords[closestPixel]
win32api.SetCursorPos(coord)
leftClick()
win32api.SetCursorPos((draw_x_pad + x, draw_y_pad + y ) )
leftClick()
def drawImageInBlackOrWhite():
ref = grabRef()
colors = grabColors()
draw = grabDraw()
for x in range (0, ref.size[0], 3):
for y in range (0, ref.size[1], 3):
refPixel = ref.getpixel((x,y))
L = 0.2126*refPixel[0] + 0.7152*refPixel[1] + 0.0722*refPixel[2]
if L < 128:
win32api.SetCursorPos((draw_x_pad + x, draw_y_pad + y ) )
leftClick()
time.sleep(.002)
def main():
startTime = time.time()
#drawImage()
drawImageInBlackOrWhite()
endTime = time.time()
print 'It took ' + str(endTime - startTime) + ' seconds'
main() |
schleichdi2/OPENNFR-6.3-CORE | opennfr-openembedded-core/meta/lib/oeqa/selftest/cases/gotoolchain.py | Python | gpl-2.0 | 2,594 | 0.000386 | #
# SPDX-License-Identifier: MIT
#
import glob
import os
import shutil
import tempfile
from oeqa.selftest.case import OESelftestTestCase
from oeqa.utils.commands import runCmd, bitbake, get_bb_vars
class oeGoToolchainSelfTest(OESelftestTestCase):
"""
Test cases for OE's Go toolchain
"""
@staticmethod
def get_sdk_environment(tmpdir_SDKQA):
pattern = os.path.join(tmpdir_SDKQA, "environment-setup-*")
# FIXME: this is a very naive implementation
return glob.glob(pattern)[0]
@staticmethod
def get_sdk_toolchain():
bb_vars = get_bb_vars(['SDK_DEPLOY', 'TOOLCHAIN_OUTPUTNAME'],
"meta-go-toolchain")
sdk_deploy = bb_vars['SDK_DEPLOY']
toolchain_name = bb_vars['TOOLCHAIN_OUTPUTNAME']
return os.path.join(sdk_deploy, toolchain_name + ".sh")
@classmethod
def setUpClass(cls):
super(oeGoToolchainSelfTest, cls).setUpClass()
cls.tmpdir_SDKQA = tempfile.mkdtemp(prefix='SDKQA')
cls.go_path = os.path.join(cls.tmpdir_SDKQA, "go")
# Build the SDK and locate it in DEPLOYDIR
bitbake("meta-go-toolchain")
cls.sdk_path = oeGoToolchainSelfTest.get_sdk_toolchain()
# Install the SDK into the tmpdir
runCmd("sh %s -y -d \"%s\"" % (cls.sdk_path, cls.tmpdir_SDKQA))
cls.env_SDK = oeGoToolchainSelfTest.get_sdk_environment(cls.tmpdir_SDKQA)
@classmethod
def tearDownClass(cls):
shutil.rmtree(cls.tmpdir_SDKQA, ignore_errors=True)
super(oeGoToolchainSelfTest, cls).tearDownClass()
def run_sdk_go_command(self, gocmd):
cmd = "cd %s; " % self.tmpdir_SDKQA
cmd = cmd + ". %s; " % self.env_SDK
cmd = cmd + "export GOPATH=%s; " % self.go_path
cmd = cmd + "${CROSS_COMPILE}go %s" % gocmd
return runCmd(cmd).status
def test_go_dep_build(self):
proj = "github.com/golang"
name = "dep"
ver = "v0.3.1"
archive = ".tar.gz"
url = "https://%s/%s/archive/%s%s" % (proj, name, ver, archive)
runCmd("cd %s; wget %s" % (self.tmpdir_SDKQA, url))
| runCmd("cd %s; tar -xf %s" % (self.tmpdir_SDKQA, ver+archive))
runCmd("mkdir -p %s/src/%s" % (self.go_path, proj))
runCmd("mv %s/dep-0.3.1 %s/src/%s/%s"
% (self.tmpdir_SDKQA, self.go_path, proj, name))
retv = self.run_sdk_go_command('build %s/%s/cmd/dep'
% (proj, name))
self.assertEqual(retv, 0,
| msg="Running go build failed for %s" % name)
|
MaximNevrov/neutron | neutron/common/utils.py | Python | apache-2.0 | 22,681 | 0.00022 | # Copyright 2011, VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# Borrowed from nova code base, more utilities will be added/borrowed as and
# when needed.
"""Utilities and helper functions."""
import collections
import decimal
import errno
import functools
import math
import multiprocessing
import os
import random
import signal
import socket
import sys
import tempfile
import time
import uuid
from eventlet.green import subprocess
import netaddr
from oslo_concurrency import lockutils
from oslo_config import cfg
from oslo_db import exception as db_exc
from oslo_log import log as logging
from oslo_utils import excutils
from oslo_utils import importutils
from oslo_utils import reflection
import six
from stevedore import driver
from neutron._i18n import _, _LE
from neutron.common import constants as n_const
from neutron.db import api as db_api
TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
LOG = logging.getLogger(__name__)
SYNCHRONIZED_PREFIX = 'neutron-'
# Unsigned 16 bit MAX.
MAX_UINT16 = 0xffff
synchronized = lockutils.synchronized_with_prefix(SYNCHRONIZED_PREFIX)
class cache_method_results(object):
    """Cache an instance method's results on the instance's ``_cache``.

    The decorated object must expose a ``_cache`` attribute providing
    dict-like ``get(key, default)`` and ``set(key, value, ttl)`` methods.
    This decorator is intended for object methods only.
    """
    def __init__(self, func):
        self.func = func
        # Preserve the wrapped function's name/docstring on the decorator.
        functools.update_wrapper(self, func)
        # Warn about a missing/empty cache only once per decorated method.
        self._first_call = True
        # Unique sentinel: distinguishes "not cached" from a cached None.
        self._not_cached = object()
    def _get_from_cache(self, target_self, *args, **kwargs):
        """Look up (or compute and store) the result for this call."""
        target_self_cls_name = reflection.get_class_name(target_self,
                                                         fully_qualified=False)
        # Cache key starts with the fully dotted method name so different
        # methods sharing one cache cannot collide.
        func_name = "%(module)s.%(class)s.%(func_name)s" % {
            'module': target_self.__module__,
            'class': target_self_cls_name,
            'func_name': self.func.__name__,
        }
        key = (func_name,) + args
        if kwargs:
            key += dict2tuple(kwargs)
        try:
            item = target_self._cache.get(key, self._not_cached)
        except TypeError:
            # Unhashable argument -> the key cannot be used; fall back to
            # calling the real method without caching.
            LOG.debug("Method %(func_name)s cannot be cached due to "
                      "unhashable parameters: args: %(args)s, kwargs: "
                      "%(kwargs)s",
                      {'func_name': func_name,
                       'args': args,
                       'kwargs': kwargs})
            return self.func(target_self, *args, **kwargs)
        if item is self._not_cached:
            # Cache miss: compute and store (ttl=None, semantics defined by
            # the cache implementation).
            item = self.func(target_self, *args, **kwargs)
            target_self._cache.set(key, item, None)
        return item
    def __call__(self, target_self, *args, **kwargs):
        """Entry point for the decorated method call."""
        target_self_cls_name = reflection.get_class_name(target_self,
                                                         fully_qualified=False)
        if not hasattr(target_self, '_cache'):
            raise NotImplementedError(
                _("Instance of class %(module)s.%(class)s must contain _cache "
                  "attribute") % {
                    'module': target_self.__module__,
                    'class': target_self_cls_name})
        if not target_self._cache:
            # Cache attribute present but falsy (e.g. disabled): call
            # through uncached, logging the situation only once.
            if self._first_call:
                LOG.debug("Instance of class %(module)s.%(class)s doesn't "
                          "contain attribute _cache therefore results "
                          "cannot be cached for %(func_name)s.",
                          {'module': target_self.__module__,
                           'class': target_self_cls_name,
                           'func_name': self.func.__name__})
                self._first_call = False
            return self.func(target_self, *args, **kwargs)
        return self._get_from_cache(target_self, *args, **kwargs)
    def __get__(self, obj, objtype):
        # Descriptor protocol: bind the decorator to the instance so it
        # works as a regular method.
        return functools.partial(self.__call__, obj)
def ensure_dir(dir_path):
    """Ensure a directory with 755 permissions mode exists.

    Idempotent: an already-existing directory is accepted silently.  If the
    path exists but is *not* a directory, the OSError is re-raised instead
    of being swallowed (the old code treated any EEXIST as success, even
    when a regular file was squatting on the path).
    """
    try:
        os.makedirs(dir_path, 0o755)
    except OSError as e:
        # EEXIST alone does not prove success: only swallow the error when
        # a real directory is present at the path.
        if e.errno != errno.EEXIST or not os.path.isdir(dir_path):
            raise
def _subprocess_setup():
    """Restore the default SIGPIPE action in a child process (preexec_fn)."""
    # Python installs a SIGPIPE handler by default. This is usually not what
    # non-Python subprocesses expect.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)
def subprocess_popen(args, stdin=None, stdout=None, stderr=None, shell=False,
                     env=None, preexec_fn=_subprocess_setup, close_fds=True):
    """Thin wrapper over eventlet's green ``subprocess.Popen``.

    Differs from the bare constructor only in its defaults: SIGPIPE is reset
    in the child (see _subprocess_setup) and close_fds is True.
    """
    return subprocess.Popen(args, shell=shell, stdin=stdin, stdout=stdout,
                            stderr=stderr, preexec_fn=preexec_fn,
                            close_fds=close_fds, env=env)
def parse_mappings(mapping_list, unique_values=True, unique_keys=True):
    """Parse a list of mapping strings into a dictionary.

    :param mapping_list: a list of strings of the form '<key>:<value>'
    :param unique_values: values must be unique if True
    :param unique_keys: keys must be unique if True, else implies that keys
                        and values are not unique (values are collected
                        into a list per key instead)
    :returns: a dict mapping keys to values or to list of values
    :raises ValueError: on a malformed entry or a uniqueness violation
    """
    mappings = {}
    for entry in (raw.strip() for raw in mapping_list):
        if not entry:
            # Blank entries are tolerated and skipped.
            continue
        pieces = entry.split(':')
        if len(pieces) != 2:
            raise ValueError(_("Invalid mapping: '%s'") % entry)
        key = pieces[0].strip()
        if not key:
            raise ValueError(_("Missing key in mapping: '%s'") % entry)
        value = pieces[1].strip()
        if not value:
            raise ValueError(_("Missing value in mapping: '%s'") % entry)
        if not unique_keys:
            # Multi-valued mode: accumulate distinct values per key.
            mappings.setdefault(key, [])
            if value not in mappings[key]:
                mappings[key].append(value)
            continue
        if key in mappings:
            raise ValueError(_("Key %(key)s in mapping: '%(mapping)s' not "
                               "unique") % {'key': key,
                                            'mapping': entry})
        if unique_values and value in mappings.values():
            raise ValueError(_("Value %(value)s in mapping: '%(mapping)s' "
                               "not unique") % {'value': value,
                                                'mapping': entry})
        mappings[key] = value
    return mappings
def get_hostname():
    """Return this machine's hostname as reported by the OS."""
    return socket.gethostname()
def get_first_host_ip(net, ip_version):
    """Return the first host address of *net* as a string.

    :param net: an object with a numeric ``first`` attribute holding the
                network address (e.g. a netaddr.IPNetwork)
    :param ip_version: 4 or 6, forwarded to netaddr.IPAddress
    """
    return str(netaddr.IPAddress(net.first + 1, ip_version))
def compare_elements(a, b):
    """Return True when *a* and *b* hold the same elements.

    Ordering and duplicates are ignored; ``None`` counts as empty.
    """
    left = [] if a is None else a
    right = [] if b is None else b
    return set(left) == set(right)
def safe_sort_key(value):
    """Return value hash or build one for dictionaries."""
    # Dicts are unorderable, so substitute their sorted item list as a
    # deterministic sort key.
    # NOTE(review): collections.Mapping is the py2/six-era spelling; it
    # moved to collections.abc (alias removed in Python 3.10) -- would
    # need updating if this module is ported forward.
    if isinstance(value, collections.Mapping):
        return sorted(value.items())
    return value
def dict2str(dic):
    """Serialize a flat dict as 'k1=v1,k2=v2' with keys sorted."""
    # sorted() materializes the items anyway, so the lazy six.iteritems()
    # indirection bought nothing; plain .items() behaves identically on
    # both Python 2 and 3.
    return ','.join("%s=%s" % (key, val)
                    for key, val in sorted(dic.items()))
def str2dict(string):
    """Inverse of dict2str(): parse 'k1=v1,k2=v2' back into a dict.

    Only the first '=' in each pair splits, so values may themselves
    contain '=' characters.
    """
    parsed = {}
    for pair in string.split(','):
        key, value = pair.split('=', 1)
        parsed[key] = value
    return parsed
def dict2tuple(d):
    """Return the dict's (key, value) pairs as a tuple, sorted by key.

    Useful for building hashable cache keys out of kwargs dicts.
    """
    return tuple(sorted(d.items()))
def diff_list_of_dict(old_list, new_list):
    """Diff two lists of flat dicts.

    Returns (added, removed): the dicts present only in *new_list* and
    only in *old_list* respectively.  Dicts are compared through their
    dict2str() serialization, so they must be flat string-able mappings.
    """
    new_serialized = set(dict2str(item) for item in new_list)
    old_serialized = set(dict2str(item) for item in old_list)
    added = [str2dict(s) for s in new_serialized - old_serialized]
    removed = [str2dict(s) for s in old_serialized - new_serialized]
    return added, removed
def compare_list_of_dict(old_list, new_list):
new_set = set([dict2str(l) for l in new_list |
class Solution:
    """LeetCode 443: in-place run-length compression of a character list."""

    def compress(self, chars) -> int:
        """Compress *chars* in place and return the compressed length.

        Each run of identical characters is replaced by the character
        followed by its count's digits when the run is longer than one.
        Only the first ``returned`` entries of *chars* are meaningful.
        """
        total = len(chars)
        if total == 1:
            return 1
        write = 0
        read = 0
        while read < total:
            ch = chars[read]
            run = 0
            # Consume the whole run of ch.
            while read < total and chars[read] == ch:
                read += 1
                run += 1
            chars[write] = ch
            if run > 1:
                # Write the run length digit by digit after the character.
                for digit in str(run):
                    chars[write + 1] = digit
                    write += 1
            write += 1
        return write
# obj = Solution()
# print(obj.compress | (["a", "a", "b", "b", "c", "c", "c"]))
# print(obj.compress(["a", "a", "b", "b", "c", "c", "c"]))
# print(obj.compress(["a"]))
# print(obj.compress(["a", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b", "b"]))
# print(obj.compress(["a", "a", "a", "b", "b", "a", "a"]))
# print(obj.compress([chr(ord('a') + elem) for elem in [2 for i in range(1, 2001)]]))
# print(obj.compress([chr(ord('a') + elem) for elem in [i for i in range(1, 2001)]])) |
Hitatm/Concentrator | app/utils/num_of_rounds.py | Python | gpl-3.0 | 10,818 | 0.011941 | #coding:UTF-8
import time,datetime
import os
from gxn_get_sys_config import Config
import sqlite3
from db_operate import DBClass
# 根据调度计算所选时间段内轮数
def countrounds(start_time, end_time):
    """Count how many scheduled rounds fall within [start_time, end_time].

    :param start_time: '%Y-%m-%d %H:%M:%S' timestamp string
    :param end_time: '%Y-%m-%d %H:%M:%S' timestamp string, after start_time
    :returns: number of schedule slots that occur in the span, where the
              schedule is Config().get_active_time(), a dict mapping a
              slot index to an 'HH:MM:SS' time-of-day string

    Whole 24h days each contribute every slot; the sub-day remainder is
    matched slot by slot, including windows that wrap past midnight.
    """
    g = Config()
    get_schedule = g.get_active_time()
    num_in_schedule = len(get_schedule)  # rounds in one full 24h day

    fmt = '%Y-%m-%d %H:%M:%S'
    total_minutes = int((time.mktime(time.strptime(end_time, fmt)) -
                         time.mktime(time.strptime(start_time, fmt))) // 60)
    days = total_minutes // (24 * 60)       # complete 24h days in the span
    remainder = total_minutes % (24 * 60)   # sub-day leftover, in minutes
    # (The old code computed minute % hour here, which raised
    # ZeroDivisionError for any span shorter than one hour.)

    def _to_minutes(hms):
        # 'HH:MM[:SS]' -> integer minutes since midnight.  The original
        # code multiplied/concatenated the *strings* ('16'*60 + '04'),
        # which made every comparison below meaningless; converting to
        # int is the core fix.
        parts = hms.split(':')
        return int(parts[0]) * 60 + int(parts[1])

    start_minutes = _to_minutes(start_time.split(' ')[1])
    end_minutes = _to_minutes(end_time.split(' ')[1])

    count = days * num_in_schedule
    if remainder:
        if start_minutes < end_minutes:
            # Partial window inside a single day: count slots strictly
            # between the two times of day.
            for value in get_schedule.values():
                slot = _to_minutes(value)
                if start_minutes < slot < end_minutes:
                    count += 1
        elif start_minutes > end_minutes:
            # Partial window wrapping past midnight: slots after the start
            # or before the end time of day.
            for value in get_schedule.values():
                slot = _to_minutes(value)
                if slot > start_minutes or slot < end_minutes:
                    count += 1
        # Equal HH:MM with a nonzero remainder differs only by seconds;
        # no slot can fall strictly inside such a window.
    return count
# def datacount(start_time, end_time, db, NodeID):
# DATABASE = DBClass()
# exist = 0
# appdata = DATABASE.my_db_execute('select * from '+ db +'where currenttime >= ? and currenttime <= ? and NodeID == ?;',(start_time, end_time, NodeID))
# if appdata:
# exist = 1
# return exist
def date_addone(year, month, day):
    """Return the calendar day after (year, month, day) as 'YYYY-MM-DD'.

    Month ends, December 31 rollover and leap years (including the
    century/400-year rules) are all handled by delegating to datetime.
    The previous hand-rolled version emitted a malformed '-010-' month
    for mid-October dates (its fallback zero-padded month 10); this
    rewrite fixes that and pads consistently.

    :param year: year, int or numeric string
    :param month: month 1-12, int or numeric string
    :param day: valid day of month, int or numeric string (the call sites
                derive these from real date strings)
    """
    next_day = (datetime.date(int(year), int(month), int(day)) +
                datetime.timedelta(days=1))
    return next_day.isoformat()
def get_schedule_time(start_time, end_time):
# 读取调度
g = Config()
get_schedule = g.get_active_time()
get_schedule = sorted(get_schedule.items(), key=lambda d:d[0])
schedule_list = list() #实际搜索的调度列表
# 读取日期和时间 #2017-05-02 11:04:01
start_date = start_time.split(' ')[0]
start_hms = start_time.split(' ')[1]
end_date = end_time.split(' ')[0]
end_hms = end_time.split(' ')[1]
start_time_second = int(start_hms.split(':')[0])*60*60 + int(start_hms.split(':')[1])*60 + int(start_hms.split(':')[2])
end_time_second = int(end_hms.split(':')[0])*60*60 + int(end_hms.split(':')[1])*60 + int(end_hms.split(':')[2])
days = (datetime.datetime(int(end_date.split('-')[0]), int(end_date.split('-')[1]), int(end_date.split('-')[2])) - datetime.datetime(int(start_date.split('-')[0]), int(start_date.split('-')[1]), int(start_date.split('-')[2]))).days
# print days
if days: #如果结束日期在开始日期后面 1:超过了24小时(2018-1-1 12:00:00 - 2018-1-2 18:00:00),2:没超过24小时(2018-1-1 12:00:00 - 2018-1-2 8:00:00)
#(2018-1-1 12:00:00 - 2018-1-7 18:00:00)
if (int(end_hms.split(':')[0])*60 + int(end_hms.split(':')[1])) >= (int(start_hms.split(':')[0])*60 + int(start_hms.split(':')[1])):
start_date0 = start_date
start_date1 = date_addone(start_date0.split('-')[0],start_date0.split('-')[1],start_date0.split('-')[2])
for i in range(days):
start_year = start_date0.split('-')[0]
start_month = start_date0.split('-')[1]
start_day = start_date0.split('-')[2]
for item in get_schedule:
bitmap_second = int(item[1].split(':')[0])*60*60 + int(item[1].split(':')[1])*60 + int(item[1].split(':')[2])
if (start_time_second < bitmap_second):
schedule_list.append(start_date0+' '+item[1])
start_date0 = date_addone(start_year,start_month,start_day)
for i in range(days-1):
start_year = start_date1.split('-')[0]
start_month = start_date1.split('-')[1]
start_day = start_date1.split('-')[2]
for item in get_schedule:
bitmap_second = int(item[1].split(':')[0])*60*60 + int(item[1].sp | lit(':')[1])*60 + int(item[1].split(':')[2])
if (start_time_second > bitmap_second):
schedule_list.append(start_date1+' '+item[1])
start_date1 = date_addone(start_year,start_month,start_day)
for item in get_schedule:
bitmap_second = int(item[1].split(':')[0]) | *60*60 + int(item[1].split(':')[1])*60 + int(item[1].split(':')[2])
if (start_time_second < bitmap_second and bitmap_second < end_time_second):
schedule_list.append(end_date+' '+item[1])
schedule_list = sorted(schedule_list)
# print "case1"
|
knutz3n/sportlocations | sportlocations/api/handlers.py | Python | apache-2.0 | 2,134 | 0.003749 | from piston.handler import BaseHandler
from piston.utils import FormValidationError, rc
from sportlocations.api.forms import SearchForm, PositionUpdateForm
from sportlocations.api import mongo_manager
# IMPORTANT!
# To have Post requests working with JQuery:
# In Piston's utils.py edit from
# if ctype == type_formencoded
# to
# if ctype.startswith(type_formencoded)
class FacilityPositionHandler(BaseHandler):
    """POST endpoint that stores a corrected position for one facility."""
    allowed_methods = ('POST',)

    def create(self, request, _id, *a, **kw):
        """Validate lat/lng/comment from the POST body and persist them."""
        form = PositionUpdateForm(request.POST)
        if not form.is_valid():
            raise FormValidationError(form)
        cleaned = form.cleaned_data
        mongo_manager.update_facility_position(
            _id, cleaned['lat'], cleaned['lng'], cleaned['comment'])
        return rc.ALL_OK
class SearchHandler(BaseHandler):
    """GET endpoint searching facilities near a coordinate.

    With a valid lat/lng query a geo search is run (default limit 20);
    otherwise the facilities that still lack a position are returned.
    """
    allowed_methods = ('GET',)

    def read(self, request, *a, **kw):
        form = SearchForm(request.GET)
        if form.is_valid():
            limit = form.cleaned_data['limit']
            cursor = mongo_manager.find_facilities(
                form.cleaned_data['lat'],
                form.cleaned_data['lng'],
                facility_categories=request.GET.getlist('facility_category'),
                facility_types=request.GET.getlist('facility_type'),
                limit=20 if limit is None else limit)
        else:
            cursor = mongo_manager.find_facilities_without_position()
        return {"facilities": [facility for facility in cursor]}
class TypeHandler(BaseHandler):
    """GET endpoint returning the distinct values stored for *type*."""
    allowed_methods = ('GET',)

    def read(self, request, type, *a, **kw):
        distinct_values = mongo_manager.get_distinct(type)
        # None means the type is unknown -> 404-style response.
        return rc.NOT_HERE if distinct_values is None else distinct_values
|
pshchelo/heat | heat/rpc/client.py | Python | apache-2.0 | 28,301 | 0 | #
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the heat engine RPC API.
"""
from heat.common import messaging
from heat.rpc import api as rpc_api
class EngineClient(object):
'''Client side of the heat engine rpc API.
API version history::
1.0 - Initial version.
1.1 - Add support_status argument to list_resource_types()
1.4 - Add support for service list
1.9 - Add template_type option to generate_template()
'''
BASE_RPC_API_VERSION = '1.0'
    def __init__(self):
        # All requests start at the base RPC API version; individual calls
        # pin a newer version via client.prepare(version=...).
        self._client = messaging.get_rpc_client(
            topic=rpc_api.ENGINE_TOPIC,
            version=self.BASE_RPC_API_VERSION)
    @staticmethod
    def make_msg(method, **kwargs):
        """Pack an RPC method name and its kwargs into a (method, kwargs) pair."""
        return method, kwargs
def call(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.call(ctxt, method, **kwargs)
def cast(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.cast(ctxt, method, **kwargs)
def local_error_name(self, error):
"""
Returns the name of the error with any _Remote postfix removed.
:param error: Remote raised error to derive the name from.
"""
error_name = error.__class__.__name__
return error_name.split('_Remote')[0]
def ignore_error_named(self, error, name):
"""
Raises the error unless its local name matches the supplied name
:param error: Remote raised error to derive the local name from.
:param name: Name to compare local name to.
"""
if self.local_error_name(error) != name:
raise error
    def identify_stack(self, ctxt, stack_name):
        """
        Return the full stack identifier for a single, live stack given
        the stack name.

        :param ctxt: RPC context.
        :param stack_name: Name of the stack you want to see,
                           or None to see all
        """
        return self.call(ctxt, self.make_msg('identify_stack',
                                             stack_name=stack_name))
    def list_stacks(self, ctxt, limit=None, marker=None, sort_keys=None,
                    sort_dir=None, filters=None, tenant_safe=True,
                    show_deleted=False, show_nested=False, show_hidden=False,
                    tags=None, tags_any=None, not_tags=None,
                    not_tags_any=None):
        """
        The list_stacks method returns attributes of all stacks. It supports
        pagination (``limit`` and ``marker``), sorting (``sort_keys`` and
        ``sort_dir``) and filtering (``filters``) of the results.

        :param ctxt: RPC context.
        :param limit: the number of stacks to list (integer or string)
        :param marker: the ID of the last item in the previous page
        :param sort_keys: an array of fields used to sort the list
        :param sort_dir: the direction of the sort ('asc' or 'desc')
        :param filters: a dict with attribute:value to filter the list
        :param tenant_safe: if true, scope the request by the current tenant
        :param show_deleted: if true, show soft-deleted stacks
        :param show_nested: if true, show nested stacks
        :param show_hidden: if true, show hidden stacks
        :param tags: show stacks containing these tags, combine multiple
            tags using the boolean AND expression
        :param tags_any: show stacks containing these tags, combine multiple
            tags using the boolean OR expression
        :param not_tags: show stacks not containing these tags, combine
            multiple tags using the boolean AND expression
        :param not_tags_any: show stacks not containing these tags, combine
            multiple tags using the boolean OR expression
        :returns: a list of stacks
        """
        # The tag-filtering arguments require RPC API version 1.8.
        return self.call(ctxt,
                         self.make_msg('list_stacks', limit=limit,
                                       sort_keys=sort_keys, marker=marker,
                                       sort_dir=sort_dir, filters=filters,
                                       tenant_safe=tenant_safe,
                                       show_deleted=show_deleted,
                                       show_nested=show_nested,
                                       show_hidden=show_hidden,
                                       tags=tags, tags_any=tags_any,
                                       not_tags=not_tags,
                                       not_tags_any=not_tags_any),
                         version='1.8')
def count_stacks | (self, ctxt, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_ | any=None, not_tags=None,
not_tags_any=None):
"""
Return the number of stacks that match the given filters
:param ctxt: RPC context.
:param filters: a dict of ATTR:VALUE to match against stacks
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, count will include the deleted stacks
:param show_nested: if true, count will include nested stacks
:param show_hidden: if true, count will include hidden stacks
:param tags: count stacks containing these tags, combine multiple tags
using the boolean AND expression
:param tags_any: count stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: count stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: count stacks not containing these tags, combine
multiple tags using the boolean OR expression
:returns: a integer representing the number of matched stacks
"""
return self.call(ctxt, self.make_msg('count_stacks',
filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags,
tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
    def show_stack(self, ctxt, stack_identity):
        """
        Return detailed information about one or all stacks.

        :param ctxt: RPC context.
        :param stack_identity: Name of the stack you want to show, or None to
                               show all
        """
        return self.call(ctxt, self.make_msg('show_stack',
                                             stack_identity=stack_identity))
def preview_stack(self, ctxt, stack_name, template, params, files, args):
"""
Simulates a new stack using the provided template.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC con |
code64/stepcounter | examples/json/logger-gpio.py | Python | gpl-3.0 | 781 | 0 | #!/usr/bin/env python
import time |
import json
import RPi.GPIO as GPIO
import sys
# Defaults: an optional CLI argument seeds the starting visitor count.
visitors = int(sys.argv[1]) if len(sys.argv) > 1 else 0
# Hardware interface: read the trigger sensor on BCM pin 18.
pin = 18
GPIO.setmode(GPIO.BCM)  # restored -- this line was garbled in extraction
GPIO.setup(pin, GPIO.IN)
# Log visitor to console
def log(count):
    """Print a console line for visitor number *count* (Python 2 print)."""
    print 'Logged visitor #%d' % count
# Update JSON file
def store(count, path='./logs/data.json'):
    """Write the running visitor count to a JSON file.

    The payload keeps the original format: ``{"data": "<count>"}`` with
    the count serialized as a string.

    :param count: current visitor total
    :param path: output file; new optional parameter defaulting to the
                 original hard-coded location for backward compatibility
    """
    # Prepare dictionary
    data = {
        'data': str(count)
    }
    # 'with' closes the file even on error -- the old explicit close()
    # inside the with-block was redundant.
    with open(path, 'w') as outfile:
        json.dump(data, outfile)
# Listen for trigger
# Poll loop: each high reading on the input pin counts one visitor.
try:
    while True:
        if GPIO.input(pin):
            visitors += 1
            store(visitors)
            log(visitors)
            time.sleep(2)  # debounce: ignore the pin for 2s after a hit
except KeyboardInterrupt:
    # Ctrl-C: release the GPIO pins before exiting.
    GPIO.cleanup()
|
batxes/4Cin | SHH_WT_models_highres/SHH_WT_models_highres_final_output_0.1_-0.1_5000/mtx1_models/SHH_WT_models_highres12150.py | Python | gpl-3.0 | 88,213 | 0.02452 | import _surface
import chimera
# chimera.runCommand is optional; ignore its absence.
try:
    import chimera.runCommand
except:
    pass
from VolumePath import markerset as ms
# Prefer the direct Marker_Set API; fall back to obtaining new_marker_set
# through the volume path dialog when it is not importable.
try:
    from VolumePath import Marker_Set, Link
    new_marker_set=Marker_Set
except:
    from VolumePath import volume_path_dialog
    d= volume_path_dialog(True)
    new_marker_set= d.new_marker_set
# Registries for the marker/surface sets created below, keyed by name.
marker_sets={}
surf_sets={}
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((2947.21, -1078.44, 2911.71), (0.7, 0.7, 0.7), 182.271)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((2816.6, -947.04, 2865.62), (0.7, 0.7, 0.7), 258.199)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((2779.08, -620.61, 2606.35), (0.7, 0.7, 0.7), 123.897)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((2829.72, -939.091, 2332.88), (0.7, 0.7, 0.7), 146.739)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((2854.86, -1224.37, 1966.67), (0.7, 0.7, 0.7), 179.098)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((2725.3, -672.487, 1932.28), (0.7, 0.7, 0.7), 148.854)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2622.73, -193.504, 1831.42), (0.7, 0.7, 0.7), 196.357)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2875.43, -582.19, 1590.26), (0.7, 0.7, 0.7), 166.873)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((3085.9, -945.313, 1274.97), (0.7, 0.7, 0.7), 95.4711)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((2920.62, -567.844, 1316.31), (0.7, 0.7, 0.7), 185.401)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((2677.5, -220.914, 1509.09), (0.7, 0.7, 0.7), 151.984)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((2341.24, 140.905, 1836.17), (0.7, 0.7, 0.7), 185.612)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((1984.94, 125.219, 1884.35), (0.7, 0.7, 0.7), 210.273)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((2279.2, -28.8063, 1810.38), (0.7, 0.7, 0.7), 106.892)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2059.65, -274.61, 1735.26), (0.7, 0.7, 0.7), 202.025)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((1775.27, -602.884, 1859.91), (0.7, 0.7, 0.7), 192.169)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((1386.63, -919.852, 2030.19), (0.7, 0.7, 0.7), 241.11)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((1001.52, -975.906, 2289.08), (0.7, 0.7, 0.7), 128.465)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((681.387, -1153.52, 2653.1), (0.7, 0.7, 0.7), 217.38)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((517.802, -1719.83, 2975.96), (0.7, 0.7, 0.7), 184.555)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((803.588, -1176.07, 2896.92), (0.7, 0.7, 0.7), 140.055)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((988.322, -1063.05, 2533.81), (0.7, 0.7, 0.7), 169.708)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((1040.81, -1158.65, 2149), (0.7, 0.7, 0.7), 184.639)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((835.835, -981.095, 2249.02), (0.7, 0.7, 0.7), 119.286)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((723.872, -976.166, 2535.5), (0.7, 0.7, 0.7), 147.754)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((615.183, -765.7, 2780.06), (0.7, 0.7, 0.7), 171.4)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((747.63, -378.202, 2581.43), (0.7, 0.7, 0.7), 156.341)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((1036.98, 140.304, 2732.7), (0.7, 0.7, 0.7), 186.501)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((1298.82, 618.495, 2880.58), (0.7, 0.7, 0.7), 308.325)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((1646.01, 670.8, 2613.34), (0. | 7, 0.7, 0.7), 138.617)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((1774.11, 800.71, 2362.81), (0.7, 0.7, 0.7), 130.03)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((1498.17, 703.761, 2457.23), (0.7, 0.7, 0.7), 15 | 6.552)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["par |
Pathel/deuterium | game/gamesrc/objects/character.py | Python | bsd-3-clause | 1,817 | 0 | """
Template for Characters
Copy this module up one level and name it as you like, then
use it as a template to create your own Character class.
To make new logins default to creating characters
of your new type, change settings.BASE_CHARACTER_TYPECLASS to point to
your new class, e.g.
settings.BASE_CHARACTER_TYPECLASS = "game.gamesrc.objects.mychar.MyChar"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Character as DefaultCharacter
class Character(DefaultCharacter):
    """
    The Character is like any normal Object (see example/object.py for
    a list of properties and methods), except it actually implements
    some of its hook methods to do some work:

    at_basetype_setup - always assigns the default_cmdset to this object type
                (important!)sets locks so character cannot be picked up
                and its commands only be called by itself, not anyone else.
                (to change things, use at_object_creation() instead)
    at_after_move - launches the "look" command
    at_post_puppet(player) -  when Player disconnects from the Character, we
                store the current location, so the "unconnected" character
                object does not need to stay on grid but can be given a
                None-location while offline.
    at_pre_puppet - just before Player re-connects, retrieves the character's
                old location and puts it back on the grid with a "charname
                has connected" message echoed to the room
    """
    def at_object_creation(self):
        """Called once, when the object is first created (never again).

        Seeds the character's persistent attributes.
        """
        # Stored on self.db so the values persist in the database across
        # server restarts.
        # NOTE(review): 1 looks like the baseline starting stat for a new
        # character -- confirm against the game's combat rules.
        self.db.power = 1
        self.db.combat_score = 1
|
anthropo-lab/XP | EPHEMER/dill_resp_project/settings.py | Python | gpl-3.0 | 2,310 | 0.000867 | from os import environ
# if you set a property in SESSION_CONFIG_DEFAULTS, it will be inherited by all configs
# in SESSION_CONFIGS, except those that explicitly override it.
# the session config can be accessed from methods in your apps as self.session.config,
# e.g. self.session.config['participation_fee']
SESSION_CONFIG_DEFAULTS = {
'real_world_currency_per_point': 1.00,
'participation_fee': 0.00,
'doc': "",
}
SESSION_CONFIGS = [
{
'name': 'dill_resp_punish_first',
'display_name': "Dilution de responsabilité, Punish First",
'num_demo_participants': 12,
'app_sequence': ['dill_resp'],
'treatment_order': 'punish_first'
},
{
'name': 'dill_resp_punish_last',
'display_name': "Dilution de responsabilité, Punish Last",
'num_demo_participants': 12,
'app_sequence': ['dill_resp'],
'treatment_order': 'punish_last'
},
]
# ISO-639 code
# for example: de, fr, ja, ko, zh-hans
LANGUAGE_CODE = 'en'
# e.g. EUR, GBP, CNY, JPY
REAL_WORLD_CURRENCY_CODE = 'USD'
USE_POINTS = True
ROOMS = []
CHANNEL_ROUTING = 'redirect.routing.channel_routing'
# AUTH_LEVEL:
# this setting controls which parts of your site are freely accessible,
# and which are password protected:
# - If it's not set (the default), then the whole site is freely accessible.
# - If you are launching a study and want visitors to only be able to
# play your app if you provided them with a start link, set it to STUDY.
# - If you would like to put your site online in public demo mode where
# anybody can play a demo version of your game, but not access the rest
# of the admin interface, set it to DEMO.
# for flexibility, you can set it in the environment variable OTREE_AUTH_LEVEL
AUTH_LEVEL = environ.get('OTREE_AUTH_LEVEL')
ADMIN_USERNAME = 'admin'
# for security, best to | set admin password in an environment variable
ADMIN_PASSWORD = environ.get('OTREE_ADMIN_PASSWORD')
# Consider '', None, and '0' to be empty/false
DEBUG = (environ.get('OTREE_PRODUCTION') in {None, '', '0'})
DEMO_PAGE_INTRO_HTML = """ """
# don't share this with anybody.
SECRET_KEY = '29*rluv^s95qdbcfe6&mql^2$-_^e7nvtxi_j7r%wl# | 8g27p(q'
# if an app is included in SESSION_CONFIGS, you don't need to list it here
INSTALLED_APPS = ['otree']
|
simonwydooghe/ansible | lib/ansible/modules/network/aci/mso_schema_template_external_epg_contract.py | Python | gpl-3.0 | 7,387 | 0.002031 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: mso_schema_template_external_epg_contract
short_description: Manage Extrnal EPG contracts in schema templates
description:
- Manage External EPG contracts in schema templates on Cisco ACI Multi-Site.
author:
- Devarshi Shah (@devarshishah3)
version_added: '2.10'
options:
schema:
description:
- The name of the schema.
type: str
required: yes
template:
description:
- The name of the template to change.
type: str
required: yes
external_epg:
description:
- The name of the EPG to manage.
type: str
required: yes
contract:
description:
- A contract associated to this EPG.
type: dict
suboptions:
name:
description:
- The name of the Contract to associate with.
required: true
type: str
schema:
description:
- The schema that defines the referenced BD.
- If this parameter is unspecified, it defaults to the current schema.
type: str
template:
description:
- The template that defines the referenced BD.
type: str
type:
description:
- The type of contract.
type: str
required: true
choices: [ consumer, provider ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
seealso:
- module: mso_schema_template_externalepg
- module: mso_schema_template_contract_filter
extends_documentation_fragment: mso
'''
EXAMPLES = r'''
- name: Add a contract to an EPG
mso_schema_template_external_epg_contract:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
epg: EPG 1
contract:
name: Contract | 1
type: consumer
state: present
delegate_to: localhost
- name: Remove a Contract
mso_schema_template_external_epg_contract:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
epg: EPG 1
contract:
name: Contract 1
state: absent
delegate_to: localhost
- name: Query a specific Contract
mso_schema_template_external_epg_contract:
host: mso_host
username: admin
password: SomeSec | retPassword
schema: Schema 1
template: Template 1
epg: EPG 1
contract:
name: Contract 1
state: query
delegate_to: localhost
register: query_result
- name: Query all Contracts
mso_schema_template_external_epg_contract:
host: mso_host
username: admin
password: SomeSecretPassword
schema: Schema 1
template: Template 1
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.mso import MSOModule, mso_argument_spec, mso_contractref_spec, issubset
def main():
argument_spec = mso_argument_spec()
argument_spec.update(
schema=dict(type='str', required=True),
template=dict(type='str', required=True),
external_epg=dict(type='str', required=True),
contract=dict(type='dict', options=mso_contractref_spec()),
state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
)
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=True,
required_if=[
['state', 'absent', ['contract']],
['state', 'present', ['contract']],
],
)
schema = module.params['schema']
template = module.params['template']
external_epg = module.params['external_epg']
contract = module.params['contract']
state = module.params['state']
mso = MSOModule(module)
if contract:
if contract.get('schema') is None:
contract['schema'] = schema
contract['schema_id'] = mso.lookup_schema(contract['schema'])
if contract.get('template') is None:
contract['template'] = template
# Get schema_id
schema_obj = mso.get_obj('schemas', displayName=schema)
if schema_obj:
schema_id = schema_obj['id']
else:
mso.fail_json(msg="Provided schema '{0}' does not exist".format(schema))
schema_path = 'schemas/{id}'.format(**schema_obj)
# Get template
templates = [t['name'] for t in schema_obj['templates']]
if template not in templates:
mso.fail_json(msg="Provided template '{0}' does not exist. Existing templates: {1}".format(template, ', '.join(templates)))
template_idx = templates.index(template)
# Get EPG
epgs = [e['name'] for e in schema_obj['templates'][template_idx]['externalEpgs']]
if external_epg not in epgs:
mso.fail_json(msg="Provided epg '{epg}' does not exist. Existing epgs: {epgs}".format(epg=external_epg, epgs=', '.join(epgs)))
epg_idx = epgs.index(external_epg)
# Get Contract
if contract:
contracts = [(c['contractRef'],
c['relationshipType']) for c in schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['contractRelationships']]
contract_ref = mso.contract_ref(**contract)
if (contract_ref, contract['type']) in contracts:
contract_idx = contracts.index((contract_ref, contract['type']))
contract_path = '/templates/{0}/externalEpgs/{1}/contractRelationships/{2}'.format(template, external_epg, contract)
mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['contractRelationships'][contract_idx]
if state == 'query':
if not contract:
mso.existing = schema_obj['templates'][template_idx]['externalEpgs'][epg_idx]['contractRelationships']
elif not mso.existing:
mso.fail_json(msg="Contract '{0}' not found".format(contract_ref))
mso.exit_json()
contracts_path = '/templates/{0}/externalEpgs/{1}/contractRelationships'.format(template, external_epg)
ops = []
mso.previous = mso.existing
if state == 'absent':
if mso.existing:
mso.sent = mso.existing = {}
ops.append(dict(op='remove', path=contract_path))
elif state == 'present':
payload = dict(
relationshipType=contract['type'],
contractRef=dict(
contractName=contract['name'],
templateName=contract['template'],
schemaId=contract['schema_id'],
),
)
mso.sanitize(payload, collate=True)
if mso.existing:
ops.append(dict(op='replace', path=contract_path, value=mso.sent))
else:
ops.append(dict(op='add', path=contracts_path + '/-', value=mso.sent))
mso.existing = mso.proposed
if not module.check_mode:
mso.request(schema_path, method='PATCH', data=ops)
mso.exit_json()
if __name__ == "__main__":
main()
|
wallnerryan/quantum_migrate | quantum/plugins/common/constants.py | Python | apache-2.0 | 1,169 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obt | ain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, | either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# service type constants:
CORE = "CORE"
DUMMY = "DUMMY"
LOADBALANCER = "LOADBALANCER"
# TODO(salvatore-orlando): Move these (or derive them) from conf file
ALLOWED_SERVICES = [CORE, DUMMY, LOADBALANCER]
COMMON_PREFIXES = {
CORE: "",
DUMMY: "/dummy_svc",
LOADBALANCER: "/lb",
}
# Service operation status constants
ACTIVE = "ACTIVE"
PENDING_CREATE = "PENDING_CREATE"
PENDING_UPDATE = "PENDING_UPDATE"
PENDING_DELETE = "PENDING_DELETE"
INACTIVE = "INACTIVE"
ERROR = "ERROR"
|
ebrahimraeyat/civilTools | py_widget/torsion.py | Python | gpl-3.0 | 2,302 | 0.003475 | from pathlib import Path
from PySide2 import QtWidgets
import FreeCADGui as Gui
from PySide2.QtCore import Qt
civiltools_path = Path(__file__).absolute().parent.parent
class Form(QtWidgets.QWidget):
def __init__(self, | etabs_obj):
super(Form, self).__init__()
self.form = Gui.PySideUic.loadUi(str(civiltools_path / 'widgets' / 'torsion.ui'))
# self.setupUi(self)
# self.form = self
self.etabs = etabs_obj
self.fill_xy_loadcase_names()
def fill_xy_loadcase_names(self):
x_names, y_names = self.etabs.load_patterns.get_load_patterns_in_XYdirection()
drift_load_p | atterns = self.etabs.load_patterns.get_drift_load_pattern_names()
all_load_case = self.etabs.SapModel.Analyze.GetCaseStatus()[1]
x_names = set(x_names).intersection(set(all_load_case))
y_names = set(y_names).intersection(set(all_load_case))
self.form.x_loadcase_list.addItems(x_names)
self.form.y_loadcase_list.addItems(y_names)
for lw in (self.form.x_loadcase_list, self.form.y_loadcase_list):
for i in range(lw.count()):
item = lw.item(i)
item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
item.setCheckState(Qt.Checked)
for name in drift_load_patterns:
if name in x_names:
matching_items = self.form.x_loadcase_list.findItems(name, Qt.MatchExactly)
elif name in y_names:
matching_items = self.form.y_loadcase_list.findItems(name, Qt.MatchExactly)
for item in matching_items:
item.setCheckState(Qt.Unchecked)
def accept(self):
import table_model
loadcases = []
for lw in (self.form.x_loadcase_list, self.form.y_loadcase_list):
for i in range(lw.count()):
item = lw.item(i)
if item.checkState() == Qt.Checked:
loadcases.append(item.text())
df = self.etabs.get_diaphragm_max_over_avg_drifts(loadcases=loadcases)
data, headers = df.values, list(df.columns)
table_model.show_results(data, headers, table_model.TorsionModel, self.etabs.view.show_point)
def reject(self):
import FreeCADGui as Gui
Gui.Control.closeDialog()
|
nthall/pip | pip/__init__.py | Python | mit | 10,278 | 0 | #!/usr/bin/env python
from __future__ import absolute_import
import logging
import os
import optparse
import warnings
import sys
import re
from pip.exceptions import InstallationError, CommandError, PipError
from pip.utils import get_installed_distributions, get_prog
from pip.utils import deprecation, dist_is_editable
from pip.vcs import git, mercurial, subversion, bazaar # noqa
from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter
from pip.commands import get_summaries, get_similar_commands
from pip.commands import commands_dict
from pip._vendor.requests.packages.urllib3.exceptions import (
InsecureRequestWarning,
)
# assignment for flake8 to be happy
# This fixes a peculiarity when importing via __import__ - as we are
# initialising the pip module, "from pip import cmdoptions" is recursive
# and appears not to work properly in that situation.
import pip.cmdoptions
cmdoptions = pip.cmdoptions
# The version as used in the setup.py and the docs conf.py
__version__ = "8.1.0.dev0"
logger = logging.getLogger(__name__)
# Hide the InsecureRequestWArning from urllib3
warnings.filterwarnings("ignore", category=InsecureRequestWarning)
def autocomplete():
"""Command and option completion for the main option parser (and options)
and its subcommands (and options).
Enable by sourcing one of the completion shell scripts (bash or zsh).
"""
# Don't complete if user hasn't sourced bash_completion file.
if 'PIP_AUTO_COMPLETE' not in os.environ:
return
cwords = os.environ['COMP_WORDS'].split()[1:]
cword = int(os.environ['COMP_CWORD'])
try:
current = cwords[cword - 1]
except IndexError:
current = ''
subcommands = [cmd for cmd, summary in get_summaries()]
options = []
# subcommand
try:
subcommand_name = [w for w in cwords if w in subcommands][0]
except IndexError:
subcommand_name = None
parser = create_main_parser()
# subcommand options
if subcommand_name:
# special case: 'help' subcommand has no options
if subcommand_name == 'help':
sys.exit(1)
# special case: list locally installed dists for uninstall command
if subcommand_name == 'uninstall' and not current.startswith('-'):
installed = []
lc = current.lower()
for dist in get_installed_distributions(local_only=True):
if dist.key.startswith(lc) and dist.key not in cwords[1:]:
installed.append(dist.key)
# if there are no dists installed, fall back to option completion
if installed:
for dist in installed:
print(dist)
sys.exit(1)
subcommand = commands_dict[subcommand_name]()
options += [(opt.get_opt_string(), opt.nargs)
for opt in subcommand.parser.option_list_all
if opt.help != optparse.SUPPRESS_HELP]
# filter out previously specified options from available options
prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
options = [(x, v) for (x, v) in options if x not in prev_opts]
# filter options by current input
options = [(k, v) for k, v in options if k.startswith(current)]
for option in options:
opt_label = option[0]
# append '=' to options which require args
if option[1]:
opt_label += '='
print(opt_label)
else:
# show main parser options only when necessary
if current.startswith('-') or current.startswith('--'):
opts = [i.option_list for i in parser.option_groups]
opts.append(parser.option_list)
opts = (o for it in opts for o in it)
subcommands += [i.get_opt_string() for i in opts
if i.help != optparse.SUPPRESS_HELP]
print(' '.join([x for x in subcommands if x.startswith(current)]))
sys.exit(1)
def create_main_parser():
parser_kw = {
'usage': '\n%prog <command> [options]',
'add_help_option': False,
'formatter': UpdatingDefaultsHelpFormatter(),
'name': 'global',
'prog': get_prog(),
}
parser = ConfigOptionParser(**parser_kw)
parser.disable_interspersed_args()
pip_pkg_dir = os.path.dirn | ame(os.path.dirname(os.path.abspath(__file__)))
parser.version = 'pip %s from %s (python %s)' % (
__version__, pip_pkg_dir, sys.version[:3])
# add the general options
gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, pa | rser)
parser.add_option_group(gen_opts)
parser.main = True # so the help formatter knows
# create command listing for description
command_summaries = get_summaries()
description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
parser.description = '\n'.join(description)
return parser
def parseopts(args):
parser = create_main_parser()
# Note: parser calls disable_interspersed_args(), so the result of this
# call is to split the initial args into the general options before the
# subcommand and everything else.
# For example:
# args: ['--timeout=5', 'install', '--user', 'INITools']
# general_options: ['--timeout==5']
# args_else: ['install', '--user', 'INITools']
general_options, args_else = parser.parse_args(args)
# --version
if general_options.version:
sys.stdout.write(parser.version)
sys.stdout.write(os.linesep)
sys.exit()
# pip || pip help -> print_help()
if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
parser.print_help()
sys.exit()
# the subcommand name
cmd_name = args_else[0]
if cmd_name not in commands_dict:
guess = get_similar_commands(cmd_name)
msg = ['unknown command "%s"' % cmd_name]
if guess:
msg.append('maybe you meant "%s"' % guess)
raise CommandError(' - '.join(msg))
# all the args without the subcommand
cmd_args = args[:]
cmd_args.remove(cmd_name)
return cmd_name, cmd_args
def check_isolated(args):
isolated = False
if "--isolated" in args:
isolated = True
return isolated
def main(args=None):
if args is None:
args = sys.argv[1:]
# Configure our deprecation warnings to be sent through loggers
deprecation.install_warning_logger()
autocomplete()
try:
cmd_name, cmd_args = parseopts(args)
except PipError as exc:
sys.stderr.write("ERROR: %s" % exc)
sys.stderr.write(os.linesep)
sys.exit(1)
command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
return command.main(cmd_args)
# ###########################################################
# # Writing freeze files
class FrozenRequirement(object):
def __init__(self, name, req, editable, comments=()):
self.name = name
self.req = req
self.editable = editable
self.comments = comments
_rev_re = re.compile(r'-r(\d+)$')
_date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
@classmethod
def from_dist(cls, dist, dependency_links):
location = os.path.normcase(os.path.abspath(dist.location))
comments = []
from pip.vcs import vcs, get_src_requirement
if dist_is_editable(dist) and vcs.get_backend_name(location):
editable = True
try:
req = get_src_requirement(dist, location)
except InstallationError as exc:
logger.warning(
"Error when trying to get requirement for VCS system %s, "
"falling back to uneditable format", exc
)
req = None
if req is None:
logger.warning(
'Could not determine repository location of %s', location
)
comments.append(
'## !! Could not determine repository location'
)
req = dist.as_requirement()
editable = Fa |
thomasbarillot/DAQ | HHGMonitor/ADQAPI_python/SDR14_Playlist_example.py | Python | mit | 8,460 | 0.012648 | #
# (C)opyright 2015 Signal Processing Devices Sweden AB
#
# This script showcases in Python
# - How to connect to ADQ devices in Python
# - Upload of waveforms to the SDR14
# - Using a playlist on the SDR14
# - How to setup an acquisition of data
# - How to read data by GetData API in Python
# - How to plot data in Python
#
# Note: The example is intended to use the SDR14 device connected in loopback mode (i.e. connect DAC output to ADC input)
import numpy as np
import ctypes as ct
import matplotlib.pyplot as plt
def set_playlist( adq_cu, adq_num, dac_id, tcstr ):
tc = {}
if (tcstr == 'basic1'):
ns = 2 # Number of items
tc["ns"] = ns
# | 1 2 3 4 5 6 | 7 8 9
tc["index"] = (ct.c_uint32 * ns)( 1, 2)
tc["segment"] = (ct.c_uint32 * ns)( 1, 2)
tc["next"] = (ct.c_uint32 * ns)( 2, 1)
tc["wrap"] = (ct.c_uint32 * ns)( 4, 3)
tc["ulsign"] = (ct.c_uint32 * ns)( 0, 0)
tc["trigtype"] = (ct.c_uint32 * ns)( 1, 1)
tc["triglength"] = (ct.c_uint32 * ns)( 50, 50)
tc["trigpolarity"]=(ct.c_uint32 * ns)( 0, 0)
tc["trigsample"]= (ct.c_uint32 * ns)( 1, 1)
tc["writemask"]= (ct.c_uint32 * ns)( 15, 15)
# Transfer playlist to device
ADQAPI.ADQ_AWGWritePlaylist( adq_cu, adq_num, dac_id, tc['ns'], ct.byref(tc['index']), ct.byref(tc['writemask']), ct.byref(tc['segment']), ct.byref(tc['wrap']), ct.byref(tc['next']), ct.byref(tc['trigtype']), ct.byref(tc['triglength']), ct.byref(tc['trigpolarity']), ct.byref(tc['trigsample']), ct.byref(tc['ulsign']) )
# Select the Playlist mode
ADQAPI.ADQ_AWGPlaylistMode( adq_cu, adq_num, dac_id, 1)
return tc
def lessen_to_14bits( databuf ):
for x in range(0,4096):
databuf[x] = databuf[x] & 0x3FFF;
return databuf
def define_and_upload_segments( adq_cu, adq_num, dac_id ):
# Setup target buffers for upload of data
number_of_data_segments = 3
data_length = 4096
data_buffers=(ct.POINTER(ct.c_int16*data_length)*number_of_data_segments)()
databuf = np.zeros((number_of_data_segments,data_length))
for bufp in data_buffers:
bufp.contents = (ct.c_int16*data_length)()
# Re-arrange data in numpy arrays
databuf = np.frombuffer(data_buffers[0].contents,dtype=np.int16)
#Create sawtooth
for x in range(0, 1024):
databuf[x] = x
databuf[x+1024] = 1024 - x
databuf[x+2048] = -x
databuf[x+2048+1024] = -1024 + x
databuf = lessen_to_14bits(databuf)
databuf = np.frombuffer(data_buffers[1].contents,dtype=np.int16)
#Create positive pulse
for x in range(0, 128):
databuf[x] = 1024+x
databuf[x+128] = 1300+x
databuf[x+256] = 1300+128-x
for x in range(384, 4096):
databuf[x] = 0
databuf = lessen_to_14bits(databuf)
#Create negative pulse (one level)
databuf = np.frombuffer(data_buffers[2].contents,dtype=np.int16)
for x in range(0, 256):
databuf[x] = -512
for x in range(256, 4096):
databuf[x] = 0
databuf = lessen_to_14bits(databuf)
length_np = (ct.c_uint32 * number_of_data_segments)(data_length, data_length, data_length)
segId_np = (ct.c_uint32 * number_of_data_segments)(1, 2, 3)
NofLaps_np = (ct.c_uint32 * number_of_data_segments)(3, 3, 3)
for idx,bufp in enumerate(data_buffers):
ADQAPI.ADQ_AWGSegmentMalloc( adq_cu, adq_num, dac_id, idx+1, length_np[idx], 0)
ADQAPI.ADQ_AWGWriteSegments( adq_cu, adq_num, dac_id, number_of_data_segments, ct.byref(segId_np), ct.byref(NofLaps_np), ct.byref(length_np), data_buffers )
# Note: In playlist mode, all used segments must be in the enabled range, otherwise plaqyback will stop
ADQAPI.ADQ_AWGEnableSegments( adq_cu, adq_num, dac_id, number_of_data_segments )
return
# For Python under Linux (uncomment in Linux)
#ADQAPI = ct.cdll.LoadLibrary("libadq.so")
# For Python under Windows
ADQAPI = ct.cdll.LoadLibrary("ADQAPI.dll")
ADQAPI.ADQAPI_GetRevision()
# Manually set return type from some ADQAPI functions
ADQAPI.CreateADQControlUnit.restype = ct.c_void_p
ADQAPI.ADQ_GetRevision.restype = ct.c_void_p
ADQAPI.ADQ_GetPtrStream.restype = ct.POINTER(ct.c_int16)
ADQAPI.ADQControlUnit_FindDevices.argtypes = [ct.c_void_p]
# Create ADQControlUnit
adq_cu = ct.c_void_p(ADQAPI.CreateADQControlUnit())
ADQAPI.ADQControlUnit_EnableErrorTrace(adq_cu, 3, '.')
adq_num = 1
dac_id = 1
bypass_analog = 1
# Convenience function
def adq_status(status):
if (status==0):
return 'FAILURE'
else:
return 'OK'
# Find ADQ devices
ADQAPI.ADQControlUnit_FindDevices(adq_cu)
n_of_ADQ = ADQAPI.ADQControlUnit_NofADQ(adq_cu)
print('Number of ADQ found: {}'.format(n_of_ADQ))
if n_of_ADQ > 0:
# Get revision info from ADQ
rev = ADQAPI.ADQ_GetRevision(adq_cu, adq_num)
revision = ct.cast(rev,ct.POINTER(ct.c_int))
print('\nConnected to ADQ #1')
# Print revision information
print('FPGA Revision: {}'.format(revision[0]))
if (revision[1]):
print('Local copy')
else :
print('SVN Managed')
if (revision[2]):
print('Mixed Revision')
else :
print('SVN Updated')
print('')
# Choose whether to bypass_analog
ADQAPI.ADQ_WriteRegister(adq_cu, adq_num, 10240, 0, 2*bypass_analog);
# Upload data to SDR14
define_and_upload_segments(adq_cu, adq_num, dac_id)
set_playlist(adq_cu, adq_num, dac_id, 'basic1')
ADQAPI.ADQ_AWGAutoRearm(adq_cu, adq_num, dac_id, 1)
ADQAPI.ADQ_AWGContinuous(adq_cu, adq_num, dac_id, 0)
ADQAPI.ADQ_AWGSetTriggerEnable(adq_cu, adq_num, 31)
ADQAPI.ADQ_AWGArm(adq_cu, adq_num, dac_id)
#ADQAPI.ADQ_AWGTrig(adq_cu, adq_num, dac_id)
# Set clock source
ADQ_CLOCK_INT_INTREF = 0
ADQAPI.ADQ_SetClockSource(adq_cu, adq_num, ADQ_CLOCK_INT_INTREF);
# Set trig mode
SW_TRIG = 1
EXT_TRIG_1 = 2
EXT_TRIG_2 = 7
EXT_TRIG_3 = 8
LVL_TRIG = 3
INT_TRIG = 4
LVL_FALLING = 0
LVL_RISING = 1
trigger = SW_TRIG
success = ADQAPI.ADQ_SetTriggerMode(adq_cu, adq_num, trigger)
if (success == 0):
print('ADQ_SetTriggerMode failed.')
number_of_records = 1
samples_per_record = 65536
# Start acquisition
ADQAPI.ADQ_MultiRecordSetup(adq_cu, adq_num,
number_of_records,
samples_per_record)
ADQAPI.ADQ_DisarmTrigger(adq_cu, adq_num)
ADQAPI.ADQ_ArmTrigger(adq_cu, adq_num)
while(ADQAPI.ADQ_GetAcquiredAll(adq_cu,adq_num) == 0):
if (trigger == SW_TRIG):
ADQAPI.ADQ_SWTrig(adq_cu, adq_num)
print('Waiting for trigger')
# Setup target buffers for data
max_number_of_channels = 2
target_buffers=(ct.POINTER(ct.c_int16*samples_per_record*number_of_records)*max_number_of_channels)()
for bufp in target_buffers:
bufp.contents = (ct.c_int16*samples_per_record*number_of_records)()
# Get data from ADQ
ADQ_TRANSFER_MODE_NORMAL = 0
ADQ_CHANNELS_MASK = 0x3
status = ADQAPI.ADQ_GetData(adq_cu, adq_num, target_buffers,
samples_per_record*number_of_records, 2,
0, number_of_records, ADQ_CHANNELS_MASK,
0, samples_per_record, ADQ_TRANSFER_MODE_NORMAL);
print('ADQ_GetData returned {}'.format(adq_status(status)))
# Re-arrange data in numpy arrays
data_16bit_ch0 = np.frombuffer(target_buffers[0].contents[0],dtype=np.int16)
data_16bit_ch1 = np.frombuffer(target_buffers[1].contents[0],dtype=np.int16)
# Plot data
if True:
plt.figure(1)
plt.clf()
plt.plot(data_16bit_ch0, '.-')
plt.plot(data_16bit_ch1, '.--')
plt.show()
# Only disarm trigger after data is collected
ADQAPI.ADQ_DisarmTrigger(adq_cu, adq_num)
ADQ |
stormeyes/moli | tests/testClient.py | Python | apache-2.0 | 210 | 0.004762 | from moli.client import Client
c = Client('http://127.0.0.1:8013/')
@c.on('ss')
def user_event(conne | ction | ):
pass
#print('ss event is trigger', connection)
c.emit('mybaby', 'aaaa')
c.run_forever()
|
patrickwestphal/owlapy | owlapy/model/owlobjecthasself.py | Python | gpl-3.0 | 863 | 0 | from .owlclassexpressionvisitor import OWLClassExpressionVisitor, \
OWLClassExpressionVisitorEx
from .owlobjectvisitor import OWLObjectVisitor, OWLObjectVisitorEx
from .owlrestriction import OWLRestriction
from owlapy.util import accept_default, accept_default_ex
class OWLObjectHasSelf(OWLRestriction):
"""TODO; implement"""
def __init__(self, property):
"""
:param property: an owlapy.model.OWLObjectPropertyExpression object
"" | "
super().__init__(property)
self._accept_fn_for_visitor_cls[OWLClassExpressionVisitor] = \
| accept_default
self._accept_fn_for_visitor_cls[OWLClassExpressionVisitorEx] = \
accept_default_ex
self._accept_fn_for_visitor_cls[OWLObjectVisitor] = accept_default
self._accept_fn_for_visitor_cls[OWLObjectVisitorEx] = accept_default_ex
|
lscalendars/ccoal | src/vsop87/compute-vsop.py | Python | cc0-1.0 | 5,375 | 0.015814 | #!/usr/bin/python
# coding=utf-8
#
# Python vsop optimized file generator.
#
# Copyright (c) 2013 Elie Roux <elie.roux@telecom-bretagne.eu>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# This script converts files from ftp://ftp.imcce.fr/pub/ephem/planets/vsop87/
# into useable and configurable C files. It's been used to compile the files
# in tibastro, but won't be useful for average users. It is only provided for
# people who might need it for other purposes.
#
# This script should be called with following arguments:
# compute-vsop.py infile
# with infile the file you want to convert, coming from the official
# dataset (ftp://ftp.imcce.fr/pub/ephem/planets/vsop87/)
# it outputs the result on stdout, so you should redirect it.
import re
import sys
# regexp to match a normal line, to be searched or matched starting at 48th char
vsoplineregex = re.compile("\s?-?\d+\.\d+\s+-?\d+\.\d+\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)\s+(-?\d+\.\d+)")
# correspondance between file extensions and planet:
fecor = {"ear":"earth",
"jup":"jupiter",
"mar":"mars",
"mer":"mercury",
"nep":"neptune",
"sat":"saturn",
"ura":"uranus",
"ven":"venus",
"emb":"emb"}
def main():
if len(sys.argv) != 2:
print """This script should be used with exactly one argument which is the name of the
file you want to convert.
"""
exit(1)
try:
f = open(sys.argv[1])
# we get the planet it is for
planet = fecor[sys.argv[1].split(".")[-1]]
vsop_letter = sys.argv[1][6].lower()
fout = open("vsop87-%c-%s.c" % (vsop_letter, planet), "w")
lines = f.readlines()
previous_numbers = None
functionnumber = 0
fout.write("#include \"vsop.h\"\n")
for line in lines:
# First the case of a "normal" line
# we don't care about things before column 48
m = vsoplineregex.match(line,48)
if m:
if (previous_numbers):
fout.write(" twoops(_(%s), _(%s), _(%s),\n _(%s), _(%s), _(%s));\n" % (previous_numbers[0], previous_numbers[1], previous_numbers[2], m.group(1), m.group(2), m.group(3)))
previous_numbers = None
else:
previous_numbers = [m.group(1), m.group(2), m.group(3)]
#else, it should be a head line, the only interesting thing is the 42nd and 60th character:
elif len(line) > 60:
previousfunctionnumber = functionnumber
functionnumber = int(line[59])
variablenumber = int(line[41])
print "fun : %d, var : %d" % (functionnumber, variablenumber)
if (previous_numbers):
fout.write(" oneop( _(%s), _(%s), _(%s));\n" % (previous_numbers[0], previous_numbers[1], previous_numbers[2]))
if (functionnumber != 0 or variablenumber != 1):
fout.write(" end();\n}\n")
if (variablenumber != 1 and functionnumber == 0):
print_sum_function(previousfunctionnumber, variablenumber -1, planet, vsop_letter, fout)
fout.write("\ncoord_t vsop87_%c_%s_%d_%d (time_t t) {\n initialize();\n" % (vsop_letter, planet, variablenumber, functionnumber))
# if it's none of the two, we just ignore the line
if (previous_numbers):
fout.write(" oneop( _(%s), _(%s), _(%s));\n" % (previous_numbers[0], previous_numbers[1], previous_numbers[2]))
fout.write(" end();\n}\n")
print_sum_function(functionnumber, 3, planet, vsop_letter, fout)
f.close()
fout.close()
except IOError:
print "Error: cannot open file %s" % sys.argv[1]
def print_sum_function(factor, variable_number, planet, vsop_letter, fout):
common_str = "vsop87_%c_%s_%d" % (vsop_letter, planet, variable_number)
if factor == 5:
fout.writ | e("""
coord_t %s (time_t t) {
return ((((%s_5(t) *t + %s_4(t)) * t + %s_3(t)) *t + %s_2(t)) * t + %s_1(t)) *t + %s_0(t);
}
""" % (common_str, common_str, common_str, common_str, common_str, common_str, common_str))
elif factor == 4:
fout.write("""
coord_t %s (time_t t) {
return (((%s_4(t) * t + %s_3(t)) *t + %s_2(t)) * t + %s_1(t)) *t + %s_0(t);
}
""" % (common | _str, common_str, common_str, common_str, common_str, common_str))
elif factor == 3:
fout.write("""
coord_t %s (time_t t) {
return ((%s_3(t) *t + %s_2(t)) * t + %s_1(t)) *t + %s_0(t);
}
""" % (common_str, common_str, common_str, common_str, common_str))
else:
print "error!!! Factor %d not taken into account" % factor
exit(1)
main()
|
haifengkao/ReactiveCache | .ycm_extra_conf.py | Python | mit | 13,022 | 0.024113 | #!/usr/local/bin/python
import os
# import ycm_core
# return the filename in the path without extension
def findFileName(path, ext):
name = ''
for projFile in os.listdir(path):
# cocoapods will generate _Pods.xcodeproj as well
if projFile.endswith(ext) and not projFile.startswith('_Pods'):
name= projFile[:-len(ext):]
return name
# WARNING!! No / in the end
def DirectoryOfThisScript():
return os.path.dirname( os.path.abspath( __file__ ) )
def findProjectName(working_directory):
projectName = findFileName(working_directory, '.xcodeproj')
if len(projectName) <= 0:
# cocoapod projects
projectName = findFileName(working_directory, '.podspec')
return projectName
flags = [
# TODO: find the correct cache path automatically
'-D__IPHONE_OS_VERSION_MIN_REQUIRED=80000',
'-miphoneos-version-min=9.3',
'-arch', 'arm64',
'-fblocks',
'-fmodules',
'-fobjc-arc',
'-fobjc-exceptions',
'-fexceptions',
'-isystem',
'/Library/Developer/CommandLineTools/usr/include/c++/v1', # for c++ headers <string>, <iostream> definition
'-x',
'objective-c',
'-Wno-#pragma-messages',
'-Wno-#warnings',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/Library/Frameworks',
# '-F/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks',
# '-I/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneSimulator.platform/Developer/SDKs/iPhoneSimulator.sdk/System/Library/Frameworks/Foundation.framework/Headers',
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include',
# '-isystem', '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/include/c++/v1'
# '-I/Library/Developer/CommandLineTools/usr/include',
#custom definition, include subfolders
'-ProductFrameworkInclude', # include the framework in the products(in derivedData) folder
'-I./Example/'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
'-ISUB./Pod/Classes', # old cocoapods directory
'-ISUB./'+findProjectName(DirectoryOfThisScript()), # new cocoapods directory
# use headers in framework instead
#'-ISUB./Example/Pods', # new cocoapods directory
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/Kiwi/',
# '-include',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
'-isysroot', '/Applications/Xcode.app/Contents/Developer/Platforms/iPhoneOS.platform/Developer/SDKs/iPhoneOS.sdk'
# '-fencode-extended-block-signature', #libclang may report error on this
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include', # let IncludeClangInXCToolChain handle it
# include-pch will make YouCompleteMe show 'no errors founded'
# '-include-pch',
# './Example/Tests/Tests-Prefix.pch', # test project prefix header
# modules failed trials
# '-fmodule-implementation-of',
# '-fimplicit-module-maps',
# '-F/Users/Lono/Library/Developer/Xcode/DerivedData/Scrapio-dliwlpgcvwijijcdxarawwtrfuuh/Build/Products/Debug-iphonesimulator/CocoaLumberjack',
# '-Wnon-modular-include-in-framework-module',
]
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
# Set this to the absolute path to the folder (NOT the file!) containing the
# compile_commands.json file to use that instead of 'flags'. See here for
# more details: http://clang.llvm.org/docs/JSONCompilationDatabase.html
#
# You can get CMake to generate this file for you by adding:
# set( CMAKE_EXPORT_COMPILE_COMMANDS 1 )
# to your CMakeLists.txt file.
#
# Most projects will NOT need to set this to anything; you can just change the
# 'flags' list of compilation flags. Notice that YCM itself uses that approach.
compilation_database_folder = ''
# if os.path.exists( compilation_database_folder ):
# database = ycm_core.CompilationDatabase( compilation_database_folder )
# else:
# we don't use compilation database
database = None
SOURCE_EXTENSIONS = [ '.cpp', '.cxx', '.cc', '.c', '.m', '.mm' ]
def Subdirectories(directory):
res = []
for path, subdirs, files in os.walk(directory):
for name in subdirs:
item = os.path.join(path, name)
res.append(item)
return res
def sorted_ls(path):
mtime = lambda f: os.stat(os.path.join(path, f)).st_mtime
return list(sorted(os.listdir(path), key=mtime))
def IncludeClangInXCToolChain(flags, working_directory):
if not working_directory:
return list( flags )
new_flags = list(flags)
# '-I/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/7.0.2/include',
path = '/Applications/Xcode.app/Contents/Developer/Toolchains/XcodeDefault.xctoolchain/usr/lib/clang/'
clangPath = sorted_ls(path)[::-1] # newest file first
includePath = ''
if (len(clangPath) > 0):
includePath = os.path.join('', *[path, clangPath[0], 'include'])
new_flags.append('-I'+includePath)
return new_flags
def FindDerivedDataPath( derivedDataPath, projectName ):
simulatorPaths = ['Build/Intermediates/CodeCoverage/Products/Debug-iphonesimulator/', # if you enable CodeCoverage, the framework of test target will be put in coverage folder, strange
'Build/Products/Debug-iphonesimulator/']
# search ~/Library/Developer/Xcode/DerivedData/ to find <project_name>-dliwlpgcvwijijcdxarawwtrfuuh
derivedPath = sorted_ls(derivedDataPath)[::-1] # newest file first
for productPath in derivedPath:
if productPath.lower().startswith( projectName.lower() ):
for simulatorPath in simulatorPaths:
projectPath = os.path.join('', *[derivedDataPath, productPath, simulatorPath])
if (len(projectPath) > 0) and os.path.exists(projectPath):
return projectPath # the lastest product is what we want (really?)
return ''
def IncludeFlagsOfFrameworkHeaders( flags, working_directory ):
if not working_directory:
return flags
new_flags = []
path_flag = '-ProductFrameworkInclude'
derivedDataPath = os.path.expanduser('~/Library/Developer/Xcode/DerivedData/')
# find the project name
projectName = findProjectName(working_directory)
if len(projectName) <= 0:
return flags
# add all frameworks in the /Build/Products/Debug-iphonesimulator/xxx/xxx.framework
for flag in flags:
if not flag.startswith( path_flag ):
new_flags.append(flag)
continue
projectPath = FindDerivedDataPath( derivedDataPath, projectName )
if (len(projectPath) <= 0) or not os.path.exists(projectPath):
continue
# iterate through all frameworks folders /Debug-iphonesimulator/xxx/xxx.framework
for frameworkFolder in os.listdir(projectPath):
frameworkPath = os.path.join('', projectPath, frameworkFolder)
if not os.path.isdir(frameworkPath):
continue
# framwork folder '-F/Debug-iphonesimulator/<framework-name>'
# solve <Kiwi/KiwiConfigurations.h> not found problem
new_flags.append('-F'+frameworkPath)
# the framework name might be different than folder name
# we need to iterate all frameworks
for frameworkFile in os.listdir(frameworkPath):
if frameworkFile.endswith('framework'):
# include headers '-I/Debug-iphonesimulator/xxx/yyy.framework/Headers'
# allow you to use #import "Kiwi.h". NOT REQUIRED, but I am too lazy to change existing codes
new_flags.append('-I' + os.path.join('', frameworkPath, frameworkFile,'Headers'))
return new_flags
def IncludeFlagsOfSubdirectory( flags, working_directory ):
if not working_directory:
return list( flags )
new_flags = []
make_next_include_subdir = False
path_flags = [ '-ISUB']
for fla | g in flags:
# include the directory of flag as well
new_flag = [flag.replace('-ISUB', '-I')]
if make_next_include_subdir:
make_next_include_subdir = False
for sub | dir in Subdirectories(os.path.join(working_directory, flag)):
new_flag.append('-I')
new_flag.append(subdir)
for path_flag in path_flags:
if flag == path_flag:
make_next_include_subdir = True
break
|
achanda/flocker | admin/acceptance.py | Python | apache-2.0 | 32,426 | 0.000185 | # Copyright Hybrid Logic Ltd. See LICENSE file for details.
"""
Run the acceptance tests.
"""
import sys
import os
import yaml
import json
from pipes import quote as shell_quote
from tempfile import mkdtemp
from zope.interface import Interface, implementer
from characteristic import attributes
from eliot import add_destination, write_failure
from pyrsistent import pvector
from twisted.internet.error import ProcessTerminated
from twisted.python.usage import Options, UsageError
from twisted.python.filepath import FilePath
from twisted.internet.defer import inlineCallbacks, returnValue, succeed
from twisted.python.reflect import prefixedMethodNames
from effect import parallel
from effect.twisted import perform
from admin.vagrant import vagrant_version
from flocker.common.version import make_rpm_version
from flocker.provision import PackageSource, Variants, CLOUD_ | PROVIDERS
import flocker
from flocker.provision._ssh import (
run_remotely)
from flocker.provision._install impo | rt (
ManagedNode,
task_pull_docker_images,
uninstall_flocker,
install_flocker,
configure_cluster,
configure_zfs,
)
from flocker.provision._ca import Certificates
from flocker.provision._ssh._conch import make_dispatcher
from flocker.provision._common import Cluster
from flocker.acceptance.testtools import DatasetBackend
from flocker.testtools.cluster_utils import (
make_cluster_id, Providers, TestTypes
)
from .runner import run
def extend_environ(**kwargs):
"""
Return a copy of ``os.environ`` with some additional environment variables
added.
:param **kwargs: The enviroment variables to add.
:return dict: The new environment.
"""
env = os.environ.copy()
env.update(kwargs)
return env
def remove_known_host(reactor, hostname):
"""
Remove all keys belonging to hostname from a known_hosts file.
:param reactor: Reactor to use.
:param bytes hostname: Remove all keys belonging to this hostname from
known_hosts.
"""
return run(reactor, ['ssh-keygen', '-R', hostname])
def get_trial_environment(cluster):
"""
Return a dictionary of environment varibles describing a cluster for
accetpance testing.
:param Cluster cluster: Description of the cluster to get environment
variables for.
"""
return {
'FLOCKER_ACCEPTANCE_CONTROL_NODE': cluster.control_node.address,
'FLOCKER_ACCEPTANCE_NUM_AGENT_NODES': str(len(cluster.agent_nodes)),
'FLOCKER_ACCEPTANCE_VOLUME_BACKEND': cluster.dataset_backend.name,
'FLOCKER_ACCEPTANCE_API_CERTIFICATES_PATH':
cluster.certificates_path.path,
'FLOCKER_ACCEPTANCE_HOSTNAME_TO_PUBLIC_ADDRESS': json.dumps({
node.private_address: node.address
for node in cluster.agent_nodes
if node.private_address is not None
}),
}
def run_tests(reactor, cluster, trial_args):
"""
Run the acceptance tests.
:param Cluster cluster: The cluster to run acceptance tests against.
:param list trial_args: Arguments to pass to trial. If not
provided, defaults to ``['flocker.acceptance']``.
:return int: The exit-code of trial.
"""
if not trial_args:
trial_args = ['--rterrors', 'flocker.acceptance']
def check_result(f):
f.trap(ProcessTerminated)
if f.value.exitCode is not None:
return f.value.exitCode
else:
return f
return run(
reactor,
['trial'] + list(trial_args),
env=extend_environ(
**get_trial_environment(cluster)
)).addCallbacks(
callback=lambda _: 0,
errback=check_result,
)
class IClusterRunner(Interface):
"""
Interface for starting and stopping a cluster for acceptance testing.
"""
def start_cluster(reactor):
"""
Start cluster for running acceptance tests.
:param reactor: Reactor to use.
:return Deferred: Deferred which fires with a cluster to run
tests against.
"""
def stop_cluster(reactor):
"""
Stop the cluster started by `start_cluster`.
:param reactor: Reactor to use.
:return Deferred: Deferred which fires when the cluster has been
stopped.
"""
RUNNER_ATTRIBUTES = [
# Name of the distribution the nodes run - eg "ubuntu-14.04"
'distribution',
'top_level', 'config', 'package_source', 'variants',
# DatasetBackend named constant of the dataset backend the nodes use - eg
# DatasetBackend.zfs
'dataset_backend',
# dict giving configuration for the dataset backend the nodes use - eg
# {"pool": "flocker"}
'dataset_backend_configuration',
]
@implementer(IClusterRunner)
class ManagedRunner(object):
"""
An ``IClusterRunner`` implementation that doesn't start or stop nodes but
only gives out access to nodes that are already running and managed by
someone else.
:ivar pvector _nodes: The ``ManagedNode`` instances representing the nodes
that are already running that this object will pretend to start and
stop.
:ivar PackageSource package_source: The version of the software this object
will install on the nodes when it "starts" them.
:ivar NamedConstant dataset_backend: The ``DatasetBackend`` constant
representing the dataset backend that the nodes will be configured to
use when they are "started".
:ivar dict dataset_backend_configuration: The backend-specific
configuration the nodes will be given for their dataset backend.
"""
def __init__(self, node_addresses, package_source, distribution,
dataset_backend, dataset_backend_configuration):
self._nodes = pvector(
ManagedNode(address=address, distribution=distribution)
for address in node_addresses
)
self.package_source = package_source
self.dataset_backend = dataset_backend
self.dataset_backend_configuration = dataset_backend_configuration
def _upgrade_flocker(self, reactor, nodes, package_source):
"""
Put the version of Flocker indicated by ``package_source`` onto all of
the given nodes.
This takes a primitive approach of uninstalling the software and then
installing the new version instead of trying to take advantage of any
OS-level package upgrade support. Because it's easier. The package
removal step is allowed to fail in case the package is not installed
yet (other failures are not differentiated). The only action taken on
failure is that the failure is logged.
:param pvector nodes: The ``ManagedNode``\ s on which to upgrade the
software.
:param PackageSource package_source: The version of the software to
which to upgrade.
:return: A ``Deferred`` that fires when the software has been upgraded.
"""
dispatcher = make_dispatcher(reactor)
uninstalling = perform(dispatcher, uninstall_flocker(nodes))
uninstalling.addErrback(write_failure, logger=None)
def install(ignored):
return perform(
dispatcher,
install_flocker(nodes, package_source),
)
installing = uninstalling.addCallback(install)
return installing
def start_cluster(self, reactor):
"""
Don't start any nodes. Give back the addresses of the configured,
already-started nodes.
"""
if self.package_source is not None:
upgrading = self._upgrade_flocker(
reactor, self._nodes, self.package_source
)
else:
upgrading = succeed(None)
def configure(ignored):
return configured_cluster_for_nodes(
reactor,
generate_certificates(
make_cluster_id(
TestTypes.ACCEPTANCE,
_provider_for_cluster_id(self.dataset_backend),
),
self._nodes),
|
syl20bnr/nupic | nupic/test/temporal_memory_test_machine.py | Python | gpl-3.0 | 7,649 | 0.006929 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Utilities for running data through the TM, and analyzing the results.
"""
from prettytable import PrettyTable
class TemporalMemoryTestMachine(object):
"""
Base TM test machine class.
"""
def __init__(self, tm):
"""
@param tm (TM) Temporal memory
"""
# Save member variables
self.tm = tm
def feedSequence(self, sequence, learn=True):
"""
Feed a sequence through the TM.
@param sequence (list) List of patterns, with None for resets
@param learn (bool) Learning enabled
@return (list) List of sets containing predictive cells,
one for each element in `sequence`
"""
results = []
for pattern in sequence:
if pattern == None:
self.tm.reset()
else:
self.tm.compute(pattern, learn=learn)
results.append(self.tm.predictiveCells)
return results
def computeDetailedResults(self, results, sequence):
"""
Compute detailed results from results of `feedSequence`.
@param results (list) Results from `feedSequence`
@param sequence (list) Sequence that generated the results
@return (tuple) Contains:
`predictedActiveCellsList` (list),
`predictedInactiveCellsList` (list),
`predictedActiveColumnsList` (list),
`predictedInactiveColumnsList` (list),
`unpredictedActiveColumnsList` (list)
"""
predictedActiveCellsList = [set()]
predictedInactiveCellsList = [set()]
predictedActiveColumnsList = [set()]
predictedInactiveColumnsList = [set()]
unpredictedActiveColumnsList = [set()]
# TODO: Make sure the first row is accurate, not just empty
for i in xrange(1, len(results)):
pattern = sequence[i]
predictedActiveCells = set()
predictedInactiveCells = set()
predictedActiveColumns = set()
predictedInactiveColumns = set()
unpredictedActiveColumns = set()
if pattern != None:
prevPredictedCells = results[i-1]
for prevPredictedCell in prevPredictedCells:
prevPredictedColumn = self.tm.connections.columnForCell(
prevPredictedCell)
if prevPredictedColumn in pattern:
predictedActiveCells.add(prevPredictedCell)
predictedActiveColumns.add(prevPredictedColumn)
else:
predictedInactiveCells.add(prevPredictedCell)
predictedInactiveColumns.add(prevPredictedColumn)
unpredictedActiveColumns = pattern - predictedActiveColumns
predictedActiveCellsList.append(predictedActiveCells)
predictedInactiveCellsList.append(predictedInactiveCells)
predictedActiveColumnsList.append(predictedActiveColumns)
predictedInactiveColumnsList.append(predictedInactiveColumns)
unpredictedActiveColumnsList.append(unpredictedActiveColumns)
return (predictedActiveCellsList,
predictedInactiveCellsList,
predictedActiveColumnsList,
predictedInactiveColumnsList,
unpredictedActiveColumnsList)
@staticmethod
def prettyPrintDetailedResults(detailedResults,
sequence,
patternMachine,
verbosity=1):
"""
Pretty print the detailed results from `feedSequence`.
@param detailedResults (list) Detailed results from
`computeDetailedResults`
@param sequence (list) Sequence that generated the results
@param patternMachine (PatternMachine) Pattern machine
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
cols = ["Pattern",
"predicted active columns",
"predicted inactive columns",
"unpredicted active columns",
"# predicted active cells",
"# predicted inactive cells"]
if verbosity > 2:
cols += ["predicted active cells",
"predicted inactive cells"]
table = PrettyTable(cols)
(
predictedA | ctiveCellsList,
predictedInactiveCellsList,
predictedActiveColumnsList,
predictedInactiveCo | lumnsList,
unpredictedActiveColumnsList
) = detailedResults
for i in xrange(len(sequence)):
pattern = sequence[i]
if pattern == None:
row = ["<reset>", 0, 0, 0, 0, 0]
if verbosity > 2:
row += [0, 0]
else:
row = []
row.append(patternMachine.prettyPrintPattern(pattern,
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(predictedActiveColumnsList[i],
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(predictedInactiveColumnsList[i],
verbosity=verbosity))
row.append(
patternMachine.prettyPrintPattern(unpredictedActiveColumnsList[i],
verbosity=verbosity))
row.append(len(predictedActiveCellsList[i]))
row.append(len(predictedInactiveCellsList[i]))
if verbosity > 2:
row.append(list(predictedActiveCellsList[i]))
row.append(list(predictedInactiveCellsList[i]))
table.add_row(row)
return table.get_string()
def prettyPrintConnections(self):
"""
Pretty print the connections in the temporal memory.
@param verbosity (int) Verbosity level
@return (string) Pretty-printed text
"""
tm = self.tm
text = ""
text += ("Segments: (format => "
"{segment: [(source cell, permanence), ...])\n")
text += "------------------------------------\n"
columns = range(tm.connections.numberOfColumns())
for column in columns:
cells = tm.connections.cellsForColumn(column)
for cell in cells:
segmentDict = dict()
for seg in tm.connections.segmentsForCell(cell):
synapseList = []
for synapse in tm.connections.synapsesForSegment(seg):
(_, sourceCell, permanence) = tm.connections.dataForSynapse(synapse)
synapseList.append([sourceCell,
permanence])
segmentDict[seg] = synapseList
text += ("Column {0} / Cell {1}:\t{2}\n".format(
column, cell, segmentDict))
if column < len(columns) - 1: # not last
text += "\n"
text += "------------------------------------\n"
return text
|
pisskidney/leetcode | medium/378.py | Python | mit | 745 | 0.010738 | #!/usr/bin/python
from heapq import heapify, heapreplace
class Solution(object):
def kthSmallest(self, matrix, k):
"""
:type matrix: List[List[int]]
:type k: int
:rtyp | e: int
"""
| if len(matrix) is 1:
return matrix[0][0]
z = zip(*matrix[1:])
h = [(matrix[0][i], z[i]) for i in xrange(len(matrix))]
heapify(h)
i = 0
while i < k - 1:
val, nextval = h[0]
if nextval:
heapreplace(h, (nextval[0], nextval[1:]))
else:
heappop(h)
i += 1
return h[0][0]
a = [[1,5,10], [4,5,11], [7,8,12]]
s = Solution()
print s.kthSmallest(a, 3)
|
martinghunt/Fastaq | pyfastaq/runners/count_sequences.py | Python | gpl-3.0 | 379 | 0.013193 | import argparse
from pyfastaq import tas | ks
def run(description):
parser = argparse.ArgumentParser(
description = 'Prints the number of sequences in input file to stdout',
usage = 'fastaq count_sequences <infile>')
parser.add_argument('infile', help='Name of input file')
options = parser.parse_args()
| print(tasks.count_sequences(options.infile))
|
dmlb2000/pacifica-archiveinterface | setup.py | Python | lgpl-3.0 | 1,765 | 0.001133 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Setup and install the archive interface with hpss."""
import sys
from os.path import isfile
from os import path
from setuptools.extension import Extension
from setuptools import setup, find_packages
HPSS = Extension(
'pacifica.archiveinterface.backends.hpss._hpssExtensions',
sources=[
'pacifica/archiveinterface/backends/hpss/hpssExtensions.c'
],
include_dirs=['/opt/hpss/include'],
library_dirs=['/opt/hpss/lib'],
libraries=['tirpc', 'hpsscs', 'hpss'],
extra_compile_args=['-DLINUX', '-DHPSS51', '-DLITTLEEND']
)
EXT_MODULES = []
if '--with-hpss' in sys.argv:
EXT_MODULES.append(HPSS)
sys.argv.remove('--with-hpss')
elif isfile('/opt/hpss/include/hpss_api.h'):
EXT_MODULES.append(HPSS)
if '--without-hpss' in sys.argv:
EXT_MODULES = []
sys.argv.remove('--without-hpss')
setup(
name='pacifica-archiveinterface',
use_scm_version=True,
setup_requires=['setuptools_scm'],
description='Pacifica Archive Interface',
url='https://github.com/pacifica/pacifica-archiveinterface/',
long_description=open(path.join(
path.abspath(path.dirname(__file__)),
| 'README.md')).read(),
long_description_content_type='text/markdown',
author='David Brown',
author_email='david.brown@pnnl.gov',
packages=find_packages(include='pacifica.*'),
namespace_packages=['pacifica'],
entry_points={
'console_scripts': [
'pacifica-archiveinterface=pacifica.archiveinterface.__main__:main',
'pacifica-archiveinterface-cmd=pacifica.archiveinterface.__main__:cmd'
], |
},
install_requires=[
'cherrypy',
'peewee>2',
'PyMySQL',
],
ext_modules=EXT_MODULES
)
|
390910131/Misago | misago/core/shortcuts.py | Python | gpl-2.0 | 736 | 0 | from django.shortcuts import * # noqa
def paginate(object_list, page, per_page, orphans=0,
allow_empty_first_page=True):
from django.http import Http404
from django.core.paginator import Paginator, EmptyPage
from misago.core.exceptions import ExplicitFirstPage
if page in (1, "1"):
raise ExplicitFirstPage()
elif not page:
page = 1
try:
return Paginator(
| object_list, per_page, orphans=orphans,
allow_empty_first_page=allow_empty_first_page).page(page)
except EmptyPage:
raise Http404()
def validate_slug(model, slug):
from misago.core.exceptions import OutdatedSlug
if model.slug != slug:
raise Outdate | dSlug(model)
|
pFernbach/hpp-rbprm-corba | script/scenarios/sandbox/dynamic/plateform_hrp2_interp_testTransition.py | Python | lgpl-3.0 | 7,681 | 0.021612 | from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.gepetto import Viewer
import time
from constraint_to_dae import *
from hpp.corbaserver.rbprm.rbprmstate import State,StateHelper
from display_tools import *
import plateform_hrp2_path as tp
import time
tPlanning = tp.tPlanning
packageName = "hrp2_14_description"
meshPackageName = "hrp2_14_description"
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "hrp2_14"
urdfSuffix = "_reduced"
srdfSuffix = ""
pId = tp.ps.numberPaths() -1
fullBody = FullBody ()
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
fullBody.setJointBounds ("base_joint_xyz", [-1,2, -0.5, 0.5, 0.5, 0.8])
fullBody.client.basic.robot.setDimensionExtraConfigSpace(tp.extraDof)
fullBody.client.basic.robot.setExtraConfigSpaceBounds([-1,1,-1,1,-0.5,0.5,0,0,0,0,0,0])
ps = tp.ProblemSolver( fullBody )
ps.client.problem.setParameter("aMax",tp.aMax)
ps.client.problem.setParameter("vMax",tp.vMax)
r = tp.Viewer (ps,viewerClient=tp.r.client,displayArrows = True, displayCoM = True)
q_init =[0., 0., 0.648702, 1.0, 0.0 , 0.0, 0.0,0.0, 0.0, 0.0, 0.0,0.261799388, 0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17,0.261799388, -0.174532925, 0.0, -0.523598776, 0.0, 0.0, 0.17,0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0,0.0, 0.0, -0.453785606, 0.872664626, -0.41887902, 0.0,0,0,0,0,0,0]; r (q_init)
q_ref = q_init[::]
fullBody.setCurrentConfig (q_init)
qfar=q_ref[::]
qfar[2] = -5
#~ AFTER loading obstacles
rLegId = 'hrp2_rleg_rom'
lLegId = 'hrp2_lleg_rom'
tStart = time.time()
rLeg = 'RLEG_JOINT0'
rLegOffset = [0,0,-0.0955]
rLegLimbOffset=[0,0,-0.035]#0.035
rLegNormal = [0,0,1]
rLegx = 0.09; rLegy = 0.05
#fullBody.addLimbDatabase("./db/hrp2_rleg_db.db",rLegId,"forward")
fullBody.addLimb(rLegId,rLeg,'',rLegOffset,rLegNormal, rLegx, rLegy, 100000, "fixedStep1", 0.01,"_6_DOF",limbOffset=rLegLimbOffset)
fullBody.runLimbSampleAnalysis(rLegId, "ReferenceConfiguration", True)
#fullBody.saveLimbDatabase(rLegId, "./db/hrp2_rleg_db.db")
lLeg = 'LLEG_JOINT0'
lLegOffset = [0,0,-0.0955]
lLegLimbOffset=[0,0,0.035]
lLegNormal = [0,0,1]
lLegx = 0.09; lLegy = 0.05
#fullBody.addLimbDatabase("./db/hrp2_lleg_db.db",lLegId,"forward")
fullBody.addLimb(lLegId,lLeg,'',lLegOffset,rLegNormal, lLegx, lLegy, 100000, "fixedStep1", 0.01,"_6_DOF",limbOffset=lLegLimbOffset)
fullBody.runLimbSampleAnalysis(lLegId, "ReferenceConfiguration", True)
#fullBody.saveLimbDatabase(lLegId, "./db/hrp2_lleg_db.db")
fullBody.setReferenceConfig (q_ref)
## Add arms (not used for contact) :
tGenerate = time.time() - tStart
print "generate databases in : "+str(tGenerate)+" s"
q_0 = fullBody.getCurrentConfig();
#~ fullBody.createOctreeBoxes(r.client.gui, 1, rarmId, q_0,)
configSize = fullBody.getConfigSize() -fullBody.client.basic.robot.getDimensionExtraConfigSpace()
q_init = fullBody.getCurrentConfig(); q_init[0:7] = tp.ps.configAtParam(pId,0.01)[0:7] # use this to get the correct orientation
q_goal = fullBody.getCurrentConfig(); q_goal[0:7] = tp.ps.configAtParam(pId,tp.ps.pathLength(pId))[0:7]
dir_init = tp.ps.configAtParam(pId,0.01)[tp.indexECS:tp.indexECS+3]
acc_init = tp.ps.configAtParam(pId,0.01)[tp.indexECS+3:tp.indexECS+6]
dir_goal = tp.ps.configAtParam(pId,tp.ps.pathLength(pId)-0.01)[tp.indexECS:tp.indexECS+3]
acc_goal = [0,0,0]
robTreshold = 3
# copy extraconfig for start and init configurations
q_init[configSize:configSize+3] = dir_init[::]
q_init[configSize+3:configSize+6] = acc_init[::]
q_goal[configSize:configSize+3] = dir_goal[::]
q_goal[configSize+3:configSize+6] = [0,0,0]
# FIXME : test
q_init[2] = q_ref[2]+0.011
q_goal[2] = q_ref[2]+0.011
fullBody.setStaticStability(True)
# Randomly generating a contact configuration at q_init
fullBody.setCurrentConfig (q_init)
r(q_init)
#q_init = fullBody.generateContacts(q_init,dir_init,acc_init,robTreshold)
r(q_init)
# Randomly generating a contact configuration at q_end
fullBody.setCurrentConfig (q_goal)
#q_goal = fullBody.generateContacts(q_goal, dir_goal,acc_goal,robTreshold)
r(q_goal)
# specifying the full body configurations as start and goal state of the problem
r.addLandmark('hrp2_14/BODY',0.3)
r(q_init)
fullBody.setStartState(q_init,[lLegId,rLegId])
fullBody.setEndState(q_goal,[lLegId,rLegId])
#p = fullBody.computeContactPointsAtState(init_sid)
#p = fullBody.computeContactPointsAtState(int_sid)
"""
q = q_init[::]
q[0] += 0.3
q = fullBody.generateContacts(q,dir_init,acc_init,robTreshold)
mid_sid = fullBody.addState(q,[lLegId,rLegId])
"""
from hpp.gepetto import PathPlayer
pp = PathPlayer (fullBody.client.basic, r)
import fullBodyPlayerHrp2
"""
tStart = time.time()
configsFull = fullBody.interpolate(0.001,pathId=pId,robustnessTreshold = 1, | filterStates = False)
tInterpolateConfigs = time.time() - tStart
print "number of configs :", len(configsFull)
"""
q_init[0] += 0.05
createSphere('s',r)
n = [0,0,1]
p = [0,0.1,0]
q_init[-6:-3] = [0.2,0,0]
q_goal[-6:-3] = [0.1,0,0]
sf = State(fullBody,q=q_goal,limbsIncontact=[lLegId,rLegId])
si = State(fullBody,q=q_init,limbsIncontact=[lLegId,rLegId])
n = [0.0, -0.42261828000211843, 0.9063077785212101]
p = [0.775, 0.22, -0.019]
moveSphere('s',r,p)
smid,success = StateHelper.addNewConta | ct(si,lLegId,p,n,100)
assert(success)
smid2,success = StateHelper.addNewContact(sf,lLegId,p,n,100)
assert(success)
r(smid.q())
sf2 = State(fullBody,q=q_goal,limbsIncontact=[lLegId,rLegId])
"""
fullBody.isDynamicallyReachableFromState(smid.sId,smid2.sId,True)
fullBody.isDynamicallyReachableFromState(smid.sId,smid2.sId,timings=[0.4,0.2,0.4])
fullBody.isDynamicallyReachableFromState(si.sId,smid.sId,timings=[0.8,0.6,0.8])
fullBody.isDynamicallyReachableFromState(smid2.sId,sf.sId,timings=[0.8,0.6,0.8])
import disp_bezier
pp.dt = 0.00001
disp_bezier.showPath(r,pp,pid)
x = [0.776624, 0.219798, 0.846351]
moveSphere('s',r,x)
displayBezierConstraints(r)
path = "/local/dev_hpp/screenBlender/iros2018/polytopes/platform/path"
for i in range(1,4):
r.client.gui.writeNodeFile('path_'+str(int(pid[i]))+'_root',path+str(i-1)+'.obj')
r.client.gui.writeNodeFile('s',path+'_S.stl')
"""
"""
com = fullBody.getCenterOfMass()
com[1] = 0
"""
"""
pids = []
pids += [fullBody.isDynamicallyReachableFromState(si.sId,smid.sId)]
pids += [fullBody.isDynamicallyReachableFromState(smid.sId,smid2.sId)]
pids += [fullBody.isDynamicallyReachableFromState(smid2.sId,sf2.sId)]
for pid in pids :
if pid > 0:
print "success"
#pp.displayPath(pid,color=r.color.blue)
#r.client.gui.setVisibility('path_'+str(pid)+'_root','ALWAYS_ON_TOP')
else:
print "fail."
"""
"""
n = [0,0,1]
p = [1.15,0.1,0]
moveSphere('s',r,p)
sE,success = StateHelper.addNewContact(si,lLegId,p,n)
assert(success)
p = [1.15,-0.1,0]
sfe, success = StateHelper.addNewContact(sE,rLegId,p,n)
assert(success)
pids = []
pids += [fullBody.isDynamicallyReachableFromState(si.sId,sE.sId)]
pids += [fullBody.isDynamicallyReachableFromState(sE.sId,sfe.sId)]
for pid in pids :
if pid > 0:
print "success"
pp.displayPath(pid,color=r.color.blue)
r.client.gui.setVisibility('path_'+str(pid)+'_root','ALWAYS_ON_TOP')
else:
print "fail."
"""
configs = []
configs += [si.q()]
configs += [smid.q()]
configs += [smid2.q()]
configs += [sf2.q()]
from planning.config import *
from generate_contact_sequence import *
beginState = si.sId
endState = sf2.sId
cs = generateContactSequence(fullBody,configs,beginState, endState,r)
filename = OUTPUT_DIR + "/" + OUTPUT_SEQUENCE_FILE
cs.saveAsXML(filename, "ContactSequence")
print "save contact sequence : ",filename
|
chrisxue815/leetcode_python | problems/test_0396.py | Python | unlicense | 723 | 0 | import unittest
import itertools
class Solution:
def maxRotateFunction(self, A):
"""
:type A: List[int]
:rtype: int
"""
if not A:
return 0
n = len(A)
sum_ = sum(A)
f = sum(i * num for i, num in enumerate(A))
max_ = f
for num in itertools.islice(A, n - 1):
f += n * num - sum_
if f > max_:
max_ = f
ret | urn max_
class Test(unittest.TestCase):
def test | (self):
self._test([4, 3, 2, 6], 26)
def _test(self, A, expected):
actual = Solution().maxRotateFunction(A)
self.assertEqual(expected, actual)
if __name__ == '__main__':
unittest.main()
|
khattori/kimpira | kimpira/main.py | Python | mit | 745 | 0.002685 | #!/usr/bin/env python
import optparse
import sys
from kimpira.core import Runtime
from kimpira.version import get_version
def parse_args():
usage = "usage: %prog [opti | ons] <jobfile>"
p = optparse.OptionParser(usage)
p.add_option('-V', '--version', action='store_true', dest='show_version', default=False, help='show version number and exit')
p.add_option('-t', '--task', dest='task_name', default=None, help='task name to run')
return p.parse_args()
def main():
options, args = parse_args()
if options.show_version:
print("Kimpira {0}".format(g | et_version()))
sys.exit(0)
if len(args) > 0:
Runtime().run(args[0], options.task_name, *args[1:])
if __name__ == '__main__':
main()
|
qilicun/python | python3/src/ode1/pop_exp_growth.py | Python | gpl-3.0 | 376 | 0.005319 | #!/usr/bin/env python
import numpy as np
import sys
try:
t1 = int(sys.argv[1])
except:
print "usage:", sys.argv[0], "n (number of years)"
sys.exit(1)
t0 = 1750
u0 = 2
t = np.linspace(t0, t0 | + t1 + .5, t1)
u = np.zeros(t1 + 1)
a = 0.0218
u[0] = | u0
for i in range(len(u) - 1):
u[i+1] = (1 + a)*u[i]
print "Expected population in year %d is" %(t0 + t1), u[-1]
|
pozdnyakov/chromium-crosswalk | tools/telemetry/telemetry/core/chrome/form_based_credentials_backend_unittest_base.py | Python | bsd-3-clause | 4,429 | 0.011741 | # Copyright (c) 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by | a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import unittest
from telemetry.core import browser_finder
from telemetry.unittest import simple_mock
from telemetry.unittest import options_for_unittests
from telemetry.unittest import DisabledTest
_ = simple_mock.DONT_CARE
def _GetCredentialsPath():
# TODO: This shouldn't depend on tools/perf.
credentials_ | path = os.path.join(
os.path.dirname(__file__),
'..', '..', '..', '..', 'perf', 'data', 'credentials.json')
if not os.path.exists(credentials_path):
return None
return credentials_path
class FormBasedCredentialsBackendUnitTestBase(unittest.TestCase):
def setUp(self):
self._credentials_type = None
@DisabledTest
def testRealLoginIfPossible(self):
credentials_path = _GetCredentialsPath()
if not credentials_path:
logging.warning('Credentials file not found, skipping test.')
return
options = options_for_unittests.GetCopy()
with browser_finder.FindBrowser(options).Create() as b:
b.credentials.credentials_path = credentials_path
if not b.credentials.CanLogin(self._credentials_type):
return
ret = b.credentials.LoginNeeded(b.tabs[0], self._credentials_type)
self.assertTrue(ret)
@DisabledTest
def testRealLoginWithDontOverrideProfileIfPossible(self):
credentials_path = _GetCredentialsPath()
if not credentials_path:
logging.warning('Credentials file not found, skipping test.')
return
options = options_for_unittests.GetCopy()
# Login once to make sure our default profile is logged in.
with browser_finder.FindBrowser(options).Create() as b:
b.credentials.credentials_path = credentials_path
if not b.credentials.CanLogin(self._credentials_type):
return
tab = b.tabs[0]
# Should not be logged in, since this is a fresh credentials
# instance.
self.assertFalse(b.credentials.IsLoggedIn(self._credentials_type))
# Log in.
ret = b.credentials.LoginNeeded(tab, self._credentials_type)
# Make sure login was successful.
self.assertTrue(ret)
self.assertTrue(b.credentials.IsLoggedIn(self._credentials_type))
# Reset state. Now the backend thinks we're logged out, even
# though we are logged in in our current browser session. This
# simulates the effects of running with --dont-override-profile.
b.credentials._ResetLoggedInState() # pylint: disable=W0212
# Make sure the backend thinks we're logged out.
self.assertFalse(b.credentials.IsLoggedIn(self._credentials_type))
self.assertTrue(b.credentials.CanLogin(self._credentials_type))
# Attempt to login again. This should detect that we've hit
# the 'logged in' page instead of the login form, and succeed
# instead of timing out.
ret = b.credentials.LoginNeeded(tab, self._credentials_type)
# Make sure our login attempt did in fact succeed and set the
# backend's internal state to 'logged in'.
self.assertTrue(ret)
self.assertTrue(b.credentials.IsLoggedIn(self._credentials_type))
def testLoginUsingMock(self):
raise NotImplementedError()
def _LoginUsingMock(self, backend, login_page_url, email_element_id,
password_element_id): # pylint: disable=R0201
tab = simple_mock.MockObject()
config = {'username': 'blah',
'password': 'blargh'}
tab.ExpectCall('Navigate', login_page_url)
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(True)
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
tab.ExpectCall('WaitForDocumentReadyStateToBeInteractiveOrBetter')
def VerifyEmail(js):
assert email_element_id in js
assert 'blah' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyEmail)
def VerifyPw(js):
assert password_element_id in js
assert 'largh' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifyPw)
def VerifySubmit(js):
assert '.submit' in js
tab.ExpectCall('ExecuteJavaScript', _).WhenCalled(VerifySubmit)
# Checking for form still up.
tab.ExpectCall('EvaluateJavaScript', _).WillReturn(False)
backend.LoginNeeded(tab, config)
|
Caranarq/01_Dmine | 03_UsoDeSuelo/P0311/P0311.py | Python | gpl-3.0 | 8,216 | 0.005242 | # -*- coding: utf-8 -*-
"""
Started on Wed Sep 13 15:55:22 2017
@author: carlos.arana
Descripcion: Creación de dataset para el parámetro 0311 "Superficie Agricola"
Informacion disponible para 2015
"""
import pandas as pd
import numpy as np
import sys
# Librerias locales utilizadas
module_path = r'D:\PCCS\01_Dmine\Scripts'
if module_path not in sys.path:
sys.path.append(module_path)
from SUN.asignar_sun import asignar_sun
from SUN_integridad.SUN_integridad import SUN_integridad
from PCCS_variables.PCCS_variables import variables
from ParametroEstandar.ParametroEstandar import ParametroEstandar
from AsignarDimension.AsignarDimension import AsignarDimension
from DocumentarParametro.DocumentarParametro import DocumentarParametro
"""
Las librerias locales utilizadas renglones arriba se encuentran disponibles en las siguientes direcciones:
SCRIPT: | DISPONIBLE EN:
------ | ------
asignar_sun | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/SUN
SUN_integridad | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/SUN_integridad
variables | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/PCCS_variables
ParametroEstandar | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/ParametroEstandar
AsignarDimension | https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/AsignarDimension
DocumentarParametro 1 https://github.com/INECC-PCCS/01_Dmine/tree/master/Scripts/DocumentarParametro
"""
# Documentacion del Parametro ---------------------------------------------------------------------------------------
# Descripciones del Parametro
ClaveParametro = 'P0311'
DescParam = 'Superficie Agricola'
UnidadesParam = 'Kilómetros Cuadrados'
NombreParametro = 'Superficie Agrícola'
TituloParametro = 'SF_AGRICOLA' # Para nombrar la columna del parametro
# Descripciones del proceso de Minería
DirFuente = r'D:\PCCS\01_Dmine\Datasets\BS02'
DSBase = '"BS02.xlsx", disponible en https://github.com/INECC-PCCS/01_Dmine/tree/master/Datasets/BS02'
NomDataset = r'Uso de Suelo y Vegetacion'
DescDataset = r'Datos por municipio de Superficie continental, vegetal, acuifera y urbana'
ContenidoHojaDatos = 'Datos de Superficie Agricola de municipios, etiquetados con clave SUN'
Notas = 'S/N'
DescVarIntegridad = 'La variable de integridad municipal para esta Dataset es binaria: \n' \
'1 = El municipio cuenta con informacion \n0 = El municipio no cuenta con información'
NomFuente = 'SIMBAD - Sistema Estatal y municipal de Base de Datos (INEGI)'
UrlFuente = 'http://sc.inegi.org.mx/cobdem/'
ActDatos = '2005'
DispTemp = '2005'
PeriodoAct = 'Anual'
DesagrMax = 'Municipal'
# Descripciones generadas desde la clave del parámetro
ClaveDimension = ClaveParametro[1:3]
NomDimension = AsignarDimension(ClaveDimension)['nombre']
DirDimension = ClaveDimension + "_" + AsignarDimension(ClaveDimension)['directorio']
RepoMina = 'https://github.com/INECC-PCCS/01_Dmine/tree/master/{}/{}'.format(DirDimension, ClaveParametro)
DirDestino = r'D:\PCCS\01_Dmine\{}'.format(ClaveDimension+"_"+AsignarDimension(ClaveDimension)['directorio'])
# Construccion del Parámetro -----------------------------------------------------------------------------------------
# Dataset Inicial
dataset = pd.read_excel(DirFuente + r'\BS02.xlsx', sheetname="DATOS", dtype={'CVE_MUN': str})
dataset.set_index('CVE_MUN', inplace=True)
# Elegir columna de Parametro y reconvertir a dataset
dataset = dataset['Agricultura']
proxy = pd.DataFrame()
proxy[TituloParametro] = dataset
dataset = proxy
# Calculo de Variable de Integridad.
faltantes = dataset[TituloParametro].isnull()
dataset['FALTANTES'] = faltantes
dataset['VAR_INTEGRIDAD'] = faltantes.apply(lambda x: int(not x))
variables_dataset = list(dataset)
# Consolidar datos por ciudad
dataset['CVE_MUN'] = dataset.index
variables_SUN = ['CVE_MUN', 'NOM_MUN', 'CVE_SUN', 'NOM_SUN', 'TIPO_SUN', 'NOM_ENT']
DatosLimpios = asignar_sun(dataset, vars=variables_SUN)
OrdenColumnas = (variables_SUN + variables_dataset)
DatosLimpios = DatosLimpios[OrdenColumnas] # Reordenar las columnas
# Revision de integridad
integridad_parametro = SUN_integridad(DatosLimpios)
info_completa = sum(integridad_parametro['INTEGRIDAD']['INTEGRIDAD'] == 1) # Para generar grafico de integridad
info_sin_info = sum(integridad_parametro['INTEGRIDAD']['INTEGRIDAD'] == 0) # Para generar grafico de integridad
info_incomple = 135 - info_completa - info_sin_info # Para generar grafico de integridad
# Construccion del Parametro
param_dataset = DatosLimpios.set_index('CVE_SUN')
param_dataset['CVE_SUN'] = param_dataset.index
param = param_dataset.groupby(by='CVE_SUN').agg('sum')[TituloParametro] # Agregacion por clave SUN
intparam = param_dataset.groupby(by='CVE_SUN').agg('mean')['VAR_INTEGRIDAD'] # Integridad por ciudad
std_nomsun = param_dataset['CVE_SUN'].map(str)+' - '+param_dataset['NOM_SUN'] # Nombres estandar CVE_SUN + NOM_SUN
std_nomsun.drop_duplicates(keep='first', inplace=True)
Parametro = pd.DataFrame()
Parametro['CIUDAD'] = std_nomsun
Parametro[ClaveParametro] = param
Parametro['INTEGRIDAD'] = intparam
Parametro = Parametro.sort_index()
# Creacion de documentos de memoria del Parametro --------------------------------------------------------------------
# Lista de Variables
variables_locales = sorted(list(set(list(DatosLimpios) +
list(integridad_parametro['INTEGRIDAD']) +
list(integridad_parametro['EXISTENCIA']) +
list(Parametro))))
metavariables = variables(variables_locales)
# Metadatos
d_parametro = {
'DESCRIPCION DEL PARAMETRO': np.nan,
'Clave': ClaveParametro,
'Nombre del Parametro': NombreParametro,
'Descripcion del Parametro': DescParam,
'Unidades': UnidadesParam
}
d_hojas = {
'METADATOS': 'Descripciones y notas relativas al Dataset',
'PARAMETRO': 'Dataset resultado de la minería, agregado por clave del Sistema Urbano Nacional, '
'para utilizarse en la construcción de Indicadores',
'DATOS': ContenidoHojaDatos,
'INTEG | RIDAD': 'Revision de integridad de la información POR CLAVE DEL SUN. '
'Promedio de | VAR_INTEGRIDAD de los municipios que componen una ciudad. '
'Si no se tiene información para el municipio, VAR_INTEGRIDAD es igual a cero',
'EXISTENCIA': 'Revision de integridad de la información POR MUNICIPIO.',
' ': np.nan,
'DESCRIPCION DE VARIABLES': np.nan
}
d_mineria = {
' ': np.nan,
'DESCRIPCION DEL PROCESO DE MINERIA:': np.nan,
'Nombre del Dataset': NomDataset,
'Descripcion del dataset': DescDataset,
'Disponibilidad Temporal': DispTemp,
'Periodo de actualizacion': PeriodoAct,
'Nivel de Desagregacion': DesagrMax,
'Notas': Notas,
'Fuente': NomFuente,
'URL_Fuente': UrlFuente,
'Dataset base': DSBase,
'Repositorio de mineria': RepoMina,
'VAR_INTEGRIDAD': DescVarIntegridad,
' ': np.nan,
'HOJAS INCLUIDAS EN EL LIBRO': np.nan
}
descripcion_parametro = pd.DataFrame.from_dict(d_parametro, orient='index').rename(columns={0: 'DESCRIPCION'})
descripcion_mineria = pd.DataFrame.from_dict(d_mineria, orient='index').rename(columns={0: 'DESCRIPCION'})
descripcion_hojas = pd.DataFrame.from_dict(d_hojas, orient='index').rename(columns={0: 'DESCRIPCION'})
MetaParametro = descripcion_parametro.append(descripcion_mineria).append(descripcion_hojas).append(metavariables)
# Diccionario de Descripciones
DescParametro = {
'ClaveParametro': ClaveParametro,
'NombreParametro': NombreParametro,
'info_completa': info_completa,
'info_sin_info': info_sin_info,
'info_incomple': info_incomple,
'RutaSalida': DirDestino,
'Clave de Dimension': ClaveDimension,
'Nombre de Dimension': NomDimension,
'Titulo de Columna': TituloParametro,
'Actualizacion de datos': ActDatos
}
# Crear archivo de Excel y documentar parametro
ParametroEstandar(DescParametro, MetaParametro, Parametro, DatosLimpios, integridad_parametro)
DocumentarParametro(DescParametro, Met |
Brocade-OpenSource/OpenStack-DNRM-Neutron | neutron/plugins/cisco/db/nexus_models_v2.py | Python | apache-2.0 | 1,756 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012, Cisco Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# @author: Rohit Agarwalla, Cisco Systems, Inc.
from sqlalchemy import Column, Integer, String
from neutron.db import model_base
c | lass NexusPortBinding(model_base.BASEV2):
"""Represents a binding of VM's to nexus ports."""
__tablename__ = "nexusport_bindings"
id = Column(Integer, primary_key=True, autoincrement=True)
port_id = Column(String(255))
vlan_id = Column(Integer, nullable=False)
switch_ip = Column(String(255))
instance_id = Column(String(255))
def __init__(self, port_id, vlan | _id, switch_ip, instance_id):
self.port_id = port_id
self.vlan_id = vlan_id
self.switch_ip = switch_ip
self.instance_id = instance_id
def __repr__(self):
return "<NexusPortBinding (%s,%d, %s, %s)>" % \
(self.port_id, self.vlan_id, self.switch_ip, self.instance_id)
def __eq__(self, other):
return (
self.port_id == other.port_id and
self.vlan_id == other.vlan_id and
self.switch_ip == other.switch_ip and
self.instance_id == other.instance_id
)
|
aelse/MrHappy | botplugins/cmd_ping.py | Python | gpl-3.0 | 158 | 0 | from botplugin import BotPlugin
class CommandPing(BotPlugin):
def command_ping(self, bot, command, args, n | ick):
| bot.speak('Pong!', paste=True)
|
att-comdev/drydock | tests/unit/test_reference_resolver.py | Python | apache-2.0 | 2,055 | 0 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Test the data reference resolver."""
import base64
import responses
from drydock_provisioner.statemgmt.design.resolver import ReferenceResolver
class TestClass(object):
def test_resolve_file_url(self, input_files):
"""Test that the resolver will resolve file URLs."""
input_file = input_files.join("fullsite.yaml")
url = 'file://%s' % str(input_file)
content = ReferenceResolver.resolve_reference(url)
assert len(content) > 0
@responses.activate
def test_resolve_http_url(self):
"""Test that the resolver will resolve http URLs."""
url = 'http://foo.com/test.yaml'
responses.add(responses.GET, url)
ReferenceResolver.resolve_reference(url)
assert len(responses.calls) == 1
assert responses.calls[0].request.url == url
@responses.activate
def test_resolve_http_basicauth_url(self):
"""Test the resolver will resolve http URLs w/ basic auth."""
| url = 'http://user:pass@foo.com/test.yaml'
auth_header = "Basic %s" % base64.b64encode(
"user:pass".encode('utf-8')).decode('utf-8')
responses.add(responses.GET, url)
ReferenceResolver.resolve_reference(url)
assert len(responses.calls) == 1
assert 'Authorization' in responses.calls[0].request.headers
| assert responses.calls[0].request.headers.get(
'Authorization') == auth_header
|
FakerKimg/sulley | process_monitor.py | Python | gpl-2.0 | 13,992 | 0.006075 | #!c:\\python\\python.exe
import subprocess
import threading
import getopt
import time
import sys
import os
import pydbg
import pydbg.defines
import utils
from sulley import pedrpc
PORT = 26002
ERR = lambda msg: sys.stderr.write("ERR> " + msg + "\n") or sys.exit(1)
USAGE = """USAGE: process_monitor.py
<-c|--crash_bin FILENAME> filename to serialize crash bin class to
[-p|--proc_name NAME] process name to search for and attach to
[-i|--ignore_pid PID] ignore this PID when searching for the target process
[-l|--log_level LEVEL] log level (default 1), increase for more verbosity
[--port PORT] TCP port to bind this agent to
"""
########################################################################################################################
class DebuggerThread (threading.Thread):
def __init__ (self, process_monitor, process, pid_to_ignore=None):
"""
Instantiate a new PyDbg instance and register user and access violation callbacks.
"""
threading.Thread.__init__(self)
self.process_monitor = process_monitor
self.proc_name = process
self.ignore_pid = pid_to_ignore
self.access_violation = False
self.active = True
self.dbg = pydbg.pydbg()
self.pid = None
# give this thread a unique name.
self.setName("%d" % time.time())
self.process_monitor.log("debugger thread initialized with UID: %s" % self.getName(), 5)
# set the user callback which is response for checking if this thread has been killed.
self.dbg.set_callback(pydbg.defines.USER_CALLBACK_DEBUG_EVENT, self.dbg_callback_user)
self.dbg.set_callback(pydbg.defines.EXCEPTION_ACCESS_VIOLATION, self.dbg_callback_access_violation)
def dbg_callback_access_violation (self, dbg):
"""
Ignore first chance exceptions. Record all unhandled exceptions to the process monitor crash bin and kill
the target process.
"""
# ignore first chance exceptions.
if dbg.dbg.u.Exception.dwFirstChance:
return pydbg.defines.DBG_EXCEPTION_NOT_HANDLED
# raise the access violation flag.
self.access_violation = True
# record the crash to the process monitor crash bin.
# include the test case number in the "extra" information block.
self.process_monitor.crash_bin.record_crash(dbg, self.process_monitor.test_number)
# save the the crash synopsis.
self.process_monitor.last_synopsis = self.process_monitor.crash_bin.crash_synopsis()
first_line = self.process_monitor.last_synopsis.split("\n")[0]
self.process_monitor.log("debugger thread-%s caught access violation: '%s'" % (self.getName(), first_line))
# this instance of pydbg should no longer be accessed, i want to know if it is.
self.process_monitor.crash_bin.pydbg = None
# kill the process.
dbg.terminate_process()
return pydbg.defines.DBG_CONTINUE
def dbg_callback_user (self, dbg):
"""
The user callback is run roughly every 100 milliseconds (WaitForDebugEvent() timeout from pydbg_core.py). Simply
check if the active flag was lowered and if so detach from the target process. The thread should then exit.
"""
if not self.active:
self.process_monitor.log("debugger thread-%s detaching" % self.getName(), 5)
dbg.detach()
return pydbg.defines.DBG_CONTINUE
def run (self):
"""
Main thread routine, called on thread.start(). Thread exits when this routine returns.
"""
self.process_monitor.log("debugger thread-%s looking for process name: %s" % (self.getName(), self.proc_name))
# watch for and try attaching to the process.
try:
self.watch()
self.dbg.attach(self.pid)
self.dbg.run()
self.process_monito | r.log("debugger thread-%s exiting" % self.getName())
except:
pass
# TODO: removing the following line appears to cause some concurrency issue | s.
del self.dbg
def watch (self):
"""
Continuously loop, watching for the target process. This routine "blocks" until the target process is found.
Update self.pid when found and return.
"""
while not self.pid:
for (pid, name) in self.dbg.enumerate_processes():
# ignore the optionally specified PID.
if pid == self.ignore_pid:
continue
if name.lower() == self.proc_name.lower():
self.pid = pid
break
self.process_monitor.log("debugger thread-%s found match on pid %d" % (self.getName(), self.pid))
########################################################################################################################
class ProcessMonitorPedrpcServer (pedrpc.server):
def __init__ (self, host, port, crash_filename, proc=None, pid_to_ignore=None, level=1):
"""
@type host: str
@param host: Hostname or IP address
@type port: int
@param port: Port to bind server to
@type crash_filename: str
@param crash_filename: Name of file to (un)serialize crash bin to/from
@type proc: str
@param proc: (Optional, def=None) Process name to search for and attach to
@type pid_to_ignore: int
@param pid_to_ignore: (Optional, def=None) Ignore this PID when searching for the target process
@type level: int
@param level: (Optional, def=1) Log output level, increase for more verbosity
"""
# initialize the PED-RPC server.
pedrpc.server.__init__(self, host, port)
self.crash_filename = crash_filename
self.proc_name = proc
self.ignore_pid = pid_to_ignore
self.log_level = level
self.stop_commands = []
self.start_commands = []
self.test_number = None
self.debugger_thread = None
self.crash_bin = utils.crash_binning.crash_binning()
self.last_synopsis = ""
if not os.access(os.path.dirname(self.crash_filename), os.X_OK):
self.log("invalid path specified for crash bin: %s" % self.crash_filename)
raise Exception
# restore any previously recorded crashes.
try:
self.crash_bin.import_file(self.crash_filename)
except:
pass
self.log("Process Monitor PED-RPC server initialized:")
self.log("\t crash file: %s" % self.crash_filename)
self.log("\t # records: %d" % len(self.crash_bin.bins))
self.log("\t proc name: %s" % self.proc_name)
self.log("\t log level: %d" % self.log_level)
self.log("awaiting requests...")
def alive (self):
"""
Returns True. Useful for PED-RPC clients who want to see if the PED-RPC connection is still alive.
"""
return True
def get_crash_synopsis (self):
"""
Return the last recorded crash synopsis.
@rtype: String
@return: Synopsis of last recorded crash.
"""
return self.last_synopsis
def get_bin_keys (self):
"""
Return the crash bin keys, ie: the unique list of exception addresses.
@rtype: List
@return: List of crash bin exception addresses (keys).
"""
return self.crash_bin.bins.keys()
def get_bin (self, binary):
"""
Return the crash entries from the specified bin or False if the bin key is invalid.
@type binary: Integer (DWORD)
@param binary: Crash bin key (ie: exceptio |
grandmasterchef/WhatManager2 | WhatManager2/management/commands/file_metadata_indexer.py | Python | mit | 1,499 | 0.004003 | from django.core.management.base import BaseCommand
from home.models import ReplicaSet, WhatTorrent, \
TransTorrent, WhatFileMetadataCache
class Command(BaseCommand):
help = u'Cache all .flac and .mp3 metadata in download locations.'
def handle(self, *args, **options):
masters = ReplicaSet.get_what_master().transinstance_set.all()
what_torrent_ids = WhatTorrent.objects.all().values_list('id', flat=True)
start = 0
page_size = 128
while start < len(what_torrent_ids):
print 'Updating objects {0}-{1}/{2}'.format(start, start + page_size,
len(what_torrent_ids))
bulk = WhatTorrent.objects.defer('torrent_file').in_bulk(
what_torrent_ids[start:start + pag | e_size])
start += page_size
trans_torrents = {
t.what_torrent_id: t for t in
TransTorrent.objects.filter(instance__in=masters, what_torrent__in=bulk.values())
}
for what_torrent in bulk.itervalues():
trans_ | torrent = trans_torrents.get(what_torrent.id)
if trans_torrent is not None and trans_torrent.torrent_done == 1:
try:
WhatFileMetadataCache.get_metadata_batch(what_torrent, trans_torrent, True)
except Exception as ex:
print 'Failed updating torrent {0}: {1}'.format(what_torrent.id, ex)
|
illing2005/django-airports-apis | airports/serializers.py | Python | mit | 433 | 0.027714 | from rest_framework import serializers |
from rest_framework import pagination
from .models import Airport
class AirportSerializer(serializers.ModelSerializer):
read_only_fields = ('id','name','city','country','country_code','iata','icao')
class Meta:
model = Airport
class PaginationAirportSerializer(pagination.PaginationSerializer):
class Meta:
o | bject_serializer_class = AirportSerializer |
deanishe/alfred-fakeum | src/libs/faker/providers/phone_number/pl_PL/__init__.py | Python | mit | 937 | 0 | from __future__ import unicode_literals
from .. import Provider as PhoneNumberProvider
class Provider(PhoneNumberProvider):
formats = (
# Mobile
# Government website: http://www.uke.gov.pl/numeracja-843
'50# ### ###',
'51# ### ###',
'53# ### ###',
'57# ### ###',
'60# ### ###',
'66# ### ###',
'69# ### ###',
'72# ### ###',
'73# ### ###',
'78# ### ###',
'79# ### ###',
'88# ### ###',
'+48 50# ### ###',
'+48 51# ### ###',
'+48 53# ### ###',
'+48 57# ### ###',
'+48 60# ### # | ##',
'+48 66# ### ###',
'+48 69# ### ###',
'+48 72# ### ###',
'+48 73# ### ###',
'+48 78# ### ###',
'+ | 48 79# ### ###',
'+48 88# ### ###',
'32 ### ## ##',
'+48 32 ### ## ##',
'22 ### ## ##',
'+48 22 ### ## ##',
)
|
tzpBingo/github-trending | codespace/python/telegram/ext/messagequeue.py | Python | mit | 14,773 | 0.004468 | #!/usr/bin/env python
#
# Module author:
# Tymofii A. Khodniev (thodnev) <thodnev@mail.ru>
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2022
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/]
"""A throughput-limiting message processor for Telegram bots."""
import functools
import queue as q
import threading
import time
import warnings
from typing import TYPE_CHECKING, Callable, List, NoReturn
from telegram.ext.utils.promise import Promise
from telegram.utils.deprecate import TelegramDeprecationWarning
if TYPE_CHECKING:
from telegram import Bot
# We need to count < 1s intervals, so the most accurate timer is needed
curtime = time.perf_counter
class DelayQueueError(RuntimeError):
"""Indicates processing errors."""
__slots__ = ()
class DelayQueue(threading.Thread):
"""
Processes callbacks from queue with specified throughput limits. Creates a separate thread to
process callbacks with delays.
.. deprecated:: 13.3
:class:`telegram.ext.DelayQueue` in its current form is deprecated and will be reinvented
in a future release. See `this thread <https://git.io/JtDbF>`_ for a list of known bugs.
Args:
queue (:obj:`Queue`, optional): Used to pass callbacks to thread. Creates ``Queue``
implicitly if not provided.
burst_limit (:obj:`int`, optional): Number of maximum callbacks to process per time-window
defined by :attr:`time_limit_ms`. Defaults to 30.
time_limit_ms (:obj:`int`, optional): Defines width of time-window used when each
processing limit is calculated. Defaults to 1000.
exc_route (:obj:`callable`, optional): A callable, accepting 1 positional argument; used to
route exceptions from processor thread to main thread; is called on `Exception`
subclass exceptions. If not provided, exceptions are routed through dummy handler,
which re-raises them.
autostart (:obj:`bool`, optional): If :obj:`True`, processor is started immediately after
object's creation; if :obj:`False`, should be started manually by `start` method.
Defaults to :obj:`True`.
name (:obj:`str`, optional): Thread's name. Defaults to ``'DelayQueue-N'``, where N is
sequential number of object created.
Attributes:
burst_limit (:obj:`int`): Number of maximum callbacks to process per time-window.
time_limit (:obj:`int`): Defines width of time-window used when each processing limit is
calculated.
exc_route (:obj:`callable`): A callable, accepting 1 positional argument; used to route
exceptions from processor thread to main thread;
name (:obj:`str`): Thread's name.
"""
_instcnt = 0 # instance counter
def __init__(
self,
queue: q.Queue = None,
burst_limit: int = 30,
time_limit_ms: int = 1000,
exc_route: Callable[[Exception], None] = None,
autostart: bool = True,
name: str = None,
):
warnings.warn(
'DelayQueue in its current form is deprecated and will be reinvented in a future '
'release. See https://git.io/JtDbF for a list of known bugs.',
category=TelegramDeprecationWarning,
)
self._queue = queue if queue is not None else q.Queue()
self.burst_limit = burst_limit
self.time_limit = time_limit_ms / 1000
self.exc_route = exc_route if exc_route is not None else self._default_exception_handler
self.__exit_req = False # flag to gently exit thread
self.__class__._instcnt += 1
if name is None:
name = f'{self.__class__.__name__}-{self.__class__._instcnt}'
super().__init__(name=name)
self.daemon = False
if autostart: # immediately start processing
super().start()
def run(self) -> None:
"""
Do not use the method except for unthreaded testing purposes, the method normally is
automatically called by autostart argument.
"""
times: List[float] = [] # used to store each callable processing time
while True:
item = self._queue.get()
if self.__exit_req:
return # shutdown thread
# delay routine
now = time.perf_counter()
t_delta = now - self.time_limit # calculate early to improve perf.
if times and t_delta > times[-1]:
# if last call was before the limit time-window
# used to impr. perf. in long-interval calls case
times = [now]
else:
# collect last in current limit time-window
times = [t for t in times if t >= t_delta]
times.append(now)
if len(times) >= self.burst_limit: # if throughput limit was hit
time.sleep(times[1] - t_delta)
# finally process one
try:
func, args, kwargs = item
func(*args, **kwargs)
except Exception as exc: # re-route any exceptions
self.exc_route(exc) # to prevent thread exit
def stop(self, timeout: float | = None) -> None:
"""Used to gently stop processor and shutdown its thread.
Args:
timeout (:obj:`float`): Indicates maximum time to wait for processor to stop and its
thread to exit. If tim | eout exceeds and processor has not stopped, method silently
returns. :attr:`is_alive` could be used afterwards to check the actual status.
``timeout`` set to :obj:`None`, blocks until processor is shut down.
Defaults to :obj:`None`.
"""
self.__exit_req = True # gently request
self._queue.put(None) # put something to unfreeze if frozen
super().join(timeout=timeout)
    @staticmethod
    def _default_exception_handler(exc: Exception) -> NoReturn:
        """
        Dummy exception handler which re-raises exception in thread. Could be possibly overwritten
        by subclasses.

        Note that re-raising here terminates the processor thread, since run() does not catch it.
        """
        raise exc
    def __call__(self, func: Callable, *args: object, **kwargs: object) -> None:
        """Used to process callbacks in throughput-limiting thread through queue.

        Args:
            func (:obj:`callable`): The actual function (or any callable) that is processed through
                queue.
            *args (:obj:`list`): Variable-length `func` arguments.
            **kwargs (:obj:`dict`): Arbitrary keyword-arguments to `func`.

        Raises:
            DelayQueueError: If the processor thread was stopped or never started.
        """
        if not self.is_alive() or self.__exit_req:
            raise DelayQueueError('Could not process callback in stopped thread')
        self._queue.put((func, args, kwargs))
# The most straightforward way to implement this is to use 2 sequential delay
# queues, like on classic delay chain schematics in electronics.
# So, message path is:
# msg --> group delay if group msg, else no delay --> normal msg delay --> out
# This way OS threading scheduler cares of timings accuracy.
# (see time.time, time.clock, time.perf_counter, time.sleep @ docs.python.org)
class MessageQueue:
"""
Implements callback processing with proper delays to avoid hitting Telegram's message limits.
Contains two ``DelayQueue``, for group and for all messages, interconnected in delay chain.
Callables are processed through *group* ``DelayQueue``, then through *all* ``DelayQueue`` for
|
headmastersquall/caatinga | caatinga/lscaat/diff.py | Python | gpl-3.0 | 2,568 | 0 | #!/usr/bin/env python
# Copyright 2015 Chris Taylor
#
# This file is part of caatinga.
#
# Caatinga is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Caatinga is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with caatinga. If not, see <http://www.gnu.org/licenses/>.
import os
import sys
import caatinga.core.functions as fn
from os.path import join
from difflib import Differ
def diff(args, settings):
    """
    Main function for the diff option.

    Validates the arguments, locates the single matching file in the selected
    backup (or the latest one), and writes a line-by-line comparison between
    the backed-up copy and the working-directory copy to stdout.
    """
    _validateArgs(args)
    wordArgs = fn.parseWordArgs(args)
    _validateWordArgs(wordArgs)
    home = fn.getBackupHome(settings.backupLocation, settings.hostName)
    fn.insureBackupHomeExists(home)
    backup = fn.getBackupOrLatest(wordArgs, home)
    cwd = os.getcwd()
    # Map the current directory into the backup tree (strips the alternate root).
    backupWd = fn.removeAltRoot(settings.root, cwd)
    items = fn.expandGlob(home, backup, backupWd, wordArgs["glob"])
    _validateItems(items)
    localFile = _getLines(join(cwd, wordArgs["glob"]))
    backupFile = _getLines(items[0])
    # NOTE: local name shadows this function; it holds the compare callable.
    diff = _getDiff()
    sys.stdout.writelines(diff(backupFile, localFile))
def _validateArgs(args):
"""
Insure that only one file or glob pattern is passed as an arg.
"""
if len(args) == 0:
raise Exception("No file provided to diff.")
if len(args) > 1:
_raiseMultipleFileException()
def _validateWordArgs(wordArgs):
"""
Insure the word args that were provided are valid.
"""
if wordArgs["id"] == "all":
raise Exception("Cannot compare items from 'all' backups.")
def _validateItems(items):
"""
Make sure we | have proper number items to compare.
"""
if not items:
raise Exception("No items found to compare.")
if len(items) > 1:
_raiseMultipleFileException()
def _raiseMultipleFileException():
    """Raise the shared error used whenever more than one file matches."""
    raise Exception("Cannot perform diff on more than one file.")
def _getLines(fileName):
"""
Read and return all the lines from the provided file.
"""
with open(fileName, 'U') as f:
return f.readlines()
def | _getDiff():
"""
Get the program that will be used to perform the diff.
"""
return Differ().compare
|
mfalesni/cfme_tests | cfme/tests/test_db_migrate.py | Python | gpl-2.0 | 8,770 | 0.002281 | import tempfile
import pytest
from os import path as os_path
from wait_for import wait_for
from cfme.base.ui import navigate_to
from cfme.utils import version, os
from cfme.utils.appliance import ApplianceException
from cfme.utils.blockers import BZ
from cfme.utils.conf import cfme_data, credentials
from cfme.utils.log import logger
from cfme.utils.repo_gen import process_url, build_file
from cfme.utils.version import get_stream
pytestmark = [pytest.mark.uncollectif(lambda appliance: appliance.is_dev, reason="rails server")]
def pytest_generate_tests(metafunc):
    """Parametrize the db-restore tests with every backup listed in cfme_data['db_backups']."""
    if metafunc.function in {test_upgrade_single_inplace, test_db_migrate_replication}:
        # These two tests carry their own parametrization (or none).
        return
    argnames, argvalues, idlist = ['db_url', 'db_version', 'db_desc'], [], []
    db_backups = cfme_data.get('db_backups', {})
    if not db_backups:
        pytest.skip('No db backup information available!')
    for key, data in db_backups.iteritems():
        # Once we can access the appliance in here, we can do
        # if data.version >= appliance.version or \
        #         get_stream(data.version) == get_stream(appliance.version):
        #     continue
        argvalues.append((data.url, data.version, data.desc))
        idlist.append(key)
    return metafunc.parametrize(argnames=argnames, argvalues=argvalues, ids=idlist)
@pytest.fixture(scope="module")
def temp_appliance_extended_db(temp_appliance_preconfig):
app = temp_appliance_preconfig
app.evmserverd.stop()
app.db.extend_partition()
app.start_evm_service()
return app
@pytest.fixture(scope="function")
def temp_appliance_remote(temp_appliance_preconfig_funcscope):
"""Needed for db_migrate_replication as you can't drop a remote db due to subscription"""
app = temp_appliance_preconfig_funcscope
app.evmserverd.stop()
app.db.extend_partition()
app.start_evm_service()
return app
@pytest.fixture(scope="function")
def temp_appliance_global_region(temp_appliance_unconfig_funcscope_rhevm):
temp_appliance_unconfig_funcscope_rhevm.appliance_console_cli.configure_appliance_internal(
99, 'localhost', credentials['database']['username'], credentials['database']['password'],
'vmdb_production', temp_appliance_unconfig_funcscope_rhevm.unpartitioned_disks[0])
temp_appliance_unconfig_funcscope_rhevm.wait_for_evm_service()
temp_appliance_unconfig_funcscope_rhevm.wait_for_web_ui()
return temp_appliance_unconfig_funcscope_rhevm
@pytest.yield_fixture(scope="function")
def appliance_preupdate(temp_appliance_preconfig_funcscope_upgrade, appliance):
"""Reconfigure appliance partitions and adds repo file for upgrade"""
update_url = ('update_url_' + ''.join([i for i in get_stream(appliance.version)
if i.isdigit()]))
temp_appliance_preconfig_funcscope_upgrade.db.extend_partition()
urls = process_url(cfme_data['basic_info'][update_url])
output = build_file(urls)
with tempfile.NamedTemporaryFile('w') as f:
f.write(output)
f.flush()
os.fsync(f.fileno())
temp_appliance_preconfig_funcscope_upgrade.ssh_client.put_file(
f.name, '/etc/yum.repos.d/update.repo')
return temp_appliance_preconfig_funcscope_upgrade
@pytest.mark.ignore_stream('5.5', 'upstream')
@pytest.mark.tier(2)
@pytest.mark.meta(
    blockers=[BZ(1354466, unblock=lambda db_url: 'ldap' not in db_url)])
def test_db_migrate(temp_appliance_extended_db, db_url, db_version, db_desc):
    """Restore an older DB backup on the appliance, migrate it, and verify UI login works."""
    app = temp_appliance_extended_db
    # Download the database
    logger.info("Downloading database: {}".format(db_desc))
    url_basename = os_path.basename(db_url)
    loc = "/tmp/"
    rc, out = app.ssh_client.run_command(
        'curl -o "{}{}" "{}"'.format(loc, url_basename, db_url), timeout=30)
    assert rc == 0, "Failed to download database: {}".format(out)
    # The v2_key is potentially here
    v2key_url = os_path.join(os_path.dirname(db_url), "v2_key")
    # Stop EVM service and drop vmdb_production DB
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    # restore new DB
    rc, out = app.ssh_client.run_command(
        'pg_restore -v --dbname=vmdb_production {}{}'.format(loc, url_basename), timeout=600)
    assert rc == 0, "Failed to restore new database: {}".format(out)
    app.db.migrate()
    # fetch v2_key
    try:
        rc, out = app.ssh_client.run_command(
            'curl "{}"'.format(v2key_url), timeout=15)
        assert rc == 0, "Failed to download v2_key: {}".format(out)
        assert ":key:" in out, "Not a v2_key file: {}".format(out)
        rc, out = app.ssh_client.run_command(
            'curl -o "/var/www/miq/vmdb/certs/v2_key" "{}"'.format(v2key_url), timeout=15)
        assert rc == 0, "Failed to download v2_key: {}".format(out)
    # or change all invalid (now unavailable) passwords to 'invalid'
    except AssertionError:
        app.db.fix_auth_key()
        app.db.fix_auth_dbyml()
    # start evmserverd, wait for web UI to start and try to log in
    try:
        app.start_evm_service()
    except ApplianceException:
        rc, out = app.ssh_client.run_rake_command("evm:start")
        assert rc == 0, "Couldn't start evmserverd: {}".format(out)
    app.wait_for_web_ui(timeout=600)
    # Reset user's password (necessary for customer DBs), then verify login.
    app.db.reset_user_pass()
    wait_for(lambda: navigate_to(app.server, 'LoginScreen'), handle_exception=True)
    app.server.login(app.user)
@pytest.mark.uncollectif(
    lambda dbversion: dbversion == 'scvmm_58' and version.current_version() < "5.9" or
    dbversion == 'ec2_5540' and version.current_version() < "5.9")
@pytest.mark.parametrize('dbversion', ['ec2_5540', 'azure_5620', 'rhev_57', 'scvmm_58'],
                         ids=['55', '56', '57', '58'])
def test_db_migrate_replication(temp_appliance_remote, dbversion, temp_appliance_global_region):
    """Restore+migrate a backup on a remote appliance, then verify its providers
    replicate to a global-region appliance via pglogical."""
    app = temp_appliance_remote
    app2 = temp_appliance_global_region
    # Download the database
    logger.info("Downloading database: {}".format(dbversion))
    db_url = cfme_data['db_backups'][dbversion]['url']
    url_basename = os_path.basename(db_url)
    rc, out = app.ssh_client.run_command(
        'curl -o "/tmp/{}" "{}"'.format(url_basename, db_url), timeout=30)
    assert rc == 0, "Failed to download database: {}".format(out)
    # The v2_key is potentially here
    v2key_url = os_path.join(os_path.dirname(db_url), "v2_key")
    # Stop EVM service and drop vmdb_production DB
    app.evmserverd.stop()
    app.db.drop()
    app.db.create()
    # restore new DB and migrate it
    rc, out = app.ssh_client.run_command(
        'pg_restore -v --dbname=vmdb_production /tmp/{}'.format(url_basename), timeout=600)
    assert rc == 0, "Failed to restore new database: {}".format(out)
    app.db.migrate()
    # fetch v2_key
    try:
        rc, out = app.ssh_client.run_command(
            'curl "{}"'.format(v2key_url), timeout=15)
        assert rc == 0, "Failed to download v2_key: {}".format(out)
        assert ":key:" in out, "Not a v2_key file: {}".format(out)
        rc, out = app.ssh_client.run_command(
            'curl -o "/var/www/miq/vmdb/certs/v2_key" "{}"'.format(v2key_url), timeout=15)
        assert rc == 0, "Failed to download v2_key: {}".format(out)
    # or change all invalid (now unavailable) passwords to 'invalid'
    except AssertionError:
        app.db.fix_auth_key()
        app.db.fix_auth_dbyml()
    # start evmserverd, wait for web UI to start and try to log in
    try:
        app.start_evm_service()
    except ApplianceException:
        rc, out = app.ssh_client.run_rake_command("evm:start")
        assert rc == 0, "Couldn't start evmserverd: {}".format(out)
    app.wait_for_web_ui(timeout=600)
    # Reset user's password, just in case (necessary for customer DBs)
    app.db.reset_user_pass()
    app.server.login(app.user)
    # Wire remote -> global pglogical replication, then wait for provider lists to match.
    app.set_pglogical_replication(replication_type=':remote')
    app2.set_pglogical_replication(replication_type=':global')
    app2.add_pglogical_replication_subscription(app.hostname)
    def is_provider_replicated(app, app2):
        return set(app.managed_provider_names) == set(app2.managed_provider_names)
    wait_for(is_provider_replicated, func_args=[app, app2], timeout=30)
def test_upgrade_single_inplace(appl |
rail-berkeley/d4rl | scripts/generation/generate_kitchen_datasets.py | Python | apache-2.0 | 5,546 | 0.000902 | """Script for generating the datasets for kitchen environments."""
import d4rl.kitchen
import glob
import gym
import h5py
import numpy as np
import os
import pickle
# Compact float printing for the debugging statements below.
np.set_printoptions(precision=2, suppress=True)
# Output directory for the generated HDF5 datasets.
SAVE_DIRECTORY = '~/.offline_rl/datasets'
# Root of the raw relay-policy-learning demonstration pickles.
DEMOS_DIRECTORY = '~/relay-policy-learning/kitchen_demos_multitask'
DEMOS_SUBDIR_PATTERN = '*'
ENVIRONMENTS = ['kitchen_microwave_kettle_light_slider-v0',
                'kitchen_microwave_kettle_bottomburner_light-v0']
# Uncomment lines below for "mini_kitchen_microwave_kettle_light_slider-v0'".
# NOTE: the two assignments below override the defaults above.
DEMOS_SUBDIR_PATTERN = '*microwave_kettle_switch_slide'
ENVIRONMENTS = ['mini_kitchen_microwave_kettle_light_slider-v0']
# Indices into the flat observation vector for each manipulable kitchen element.
OBS_ELEMENT_INDICES = [
    [11, 12],  # Bottom burners.
    [15, 16],  # Top burners.
    [17, 18],  # Light switch.
    [19],  # Slide.
    [20, 21],  # Hinge.
    [22],  # Microwave.
    [23, 24, 25, 26, 27, 28, 29],  # Kettle.
]
# Flattened list of all element indices (sum(..., []) concatenates the sublists).
FLAT_OBS_ELEMENT_INDICES = sum(OBS_ELEMENT_INDICES, [])
def _relabel_obs_with_goal(obs_array, goal):
obs_array[..., 30:] = goal
return obs_array
def _obs_array_to_obs_dict(obs_array, goal=None):
obs_dict = {
'qp': obs_array[:9],
'obj_qp': obs_array[9:30],
'goal': goal,
}
if obs_dict['goal'] is None:
obs_dict['goal'] = obs_array[30:]
return obs_dict
def main():
    """Load all demo pickles, relabel them per environment, and write one HDF5 dataset each."""
    pattern = os.path.join(DEMOS_DIRECTORY, DEMOS_SUBDIR_PATTERN)
    demo_subdirs = sorted(glob.glob(pattern))
    print('Found %d demo subdirs.' % len(demo_subdirs))
    all_demos = {}
    for demo_subdir in demo_subdirs:
        demo_files = glob.glob(os.path.join(demo_subdir, '*.pkl'))
        print('Found %d demos in %s.' % (len(demo_files), demo_subdir))
        demos = []
        for demo_file in demo_files:
            with open(demo_file, 'rb') as f:
                demo = pickle.load(f)
                demos.append(demo)
        all_demos[demo_subdir] = demos
        # For debugging...
        all_observations = [demo['observations'] for demo in demos]
        first_elements = [obs[0, FLAT_OBS_ELEMENT_INDICES]
                          for obs in all_observations]
        last_elements = [obs[-1, FLAT_OBS_ELEMENT_INDICES]
                         for obs in all_observations]
        # End for debugging.
    for env_name in ENVIRONMENTS:
        env = gym.make(env_name).unwrapped
        env.REMOVE_TASKS_WHEN_COMPLETE = False  # This enables a Markovian reward.
        all_obs = []
        all_actions = []
        all_rewards = []
        all_terminals = []
        all_infos = []
        print('Relabelling data for %s.' % env_name)
        for demo_subdir, demos in all_demos.items():
            print('On demo from %s.' % demo_subdir)
            demos_obs = []
            demos_actions = []
            demos_rewards = []
            demos_terminals = []
            demos_infos = []
            for idx, demo in enumerate(demos):
                env_goal = env._get_task_goal()
                rewards = []
                # Rewrite the goal section of every observation to this env's task goal.
                relabelled_obs = _relabel_obs_with_goal(demo['observations'], env_goal)
                for obs in relabelled_obs:
                    reward_dict, score = env._get_reward_n_score(
                        _obs_array_to_obs_dict(obs))
                    rewards.append(reward_dict['r_total'])
                # Currently keeps the full episode; kept for possible early termination.
                terminate_at = len(rewards)
                rewards = rewards[:terminate_at]
                demos_obs.append(relabelled_obs[:terminate_at])
                demos_actions.append(demo['actions'][:terminate_at])
                demos_rewards.append(np.array(rewards))
                # Terminal flag is True only on the last step of each episode.
                demos_terminals.append(np.arange(len(rewards)) >= len(rewards) - 1)
                demos_infos.append([idx] * len(rewards))
            all_obs.append(np.concatenate(demos_obs))
            all_actions.append(np.concatenate(demos_actions))
            all_rewards.append(np.concatenate(demos_rewards))
            all_terminals.append(np.concatenate(demos_terminals))
            all_infos.append(np.concatenate(demos_infos))
            episode_rewards = [np.sum(rewards) for rewards in demos_rewards]
            last_rewards = [rewards[-1] for rewards in demos_rewards]
            print('Avg episode rewards %f.' % np.mean(episode_rewards))
            print('Avg last step rewards %f.' % np.mean(last_rewards))
        dataset_obs = np.concatenate(all_obs).astype('float32')
        dataset_actions = np.concatenate(all_actions).astype('float32')
        dataset_rewards = np.concatenate(all_rewards).astype('float32')
        dataset_terminals = np.concatenate(all_terminals).astype('float32')
        dataset_infos = np.concatenate(all_infos)
        dataset_size = len(dataset_obs)
        # All arrays must be aligned step-for-step.
        assert dataset_size == len(dataset_actions)
        assert dataset_size == len(dataset_rewards)
        assert dataset_size == len(dataset_terminals)
        assert dataset_size == len(dataset_infos)
        dataset = {
            'observations': dataset_obs,
            'actions': dataset_actions,
            'rewards': dataset_rewards,
            'terminals': dataset_terminals,
            'infos': dataset_infos,
        }
        print('Generated dataset with %d total steps.' % dataset_size)
        save_filename = os.path.join(SAVE_DIRECTORY, '%s.hdf5' % env_name)
        print('Saving dataset to %s.' % save_filename)
        h5_dataset = h5py.File(save_filename, 'w')
        for key in dataset:
            h5_dataset.create_dataset(key, data=dataset[key], compression='gzip')
        print('Done.')
if __name__ == '__main__':
main()
|
JavaRabbit/CS496_capstone | compute/autoscaler/demo/frontend_test.py | Python | apache-2.0 | 2,404 | 0 | # Copyright 2015, Google, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless | required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
import frontend
class FakeTime(object):
    """Deterministic stand-in for wall-clock and CPU-time measurement.

    Each call to busy_wait advances the simulated CPU clock by
    ``cpu_time_step`` and the simulated wall clock by ``wall_time_step``,
    which lets tests emulate an arbitrary fraction of CPU time being
    available to the process.
    """

    def __init__(self, cpu_time_step=1.0, wall_time_step=1.0):
        self.cpu_time = self.wall_time = 0.0
        self.cpu_time_step = cpu_time_step
        self.wall_time_step = wall_time_step

    def get_walltime(self):
        """Return the current simulated wall-clock time."""
        return self.wall_time

    def get_user_cputime(self):
        """Return the current simulated user CPU time."""
        return self.cpu_time

    def busy_wait(self):
        """Advance both simulated clocks by one step."""
        self.wall_time += self.wall_time_step
        self.cpu_time += self.cpu_time_step
@pytest.fixture
def faketime():
    """Provide a fresh FakeTime with the default 1:1 cpu/wall time steps."""
    return FakeTime()
@pytest.fixture
def cpuburner(faketime):
    """CpuBurner whose time and busy-wait hooks are replaced by the FakeTime fixture."""
    cpuburner = frontend.CpuBurner()
    cpuburner.get_user_cputime = faketime.get_user_cputime
    cpuburner.get_walltime = faketime.get_walltime
    cpuburner.busy_wait = faketime.busy_wait
    return cpuburner
# In this test scenario CPU time advances at 25% of the wall time speed.
# Given the request requires 1 CPU core second, we expect it to finish
# within the timeout (5 seconds) and return success.
def test_ok_response(faketime, cpuburner):
    """Request succeeds (HTTP 200) when a quarter of a CPU core is available."""
    faketime.cpu_time_step = 0.25
    (code, _) = cpuburner.handle_http_request()
    assert code == 200
# In this test scenario CPU time advances at 15% of the wall time speed.
# Given the request requires 1 CPU core second, we expect it to timeout
# after 5 simulated wall time seconds and return error 500.
def test_timeout(faketime, cpuburner):
    """Request times out (HTTP 500) when only 15% of a CPU core is available."""
    faketime.cpu_time_step = 0.15
    (code, _) = cpuburner.handle_http_request()
    assert code == 500
|
gileno/tapioca-github | tests/fixtures.py | Python | mit | 4,950 | 0.00303 | # -*- coding: utf-8 -*-
# flake8: noqa
import json
SINGLE_GIST_PAYLOAD = json.loads('''{
"url": "https://api.github.com/gists/1383930894d29dde5015f24c0e279a2b",
"forks_url": "https://api.github.com/gists/1383930894d29dde5015f24c0e279a2b/forks",
"commits_url": "https://api.github.com/gists/1383930894d29dde5015f24c0e279a2b/commits",
"id": "1383930894d29dde5015f24c0e279a2b",
"git_pull_url": "https://gist.github.com/1383930894d29dde5015f24c0e279a2b.git",
"git_push_url": "https://gist.github.com/1383930894d29dde5015f24c0e279a2b.git",
"html_url": "https://gist.github.com/1383930894d29dde5015f24c0e279a2b",
"files": {
"test-for-tapioca-github.md": {
"filename": "test-for-tapioca-github.md",
"type": "text/plain",
"language": "Markdown",
"raw_url": " | https://gist.githubusercontent.com/fjsj/1383930894d29dde5015f24c0e279a2b/raw/5f4a4c279359819437b0749a7e925216612e5cb8/test-for-tapioca-github.md",
"size": 9,
"truncated": false,
"content": "test\\ngist"
}
},
"public": true,
| "created_at": "2016-09-22T21:32:03Z",
"updated_at": "2016-09-22T21:39:19Z",
"description": "Test for gileno/tapioca-github",
"comments": 1,
"user": null,
"comments_url": "https://api.github.com/gists/1383930894d29dde5015f24c0e279a2b/comments",
"owner": {
"login": "fjsj",
"id": 397989,
"avatar_url": "https://avatars.githubusercontent.com/u/397989?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/fjsj",
"html_url": "https://github.com/fjsj",
"followers_url": "https://api.github.com/users/fjsj/followers",
"following_url": "https://api.github.com/users/fjsj/following{/other_user}",
"gists_url": "https://api.github.com/users/fjsj/gists{/gist_id}",
"starred_url": "https://api.github.com/users/fjsj/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fjsj/subscriptions",
"organizations_url": "https://api.github.com/users/fjsj/orgs",
"repos_url": "https://api.github.com/users/fjsj/repos",
"events_url": "https://api.github.com/users/fjsj/events{/privacy}",
"received_events_url": "https://api.github.com/users/fjsj/received_events",
"type": "User",
"site_admin": false
},
"forks": [
],
"history": [
{
"user": {
"login": "fjsj",
"id": 397989,
"avatar_url": "https://avatars.githubusercontent.com/u/397989?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/fjsj",
"html_url": "https://github.com/fjsj",
"followers_url": "https://api.github.com/users/fjsj/followers",
"following_url": "https://api.github.com/users/fjsj/following{/other_user}",
"gists_url": "https://api.github.com/users/fjsj/gists{/gist_id}",
"starred_url": "https://api.github.com/users/fjsj/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fjsj/subscriptions",
"organizations_url": "https://api.github.com/users/fjsj/orgs",
"repos_url": "https://api.github.com/users/fjsj/repos",
"events_url": "https://api.github.com/users/fjsj/events{/privacy}",
"received_events_url": "https://api.github.com/users/fjsj/received_events",
"type": "User",
"site_admin": false
},
"version": "531d5066564c530bb55572e44ae827db63cf9fdd",
"committed_at": "2016-09-22T21:32:11Z",
"change_status": {
},
"url": "https://api.github.com/gists/1383930894d29dde5015f24c0e279a2b/531d5066564c530bb55572e44ae827db63cf9fdd"
},
{
"user": {
"login": "fjsj",
"id": 397989,
"avatar_url": "https://avatars.githubusercontent.com/u/397989?v=3",
"gravatar_id": "",
"url": "https://api.github.com/users/fjsj",
"html_url": "https://github.com/fjsj",
"followers_url": "https://api.github.com/users/fjsj/followers",
"following_url": "https://api.github.com/users/fjsj/following{/other_user}",
"gists_url": "https://api.github.com/users/fjsj/gists{/gist_id}",
"starred_url": "https://api.github.com/users/fjsj/starred{/owner}{/repo}",
"subscriptions_url": "https://api.github.com/users/fjsj/subscriptions",
"organizations_url": "https://api.github.com/users/fjsj/orgs",
"repos_url": "https://api.github.com/users/fjsj/repos",
"events_url": "https://api.github.com/users/fjsj/events{/privacy}",
"received_events_url": "https://api.github.com/users/fjsj/received_events",
"type": "User",
"site_admin": false
},
"version": "742173b388427c1194e3872aa1b39d3ecb877798",
"committed_at": "2016-09-22T21:32:03Z",
"change_status": {
"total": 2,
"additions": 2,
"deletions": 0
},
"url": "https://api.github.com/gists/1383930894d29dde5015f24c0e279a2b/742173b388427c1194e3872aa1b39d3ecb877798"
}
],
"truncated": false
}''')
|
lucemia/django-p | django_p/migrations/0002_auto_20160529_1026.py | Python | mit | 549 | 0.001821 | # -*- coding: utf-8 -*-
# Generated by Django 1.9.4 on 2016-05-29 10:26
from __future__ impo | rt unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Change Barrier.target into a one-to-one link to its Pipeline."""

    dependencies = [
        ('django_p', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='barrier',
            name='target',
            field=models.OneToOneField(
                on_delete=django.db.models.deletion.CASCADE, to='django_p.Pipeline'),
        ),
    ]
|
pombredanne/bokeh | examples/models/custom.py | Python | bsd-3-clause | 2,401 | 0.006247 | from __future__ import print_function
from bokeh.core.properties import String
from bokeh.document import Document
from bokeh.embed import file_html
from bokeh.models.callbacks import Callback
from bokeh.models.glyphs import Circle
from bokeh.models import Plot, DataRange1d, LinearAxis, ColumnDataSource, PanTool, WheelZoomTool, TapTool
from bokeh.models.layouts import HBox
from bokeh.resources import INLINE
from bokeh.util.browser import view
class Popup(Callback):
    """Client-side callback that shows a window.alert() for selected data points."""
    # CoffeeScript implementation compiled by Bokeh in the browser.
    # NOTE(review): indentation inside this string is significant to CoffeeScript.
    __implementation__ = """
_ = require "underscore"
Util = require "util/util"
Model = require "model"
p = require "core/properties"

class Popup extends Model
  type: "Popup"

  execute: (data_source) ->
    for i in Util.get_indices(data_source)
      message = Util.replace_placeholders(@get("message"), data_source, i)
      window.alert(message)
    null

  @define {
    message: [ p.String, "" ]
  }

module.exports =
  Model: Popup
"""
    message = String("", help="""
    Message to display in a popup window. This can be a template string,
    which will be formatted with data from the data source.
    """)
class MyHBox(HBox):
    """HBox variant whose client-side view draws a heavy black border around itself."""
    # CoffeeScript view/model pair; the custom view only adds a CSS border on render.
    __implementation__ = """
HBox = require "models/layouts/hbox"

class MyHBoxView extends HBox.View
  render: () ->
    super()
    @$el.css({border: "5px solid black"})

class MyHBox extends HBox.Model
  type: "MyHBox"
  default_view: MyHBoxView

module.exports = {
  Model: MyHBox
  View: MyHBoxView
}
"""
# Static demo data; the color column exercises several CSS color formats.
source = ColumnDataSource(
    data = dict(
        x = [1, 2, 3, 4, 4, 5, 5],
        y = [5, 4, 3, 2, 2.1, 1, 1.1],
        color = ["rgb(0, 100, 120)", "green", "blue", "#2c7fb8", "#2c7fb8", "rgba(120, 230, 150, 0.5)", "rgba(120, 230, 150, 0.5)"]
    )
)
xdr = DataRange1d()
ydr = DataRange1d()
plot = Plot(x_range=xdr, y_range=ydr)
circle = Circle(x="x", y="y", radius=0.2, fill_color="color", line_color="black")
circle_renderer = plot.add_glyph(source, circle)
plot.add_layout(LinearAxis(), 'below')
plot.add_layout(LinearAxis(), 'left')
# Tapping a circle triggers the custom Popup callback with that point's color.
tap = TapTool(renderers=[circle_renderer], callback=Popup(message="Selected color: @color"))
plot.add_tools(PanTool(), WheelZoomTool(), tap)
doc = Document()
doc.add_root(MyHBox(children=[plot]))
if __name__ == "__main__":
    filename = "custom.html"
    with open(filename, "w") as f:
        f.write(file_html(doc, INLINE, "Demonstration of user-defined models"))
    print("Wrote %s" % filename)
    view(filename)
wesm/statsmodels | scikits/statsmodels/tsa/tests/test_ar.py | Python | bsd-3-clause | 7,036 | 0.007533 | """
Test AR Model
"""
import scikits.statsmodels.api as sm
from scikits.statsmodels.tsa.ar_model import AR
from numpy.testing import assert_almost_equal, assert_equal
from results import results_ar
import numpy as np
import numpy.testing as npt
DECIMAL_6 = 6
DECIMAL_5 = 5
DECIMAL_4 = 4
class CheckAR(object):
    """Shared assertions comparing a fitted AR result (res1) to stored references (res2)."""
    def test_params(self):
        assert_almost_equal(self.res1.params, self.res2.params, DECIMAL_6)
    def test_bse(self):
        # Stata reports standard errors without a dof correction, gretl with one;
        # both variants are checked.
        bse = np.sqrt(np.diag(self.res1.cov_params()))  # no dof correction
        # for compatability with Stata
        assert_almost_equal(bse, self.res2.bse_stata, DECIMAL_6)
        assert_almost_equal(self.res1.bse, self.res2.bse_gretl, DECIMAL_5)
    def test_llf(self):
        assert_almost_equal(self.res1.llf, self.res2.llf, DECIMAL_6)
    def test_fpe(self):
        assert_almost_equal(self.res1.fpe, self.res2.fpe, DECIMAL_6)
class TestAROLSConstant(CheckAR):
    """
    Test AR fit by OLS with a constant.
    """
    @classmethod
    def setupClass(cls):
        data = sm.datasets.sunspots.load()
        cls.res1 = AR(data.endog).fit(maxlag=9, method='cmle')
        cls.res2 = results_ar.ARResultsOLS(constant=True)
    def test_predict(self):
        # Compare in-sample and out-of-sample forecasts against stored reference values.
        model = self.res1.model
        params = self.res1.params
        assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start0,
                DECIMAL_4)
        assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start9,
                DECIMAL_4)
        assert_almost_equal(model.predict(params, start=100),
                self.res2.FVOLSnneg1start100, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=9, end=200),
                self.res2.FVOLSn200start0, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=200, end=400),
                self.res2.FVOLSn200start200, DECIMAL_4)
        #assert_almost_equal(model.predict(params, n=200,start=-109),
        #        self.res2.FVOLSn200startneg109, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308, end=424),
                self.res2.FVOLSn100start325, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=9, end=310),
                self.res2.FVOLSn301start9, DECIMAL_4)
        assert_almost_equal(model.predict(params),
                self.res2.FVOLSdefault, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308, end=316),
                self.res2.FVOLSn4start312, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308, end=327),
                self.res2.FVOLSn15start312, DECIMAL_4)
class TestAROLSNoConstant(CheckAR):
    """
    Test AR fit by OLS without a constant.
    """
    @classmethod
    def setupClass(cls):
        data = sm.datasets.sunspots.load()
        cls.res1 = AR(data.endog).fit(maxlag=9,method='cmle',trend='nc')
        cls.res2 = results_ar.ARResultsOLS(constant=False)
    def test_predict(self):
        # Compare in-sample and out-of-sample forecasts against stored reference values.
        model = self.res1.model
        params = self.res1.params
        assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start0,
                DECIMAL_4)
        assert_almost_equal(model.predict(params),self.res2.FVOLSnneg1start9,
                DECIMAL_4)
        assert_almost_equal(model.predict(params, start=100),
                self.res2.FVOLSnneg1start100, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=9, end=200),
                self.res2.FVOLSn200start0, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=200, end=400),
                self.res2.FVOLSn200start200, DECIMAL_4)
        #assert_almost_equal(model.predict(params, n=200,start=-109),
        #        self.res2.FVOLSn200startneg109, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308,end=424),
                self.res2.FVOLSn100start325, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=9, end=310),
                self.res2.FVOLSn301start9, DECIMAL_4)
        assert_almost_equal(model.predict(params),
                self.res2.FVOLSdefault, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308, end=316),
                self.res2.FVOLSn4start312, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308, end=327),
                self.res2.FVOLSn15start312, DECIMAL_4)
#class TestARMLEConstant(CheckAR):
class TestARMLEConstant(object):
    """Test AR fit by exact MLE with a constant against stored reference forecasts."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.sunspots.load()
        cls.res1 = AR(data.endog).fit(maxlag=9,method="mle", disp=-1)
        cls.res2 = results_ar.ARResultsMLE(constant=True)
    def test_predict(self):
        # Compare forecasts over assorted start/end ranges with stored reference values.
        model = self.res1.model
        params = self.res1.params
        assert_almost_equal(model.predict(params), self.res2.FVMLEdefault,
                DECIMAL_4)
        assert_almost_equal(model.predict(params, start=9, end=308),
                self.res2.FVMLEstart9end308, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=100, end=308),
                self.res2.FVMLEstart100end308, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=0, end=200),
                self.res2.FVMLEstart0end200, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=200, end=333),
                self.res2.FVMLEstart200end334, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=308, end=333),
                self.res2.FVMLEstart308end334, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=9,end=309),
                self.res2.FVMLEstart9end309, DECIMAL_4)
        assert_almost_equal(model.predict(params, end=301),
                self.res2.FVMLEstart0end301, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=4, end=312),
                self.res2.FVMLEstart4end312, DECIMAL_4)
        assert_almost_equal(model.predict(params, start=2, end=7),
                self.res2.FVMLEstart2end7, DECIMAL_4)
class TestAutolagAR(object):
    """Check information criteria (aic/hqic/bic/fpe) across lag lengths against references."""
    @classmethod
    def setupClass(cls):
        data = sm.datasets.sunspots.load()
        endog = data.endog
        results = []
        for lag in range(1,16+1):
            # Trim the sample so every fit uses the same effective observations.
            endog_tmp = endog[16-lag:]
            r = AR(endog_tmp).fit(maxlag=lag)
            results.append([r.aic, r.hqic, r.bic, r.fpe])
        cls.res1 = np.asarray(results).T.reshape(4,-1, order='C')
        cls.res2 = results_ar.ARLagResults("const").ic
    def test_ic(self):
        npt.assert_almost_equal(self.res1, self.res2, DECIMAL_6)
#TODO: likelihood for ARX model?
#class TestAutolagARX(object):
# def setup(self):
# data = sm.datasets.macrodata.load()
# endog = data.data.realgdp
# exog = data.data.realint
# results = []
# for lag in range(1, 26):
# endog_tmp = endog[26-lag:]
# exog_tmp = exog[26-lag:]
# r = AR(endog_tmp, exog_tmp).fit(maxlag=lag, trend='ct')
# results.append([r.aic, r.hqic, r.bic, r.fpe])
# self.res1 = np.asarray(results).T.reshape(4,-1, order='C')
|
alanconway/dispatch | console/dispatch-dashboard/dispatch/topology/urls.py | Python | apache-2.0 | 693 | 0 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf.urls import url

from dispatch.topology import views

# Single route: the topology index page.
urlpatterns = [
    url(r'^$', views.IndexView.as_view(), name='index'),
]
|
rsgemignani/dotfiles | root/usr/lib/gedit/plugins/smart_highlight/smart_highlight.py | Python | gpl-2.0 | 9,699 | 0.0299 | # -*- encoding:utf-8 -*-
# smart_highlight.py is part of smart-highlighting-gedit.
#
#
# Copyright 2010-2012 swatch
#
# smart-highlighting-gedit is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
from gi.repository import Gtk, Gdk, Gedit
import re
import os.path
#import pango
import shutil
from . import config_manager
from .config_ui import ConfigUI
import gettext
APP_NAME = 'smart_highlight' #Same as module name defined at .plugin file.
CONFIG_DIR = os.path.expanduser('~/.local/share/gedit/plugins/' + APP_NAME + '/config')
#LOCALE_DIR = '/usr/share/locale'
LOCALE_DIR = os.path.join(os.path.dirname(__file__), 'locale')
print(LOCALE_DIR)
if not os.path.exists(LOCALE_DIR):
LOCALE_DIR = '/usr/share/locale'
print('locale')
try:
    t = gettext.translation(APP_NAME, LOCALE_DIR)
    _ = t.gettext
    print('gettext')
except Exception:
    # No catalogue for this locale: fall back to the identity function so
    # later calls to _() do not raise NameError (previously _ was left
    # undefined on this path).
    print('none')
    _ = lambda s: s
#gettext.install(APP_NAME, LOCALE_DIR, unicode=True)
ui_str = """<ui>
<menubar name="MenuBar">
<menu name="ToolsMenu" action="Tools">
<placeholder name="ToolsOps_0">
<separator/>
<menu name="SmartHighlightMenu" action="SmartHighlightMenu">
<placeholder name="SmartHighlightMenuHolder">
<menuitem name="smart_highlight_configure" action="smart_highlight_configure"/>
</placeholder>
</menu>
<separator/>
</placeholder>
</menu>
</menubar>
</ui>
"""
class SmartHighlightWindowHelper:
def __init__(self, plugin, window):
    """Attach smart-highlight behaviour to *window* and load configuration."""
    self._window = window
    self._plugin = plugin
    self.current_selection = ''
    self.start_iter = None
    self.end_iter = None
    self.vadj_value = 0
    # Hook every already-open view; new tabs are hooked in tab_added_action.
    views = self._window.get_views()
    for view in views:
        view.get_buffer().connect('mark-set', self.on_textbuffer_markset_event)
        view.get_vadjustment().connect('value-changed', self.on_view_vadjustment_value_changed)
        #view.connect('button-press-event', self.on_view_button_press_event)
    self.active_tab_added_id = self._window.connect("tab-added", self.tab_added_action)
    # First run: copy the bundled default config.xml into the user's dir.
    user_configfile = os.path.join(CONFIG_DIR, 'config.xml')
    if not os.path.exists(user_configfile):
        if not os.path.exists(os.path.dirname(user_configfile)):
            os.makedirs(os.path.dirname(user_configfile))
        shutil.copy2(os.path.dirname(__file__) + "/config/config.xml", os.path.dirname(user_configfile))
    configfile = user_configfile
    self.config_manager = config_manager.ConfigManager(configfile)
    self.options = self.config_manager.load_configure('search_option')
    self.config_manager.to_bool(self.options)
    self.smart_highlight = self.config_manager.load_configure('smart_highlight')
    self._insert_menu()
def deactivate(self):
    """Detach from the window: drop the tab-added handler and persist settings."""
    # Remove any installed menu items
    self._window.disconnect(self.active_tab_added_id)
    # NOTE(review): _remove_menu() is not called here -- confirm whether the
    # host application cleans up the merged UI itself.
    self.config_manager.update_config_file(self.config_manager.config_file, 'search_option', self.options)
    self.config_manager.update_config_file(self.config_manager.config_file, 'smart_highlight', self.smart_highlight)
def _insert_menu(self):
    """Register the plugin's action group and merge its menu XML (ui_str)
    into gedit's UI manager."""
    # Get the GtkUIManager
    manager = self._window.get_ui_manager()
    # Create a new action group
    self._action_group = Gtk.ActionGroup("SmartHighlightActions")
    self._action_group.add_actions( [("SmartHighlightMenu", None, _('Smart Highlighting'))] + \
        [("smart_highlight_configure", None, _("Configuration"), None, _("Smart Highlighting Configure"), self.smart_highlight_configure)])
    # Insert the action group
    manager.insert_action_group(self._action_group, -1)
    # Merge the UI; the id is kept so _remove_menu can undo the merge.
    self._ui_id = manager.add_ui_from_string(ui_str)
def _remove_menu(self):
    """Undo _insert_menu: unmerge the UI and drop the action group."""
    # Get the GtkUIManager
    manager = self._window.get_ui_manager()
    # Remove the ui
    manager.remove_ui(self._ui_id)
    # Remove the action group
    manager.remove_action_group(self._action_group)
    # Make sure the manager updates
    manager.ensure_update()
def update_ui(self):
    """Enable the plugin's actions only while a document is open."""
    self._action_group.set_sensitive(self._window.get_active_document() != None)
'''
def show_message_dialog(self, text):
dlg = Gtk.MessageDialog(self._window,
Gtk.DialogFlags.MODAL | Gtk.DialogFlags.DESTROY_WITH_PARENT,
Gtk.MessageType.INFO,
Gtk.ButtonsType.CLOSE,
_(text))
dlg.run()
dlg.hide()
#'''
def create_regex(self, pattern, options):
    """Compile a search regex for *pattern* according to *options*.

    options keys (booleans):
      REGEX_SEARCH     -- treat pattern as a regular expression; otherwise
                          metacharacters are escaped (literal search).
      MATCH_WHOLE_WORD -- anchor the pattern with word boundaries.
      MATCH_CASE       -- case-sensitive matching (case-insensitive if False).
    Matching is always MULTILINE.
    """
    if not options['REGEX_SEARCH']:
        # Literal search: neutralise any regex metacharacters.
        pattern = re.escape(pattern)
    if options['MATCH_WHOLE_WORD']:
        pattern = r'\b%s\b' % pattern
    flags = re.MULTILINE
    if not options['MATCH_CASE']:
        flags |= re.IGNORECASE
    return re.compile(pattern, flags)
def smart_highlighting_action(self, doc, search_pattern, iter, clear_flg = True):
    """Highlight every match of *search_pattern* in a window of roughly 50
    lines above and below *iter*'s line in buffer *doc*."""
    regex = self.create_regex(search_pattern, self.options)
    if clear_flg == True:
        self.smart_highlight_off(doc)
    # Remember the scroll position so the vadjustment handler can tell a
    # real scroll apart from this refresh.
    self.vadj_value = self._window.get_active_view().get_vadjustment().get_value()
    current_line = iter.get_line()
    start_line = current_line - 50
    end_line = current_line + 50
    # Clamp the 100-line search window to the buffer bounds.
    if start_line <= 0:
        self.start_iter = doc.get_start_iter()
    else:
        self.start_iter = doc.get_iter_at_line(start_line)
    if end_line < doc.get_line_count():
        self.end_iter = doc.get_iter_at_line(end_line)
    else:
        self.end_iter = doc.get_end_iter()
    text = str(doc.get_text(self.start_iter, self.end_iter, True))
    match = regex.search(text)
    while(match):
        # Match offsets are relative to the window, hence the start_iter offset.
        self.smart_highlight_on(doc, match.start()+self.start_iter.get_offset(), match.end() - match.start())
        # NOTE(review): resuming at end()+1 skips one character, so two
        # adjacent matches may miss the second -- confirm intended.
        match = regex.search(text, match.end()+1)
def tab_added_action(self, action, tab):
    """Hook the view of a newly opened tab, mirroring what __init__ does
    for views that already existed."""
    view = tab.get_view()
    view.get_buffer().connect('mark-set', self.on_textbuffer_markset_event)
    view.get_vadjustment().connect('value-changed', self.on_view_vadjustment_value_changed)
    #view.connect('button-press-event', self.on_view_button_press_event)
def on_textbuffer_markset_event(self, textbuffer, iter, textmark):
    """On selection change: highlight all occurrences of the selected text,
    or clear the highlighting when the selection disappears."""
    #print textmark.get_name()
    # Only the marks tracking the caret/selection are of interest.
    if textmark.get_name() != 'selection_bound' and textmark.get_name() != 'insert':
        return
    if textbuffer.get_selection_bounds():
        start, end = textbuffer.get_selection_bounds()
        self.current_selection = textbuffer.get_text(start, end, True)
        self.smart_highlighting_action(textbuffer, self.current_selection, iter)
    else:
        self.current_selection = ''
        self.smart_highlight_off(textbuffer)
def smart_highlight_on(self, doc, highlight_start, highlight_len):
    """Apply the 'smart_highlight' tag to *highlight_len* characters starting
    at offset *highlight_start* in *doc*, creating the tag on first use."""
    if doc.get_tag_table().lookup('smart_highlight') == None:
        tag = doc.create_tag("smart_highlight", foreground=self.smart_highlight['FOREGROUND_COLOR'], background=self.smart_highlight['BACKGROUND_COLOR'])
    doc.apply_tag_by_name('smart_highlight', doc.get_iter_at_offset(highlight_start), doc.get_iter_at_offset(highlight_start + highlight_len))
def smart_highlight_off(self, doc):
    """Strip the 'smart_highlight' tag from the whole buffer.  The tag is
    created first if missing so remove_tag_by_name cannot fail."""
    start, end = doc.get_bounds()
    if doc.get_tag_table().lookup('smart_highlight') == None:
        tag = doc.create_tag("smart_highlight", foreground=self.smart_highlight['FOREGROUND_COLOR'], background=self.smart_highlight['BACKGROUND_COLOR'])
    doc.remove_tag_by_name('smart_highlight', start, end)
def smart_highlight_configure(self, action, data = None):
    """Menu callback: open the plugin's configuration dialog."""
    config_ui = ConfigUI(self._plugin)
def on_view_vadjustment_value_changed(self, object, data = None):
if self.current_selection == '':
return
if object. |
rodekruis/shelter-database | src/scripts/create_user.py | Python | mit | 386 | 0.005181 | #! /usr/bin/python
#-*- coding:utf-8 -*
import csv
from werkzeug import generate_password_hash

from web.models import User
from bootstrap import db


def create_user(email, name, password):
    """Create an active User with a salted password hash and commit it."""
    user = User(email=email,
                name=name,
                pwdhash=generate_password_hash(password),
                is_active=True)
    db.session.add(user)
    db.session.commit()
|
olivierverdier/sfepy | tests/test_term_consistency.py | Python | bsd-3-clause | 4,677 | 0.01796 | # c: 19.05.2008, r: 19.05.2008
from sfepy import data_dir
filename_mesh = data_dir + '/meshes/2d/special/circle_in_square.mesh'
dim = 2
field_1 = {
'name' : 'scalar_field',
'dtype' : 'real',
'shape' : 'scalar',
'region' : 'Omega',
'approx_order' : 1,
}
field_2 = {
'name' : 'vector_field',
'dtype' : 'real',
'shape' : 'vector',
'region' : 'Omega',
'approx_order' : 1,
}
variables = {
'us' : ('unknown field', 'scalar_field', 0),
'ts' : ('test field', 'scalar_field', 'us'),
'ps1' : ('parameter field', 'scalar_field', 'us'),
'ps2' : ('parameter field', 'scalar_field', 'us'),
'uv' : ('unknown field', 'vector_field', 1),
'tv' : ('test field', 'vector_field', 'uv'),
'pv1' : ('parameter field', 'vector_field', 'uv'),
'pv2' : ('parameter field', 'vector_field', 'uv'),
}
regions = {
'Omega' : ('all', {}),
}
integral_1 = {
'name' : 'i1',
'kind' : 'v',
'quadrature' : 'gauss_o2_d2',
}
material_1 = {
'name' : 'm',
'function' : 'get_pars',
}
fe = {
'chunk_size' : 100
}
##
# c: 19.05.2008, r: 19.05.2008
def get_pars( ts, coor, mode=None, region=None, ig=None, term = None ):
    """Material parameter function: return per-quadrature-point values.

    For mode == 'qp', builds the parameter array selected by *term*
    ('biot', 'biot_m' or 'permeability') and tiles it over the
    coor.shape[0] points as {'val': array of shape (n_qp,) + val.shape}.
    Returns None for any other mode.
    """
    if mode == 'qp':
        n_nod, dim = coor.shape
        # Integer division: sym is used as an index bound below (plain "/"
        # yields a float under true division and breaks the slicing).
        sym = (dim + 1) * dim // 2
        if term == 'biot':
            val = nm.zeros( (sym, 1), dtype = nm.float64 )
            val[:dim] = 0.132
            val[dim:sym] = 0.092
        elif term == 'biot_m':
            val = 1.0 / nm.array( [3.8], dtype = nm.float64 )
        elif term == 'permeability':
            val = nm.eye( dim, dtype = nm.float64 )
        else:
            raise ValueError('unknown material term: %r' % term)
        return {'val' : nm.tile(val, (coor.shape[0], 1, 1))}
functions = {
'get_pars' : (get_pars,),
}
# (eval term prefix, parameter corresponding to test variable, 'd' variables,
# 'dw' variables (test must be paired with unknown, which should be at
# index 2!), mat mode)
test_terms = [
('%s_biot.i1.Omega( m.val, %s, %s )',
('dw', 'ps1', ('pv1', 'ps1'), ('pv1', 'ts', 'us', 'uv', 'tv'), 'biot')),
('%s_biot.i1.Omega( m.val, %s, %s )',
('dw', 'pv1', ('pv1', 'ps1'), ('tv', 'ps1', 'uv', 'us', 'ts'), 'biot')),
('%s_diffusion.i1.Omega( m.val, %s, %s )',
('dw', 'ps1', ('ps1', 'ps2'), ('ts', 'ps1', 'us'), 'permeability')),
('%s_volume_dot_w.i1.Omega( m.val, %s, %s )',
('dw', 'ps1', ('ps1', 'ps2'), ('ts', 'ps1', 'us'), 'biot_m')),
]
import numpy as nm
from sfepy.base.testing import TestCommon
from sfepy.base.base import debug, pause
##
# c: 19.05.2008
class Test( TestCommon ):
##
# c: 19.05.2008, r: 19.05.2008
def from_conf( conf, options ):
from sfepy.fem import ProblemDefinition
problem = ProblemDefinition.from_conf(conf, init_equations=False)
test = Test( problem = problem,
conf = conf, options = options )
return test
from_conf = staticmethod( from_conf )
##
# c: 19.05.2008, r: 19.05.2008
def test_consistency_d_dw( self ):
from sfepy.fem import Function, Variables
ok = True
pb = self.problem
for aux in tes | t_terms:
term_template, (prefix, par_name, d_vars, dw_vars, mat_mode) = aux
print term_template, prefix, par_name, d_vars, dw_vars, mat_mode
term1 = term_template % ((prefix,) + d_vars)
variables = Variables.from_conf(self.conf.variables, pb.fields)
for var_name in d_vars:
var = variabl | es[var_name]
n_dof = var.field.n_nod * var.field.shape[0]
aux = nm.arange( n_dof, dtype = nm.float64 )
var.data_from_data(aux)
pb.materials['m'].function.set_extra_args(term = mat_mode)
if prefix == 'd':
val1 = pb.evaluate(term1, var_dict=variables.as_dict())
else:
val1 = pb.evaluate(term1, call_mode='d_eval',
var_dict=variables.as_dict())
self.report( '%s: %s' % (term1, val1) )
term2 = term_template % (('dw',) + dw_vars[:2])
vec, vv = pb.evaluate(term2, mode='weak',
var_dict=variables.as_dict(),
ret_variables=True)
pvec = vv.get_state_part_view(vec, dw_vars[2])
val2 = nm.dot( variables[par_name](), pvec )
self.report( '%s: %s' % (term2, val2) )
err = nm.abs( val1 - val2 ) / nm.abs( val1 )
_ok = err < 1e-12
self.report( 'relative difference: %e -> %s' % (err, _ok) )
ok = ok and _ok
return ok
|
Beyond-Imagination/BlubBlub | ChatbotServer/ChatbotEnv/Lib/site-packages/jpype/_windows.py | Python | gpl-3.0 | 2,127 | 0.002821 | #**************************** | *************************************************
# Copyright 2004-2008 Steve Menard
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#*****************************************************************************
from . import _jvmfinder
try:
import _winreg as winreg
except ImportError:
import winreg # in Py3, winreg has been moved
# ------------------------------------------------------------------------------
class WindowsJVMFinder(_jvmfinder.JVMFinder):
    """
    Windows JVM library finder class: locates jvm.dll via JAVA_HOME or the
    Windows registry.  (The original docstring said "Linux" by mistake.)
    """
    def __init__(self):
        """
        Sets up members
        """
        # Call the parent constructor
        _jvmfinder.JVMFinder.__init__(self)
        # Library file name
        self._libfile = "jvm.dll"
        # Search methods, tried in order by the base class.
        self._methods = (self._get_from_java_home,
                         self._get_from_registry)

    def _get_from_registry(self):
        """
        Retrieves the path to the default Java installation stored in the
        Windows registry

        :return: The path found in the registry, or None
        """
        try :
            # HKLM\SOFTWARE\JavaSoft\...\<CurrentVersion>\RuntimeLib holds
            # the full path of jvm.dll for the default JRE.
            jreKey = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE,
                r"SOFTWARE\JavaSoft\Java Runtime Environment")
            cv = winreg.QueryValueEx(jreKey, "CurrentVersion")
            versionKey = winreg.OpenKey(jreKey, cv[0])
            winreg.CloseKey(jreKey)
            cv = winreg.QueryValueEx(versionKey, "RuntimeLib")
            winreg.CloseKey(versionKey)
            return cv[0]
        # WindowsError: Windows-only builtin (this module targets Windows).
        except WindowsError:
            return None
|
jjdmol/LOFAR | CEP/GSM/bremen/src/updater.py | Python | gpl-3.0 | 1,838 | 0 | #!/usr/bin/python
from src.sqllist import get_sql
_UPDATER_EXTRAS = {
'runningcatalog': ['runcatid'],
'runningcatalog_fluxes': ['runcat_id', 'band', 'stokes'],
}
def _refactor_update(sql):
"""
Special refactoring for MonetDB update..from imitation.
"""
def _get_extra_conditions(tabname):
return ' '.join(map(lambda x: 'and {0}.{1} = x.{1}'.format(tabname, x),
_UPDATER_EXTRAS[tabname]))
sqlupdate, sqlfrom = sql.strip().split('from', 1)
table, sqlupd_list = sqlupdate.split('set')
sqlupd_list = sqlupd_list.split(',')
table = table.split()[1]
if sqlfrom.endswith(';'):
sqlfrom = sqlfrom[:-1]
sqlfrom_split = sqlfrom.split('where', 1)
if len(sqlfrom_split) > 1:
[sqlfrom2, sqlwhere] = sqlfrom_split
sqlwhere = 'where %s' % sqlwhere
else:
sqlfrom2 = sqlfrom
sqlwhere = ''
for field in _UPDATER_EXTRAS[table]:
sqlwhere = sqlwhere.replace('%s.%s' % (table, fi | eld) | , 'x.%s' % field)
update_field = []
for sqlf in sqlupd_list:
field, update_stmt = sqlf.split('=')
update_field.append('%s = (select %s from %s x, %s %s %s)' % (
field, update_stmt.replace(table, 'x'),
table, sqlfrom2, sqlwhere,
_get_extra_conditions(table)))
result = []
for field in update_field:
result.append("""update %s set %s
where exists (select 1 from %s);""" % (table, field, sqlfrom))
return result
def run_update(conn, sql_name, *params):
    """Run the named update statement on *conn*, rewriting it first when
    the backend is MonetDB (see _refactor_update)."""
    statement = get_sql(sql_name, *params)
    if not conn.is_monet():
        conn.execute(statement)
        return
    conn.execute_set(_refactor_update(statement))
|
mytliulei/DCNRobotInstallPackages | windows/win32/paramiko-1.14.0/paramiko/compress.py | Python | apache-2.0 | 1,245 | 0 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
Compression implementations for a Transport.
"""
import zlib
class ZlibCompressor (object):
    """Stateful zlib compressor (level 9).

    Each call compresses one chunk and flushes with Z_FULL_FLUSH so every
    emitted piece can be decompressed independently of later input.
    """

    def __init__(self):
        self._obj = zlib.compressobj(9)

    def __call__(self, data):
        compressed = self._obj.compress(data)
        return compressed + self._obj.flush(zlib.Z_FULL_FLUSH)
class ZlibDecompressor (object):
    """Stateful zlib decompressor; counterpart of ZlibCompressor."""

    def __init__(self):
        self.z = zlib.decompressobj()

    def __call__(self, data):
        return self.z.decompress(data)
|
romeokpolo/SVMPython | src/01-Week2/find_largest.py | Python | mit | 456 | 0.010965 |
num_list1 = [1, 3, 21, 9, 88, 77, 0, 20]
num_list2 = [10, 3, 21, 9, 88, 77, 0, 1000]


# Defining/creating the function:
def romeos_version_of_max(nlst):
    """Return the largest element of the non-empty list *nlst*.

    Hand-rolled equivalent of the builtin max(); raises IndexError on an
    empty list.  The print is the original debugging trace, kept on purpose.
    """
    # Single-argument print calls work identically on Python 2 and 3
    # (the original used Python-2-only print statements).
    print(nlst)
    largest = nlst[0]
    # Find the largest, and update the variable
    for number in nlst:
        if number >= largest:
            largest = number
    return largest


# Calling the function
print(romeos_version_of_max(num_list1))
print(romeos_version_of_max(num_list2))
|
fdvarela/odoo8 | addons/crm/base_partner_merge.py | Python | agpl-3.0 | 30,328 | 0.002506 | #!/usr/bin/env python
from __future__ import absolute_import
from email.utils import parseaddr
import functools
import htmlentitydefs
import itertools
import logging
import operator
import re
from ast import literal_eval
from openerp.tools import mute_logger
# Validation Library https://pypi.python.org/pypi/validate_email/1.1
from .validate_email import validate_email
import openerp
from openerp.osv import osv, orm
from openerp.osv import fields
from openerp.osv.orm import browse_record
from openerp.tools.translate import _
pattern = re.compile("&(\w+?);")
_logger = logging.getLogger('base.partner.merge')
# http://www.php2python.com/wiki/function.html-entity-decode/
def html_entity_decode_char(m, defs=htmlentitydefs.entitydefs):
    """Return the replacement text for one ``&name;`` regex match *m*.

    Unknown entity names fall back to the matched text itself.
    """
    return defs.get(m.group(1), m.group(0))
def html_entity_decode(string):
    """Decode HTML entities in *string* using the module-level ``pattern``
    regex and ``html_entity_decode_char``."""
    return pattern.sub(html_entity_decode_char, string)
def sanitize_email(email):
    """Extract a list of lower-cased, validated e-mail addresses from a raw
    string that may hold several addresses separated by ';', '/', ':' or
    whitespace.  HTML entities are decoded first; invalid addresses are
    dropped.  (Python 2: uses ``basestring``.)
    """
    assert isinstance(email, basestring) and email
    # Normalise all separators to commas, then split.
    result = re.subn(r';|/|:', ',',
                     html_entity_decode(email or ''))[0].split(',')
    # parseaddr strips display names, keeping only the addr-spec part.
    emails = [parseaddr(email)[1]
              for item in result
              for email in item.split()]
    return [email.lower()
            for email in emails
            if validate_email(email)]
def is_integer_list(ids):
    """True iff every element of *ids* is an int/long (Python 2 ``long``)."""
    return all(isinstance(i, (int, long)) for i in ids)
class ResPartner(osv.Model):
    """Extend res.partner with readonly ``id`` and ``create_date`` columns
    (exposed so the merge wizard in this module can show/sort on them)."""
    _inherit = 'res.partner'

    _columns = {
        'id': fields.integer('Id', readonly=True),
        'create_date': fields.datetime('Create Date', readonly=True),
    }
class MergePartnerLine(osv.TransientModel):
    """One proposed group of duplicate partners to merge (transient row
    belonging to the automatic-merge wizard)."""
    _name = 'base.partner.merge.line'

    _columns = {
        'wizard_id': fields.many2one('base.partner.merge.automatic.wizard',
                                     'Wizard'),
        # Smallest partner id of the group -- also the sort key (_order).
        'min_id': fields.integer('MinID'),
        # Textual representation of the grouped partner ids -- presumably a
        # serialized id list; verify against the wizard code that fills it.
        'aggr_ids': fields.char('Ids', required=True),
    }

    _order = 'min_id asc'
class MergePartnerAutomatic(osv.TransientModel):
"""
The idea behind this wizard is to create a list of potential partners to
merge. We use two objects, the first one is the wizard for the end-user.
And the second will contain the partner list to merge.
"""
_name = 'base.partner.merge.automatic.wizard'
_columns = {
# Group by
'group_by_email': fields.boolean('Email'),
'group_by_name': fields.boolean('Name'),
'group_by_is_company': fields.boolean('Is Company'),
'group_by_vat': fields.boolean('VAT'),
'group_by_parent_id': fields.boolean('Parent Company'),
'state': fields.selection([('option', 'Option'),
('selection', 'Selection'),
('finished', 'Finished')],
'State',
readonly=True,
required=True),
'number_group': fields.integer("Group of Contacts", readonly=True),
'current_line_id': fields.many2one('base.partner.merge.line', 'Current Line'),
'line_ids': fields.one2many('base.partner.merge.line', 'wizard_id', 'Lines'),
'partner_ids': fields.many2many('res.partner', string='Contacts'),
'dst_partner_id': fields.many2one('res.partner', string='Destination Contact'),
'exclude_contact': fields.boolean('A user associated to the contact'),
'exclude_journal_item': fields.boolean('Journal Items associated to the contact'),
'maximum_group': fields.integer("Maximum of Group of Contacts"),
}
def default_get(self, cr, uid, fields, context=None):
    """Prefill the wizard.  When launched from selected res.partner records
    (active_model/active_ids in context), jump straight to the 'selection'
    state with those partners and propose the last of _get_ordered_partner
    as the destination contact."""
    if context is None:
        context = {}
    res = super(MergePartnerAutomatic, self).default_get(cr, uid, fields, context)
    if context.get('active_model') == 'res.partner' and context.get('active_ids'):
        partner_ids = context['active_ids']
        res['state'] = 'selection'
        res['partner_ids'] = partner_ids
        res['dst_partner_id'] = self._get_ordered_partner(cr, uid, partner_ids, context=context)[-1].id
    return res
_defaults = {
'state': 'option'
}
def get_fk_on(self, cr, table):
    """Query the PostgreSQL catalog for every (table, column) pair holding
    a single-column foreign key referencing ``table(id)``.

    Results are left on the cursor *cr* for the caller to fetch.
    """
    q = """ SELECT cl1.relname as table,
                   att1.attname as column
            FROM pg_constraint as con, pg_class as cl1, pg_class as cl2,
                 pg_attribute as att1, pg_attribute as att2
            WHERE con.conrelid = cl1.oid
                AND con.confrelid = cl2.oid
                AND array_lower(con.conkey, 1) = 1
                AND con.conkey[1] = att1.attnum
                AND att1.attrelid = cl1.oid
                AND cl2.relname = %s
                AND att2.attname = 'id'
                AND array_lower(con.confkey, 1) = 1
                AND con.confkey[1] = att2.attnum
                AND att2.attrelid = cl2.oid
                AND con.contype = 'f'
        """
    return cr.execute(q, (table,))
def _update_foreign_keys(self, cr, uid, src_partners, dst_partner, context=None):
    """Repoint, table by table in raw SQL, every foreign key referencing one
    of *src_partners* onto *dst_partner*."""
    _logger.debug('_update_foreign_keys for dst_partner: %s for src_partners: %r', dst_partner.id, list(map(operator.attrgetter('id'), src_partners)))
    # find the many2one relation to a partner
    proxy = self.pool.get('res.partner')
    self.get_fk_on(cr, 'res_partner')
    # ignore two tables
    for table, column in cr.fetchall():
        # Skip this wizard's own transient tables.
        if 'base_partner_merge_' in table:
            continue
        partner_ids = tuple(map(int, src_partners))
        query = "SELECT column_name FROM information_schema.columns WHERE table_name LIKE '%s'" % (table)
        cr.execute(query, ())
        # All columns of the table other than the FK column itself.
        columns = []
        for data in cr.fetchall():
            if data[0] != column:
                columns.append(data[0])
        # NOTE(review): columns[0] raises IndexError if the FK column is
        # the table's only column -- presumably impossible in practice.
        query_dic = {
            'table': table,
            'column': column,
            'value': columns[0],
        }
        if len(columns) <= 1:
            # unique key treated
            # Two-column tables (typically m2m relations): update row by
            # row, skipping updates that would create a duplicate pair.
            query = """
                UPDATE "%(table)s" as ___tu
                SET %(column)s = %%s
                WHERE
                    %(column)s = %%s AND
                    NOT EXISTS (
                        SELECT 1
                        FROM "%(table)s" as ___tw
                        WHERE
                            %(column)s = %%s AND
                            ___tu.%(value)s = ___tw.%(value)s
                    )""" % query_dic
            for partner_id in partner_ids:
                cr.execute(query, (dst_partner.id, partner_id, dst_partner.id))
        else:
            cr.execute("SAVEPOINT recursive_partner_savepoint")
            try:
                query = 'UPDATE "%(table)s" SET %(column)s = %%s WHERE %(column)s IN %%s' % query_dic
                cr.execute(query, (dst_partner.id, partner_ids,))
                # Repointing res_partner.parent_id may create a cycle:
                # detect it and roll back this table's update if so.
                if column == proxy._parent_name and table == 'res_partner':
                    query = """
                        WITH RECURSIVE cycle(id, parent_id) AS (
                                SELECT id, parent_id FROM res_partner
                            UNION
                                SELECT cycle.id, res_partner.parent_id
                                FROM res_partner, cycle
                                WHERE res_partner.id = cycle.parent_id AND
                                      cycle.id != cycle.parent_id
                        )
                        SELECT id FROM cycle WHERE id = parent_id AND id = %s
                    """
                    cr.execute(query, (dst_partner.id,))
                    if cr.fetchall():
                        cr.execute("ROLLBACK TO SAVEPOINT recursive_partner_savepoint")
            finally:
                cr.execute("RELEASE SAVEPOINT recursive_partner_savepoint")
def _update_reference_fields(self, cr, uid, src_partners, dst_partner, context |
apruden/genwiki | main.py | Python | lgpl-3.0 | 32 | 0 | from genwiki.genwiki import app
|
biocommons/hgvs-eval | hgvseval/testservice/interface.py | Python | apache-2.0 | 1,686 | 0.003559 | import abc
class HGVSTestService(object):
    """Abstract interface an HGVS implementation under test must provide.

    NOTE(review): the methods are decorated with @abc.abstractmethod but the
    class does not use abc.ABCMeta as its metaclass, so abstractness is not
    actually enforced at instantiation time -- confirm whether intended.
    """

    @abc.abstractmethod
    def info(self):
        """returns dictionary of package info
        """
        pass

    @abc.abstractmethod
    def project_t_to_g(self, hgvs_string, ac):
        """projects transcript (c. or n.) variant hgvs_string onto genomic
        sequence specified by ac, returning g. hgvs string

        Transcripts may be coding or non-coding.
        """
        pass

    @abc.abstractmethod
    def project_g_to_t(self, hgvs_string, ac):
        """projects g. variant hgvs_string onto transcript sequence
        specified by ac, returning a c. or n. hgvs string

        Transcripts may be coding or non-coding.
        """
        pass

    @abc.abstractmethod
    def project_c_to_p(self, hgvs_string):
        """projects c. hgvs_string onto corresponding
        protein sequence, returning a p. hgvs string.

        Transcripts may be coding or non-coding.
        """
        pass

    @abc.abstractmethod
    def rewrite(self, hgvs_string):
        """normalize and rewrite hgvs_string in a canonical form"""
        pass

    @abc.abstractmethod
    def parse(self, hgvs_string):
        """parse hgvs_string and return json representation"""
        pass

    @abc.abstractmethod
    def validate(self, hgvs_string):
        """parse and validate variant returning status and optional messages

        If the variant is valid, returns (True, []).
        If the variant is invalid, returns (False, messages).  Status is
        True or False, and messages is a list of text messages explaining
        the failure.  messages should be empty if the status is True.
        """
        pass
|
oozappa/maiden | tests/__init__.py | Python | apache-2.0 | 148 | 0 | import doctest
from maiden import config
def load_tests(loader, tests, ignore):
    """unittest ``load_tests`` protocol hook: append the doctests defined in
    ``maiden.config`` to the discovered *tests* suite."""
    tests.addTests(doctest.DocTestSuite(config))
    return tests
|
Bodidze/21v-python | unit_05/alive/test.py | Python | mit | 5,183 | 0.000193 | import main
def test_get_board():
    """get_board builds a size x size 0/1 grid with 1 at each alive cell."""
    alive_cons = [(1, 1),
                  (2, 2),
                  (3, 1),
                  (3, 3),
                  (4, 0)]
    board = [[0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0],
             [0, 0, 1, 0, 0],
             [0, 1, 0, 1, 0],
             [1, 0, 0, 0, 0]]
    assert main.get_board(5, alive_cons) == board
class TestGetNeighbors(object):
    """get_neighbors(con) returns the 8 surrounding coordinates, unclipped
    (out-of-board / negative coordinates included)."""

    def test_zero_positive(self):
        # Top-edge cell: neighbours with row -1 are still returned;
        # this test also pins the exact ordering.
        con = (0, 2)
        neighbors = [(-1, 1),
                     (-1, 2),
                     (-1, 3),
                     (0, 1),
                     (0, 3),
                     (1, 1),
                     (1, 2),
                     (1, 3)]
        assert main.get_neighbors(con) == neighbors

    def test_zero_zero(self):
        # Corner cell; ordering irrelevant here, hence the set comparison.
        con = (0, 0)
        neighbors = [(-1, -1),
                     (-1, 0),
                     (-1, 1),
                     (0, -1),
                     (0, 1),
                     (1, -1),
                     (1, 0),
                     (1, 1)]
        assert set(main.get_neighbors(con)) == set(neighbors)

    def test_positive_result(self):
        con = (5, 5)
        neighbors = [(4, 4),
                     (4, 5),
                     (4, 6),
                     (5, 4),
                     (5, 6),
                     (6, 4),
                     (6, 5),
                     (6, 6)]
        assert set(main.get_neighbors(con)) == set(neighbors)
def test_calculate_alive_neighbors():
    """Counts how many of con's 8 neighbours appear in alive_cons
    (here (1, 1), (1, 2) and (0, 3))."""
    con = (0, 2)
    alive_cons = [(0, 0),
                  (1, 1),
                  (1, 2),
                  (2, 4),
                  (3, 5),
                  (0, 3)]
    assert main.calculate_alive_neighbors(con, alive_cons) == 3
class TestIsAliveCon(object):
    """Conway rules: a dead cell with exactly 3 alive neighbours is born; an
    alive cell with 2 or 3 alive neighbours survives; otherwise it is dead
    in the next generation."""

    def test_new_con(self):
        # Dead cell with 3 alive neighbours -> born.
        alive_cons = [(1, 1),
                      (2, 0),
                      (2, 2)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is True

    def test_alive_con_alive_3_neighbors(self):
        # Alive cell with 3 alive neighbours -> survives.
        alive_cons = [(1, 1),
                      (2, 0),
                      (2, 1),
                      (2, 2)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is True

    def test_alive_con_alive_2_neighbors(self):
        # Alive cell with 2 alive neighbours -> survives.
        alive_cons = [(1, 1),
                      (2, 0),
                      (2, 1)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is True

    def tests_dead_con_few_neighbor(self):
        # Dead cell with only 2 alive neighbours -> stays dead.
        alive_cons = [(1, 1),
                      (2, 0)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is False

    def test_alive_con_few_neighbors(self):
        # Alive cell with a single alive neighbour -> dies.
        alive_cons = [(1, 1),
                      (2, 1)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is False

    def test_many_neighbors(self):
        # Dead cell with 4 alive neighbours -> stays dead.
        alive_cons = [(1, 0),
                      (1, 1),
                      (2, 0),
                      (3, 1)]
        con = (2, 1)
        assert main.is_alive_con(con, alive_cons) is False
class TestIsCorrectCon(object):
    """is_correct_con(size, con): True iff both coordinates lie in [0, size)."""

    def test_zero_zero(self):
        assert main.is_correct_con(5, (0, 0)) is True

    def test_zero_positive(self):
        assert main.is_correct_con(5, (0, 4)) is True
        assert main.is_correct_con(5, (4, 0)) is True

    def test_zero_negative(self):
        assert main.is_correct_con(5, (0, -1)) is False
        assert main.is_correct_con(5, (-1, 0)) is False

    def test_negative(self):
        assert main.is_correct_con(5, (-1, -6)) is False
        assert main.is_correct_con(5, (-6, -1)) is False
        assert main.is_correct_con(5, (-6, -6)) is False
        assert main.is_correct_con(5, (-1, -1)) is False

    def test_posirive(self):
        assert main.is_correct_con(5, (1, 1)) is True

    def test_over_size(self):
        assert main.is_correct_con(5, (1, 5)) is False
        assert main.is_correct_con(5, (5, 1)) is False
        assert main.is_correct_con(5, (1, 6)) is False
        assert main.is_correct_con(5, (6, 6)) is False

    def test_negative_over_size(self):
        assert main.is_correct_con(5, (-6, 6)) is False
        assert main.is_correct_con(5, (6, -6)) is False
        assert main.is_correct_con(5, (-1, 6)) is False
        # NOTE(review): duplicate of the previous assertion -- probably
        # meant (6, -1); kept byte-identical here.
        assert main.is_correct_con(5, (-1, 6)) is False
def test_correct_cons():
    """correct_cons keeps only the coordinates inside the size x size board."""
    cons = [(1, 1),
            (-1, 2),
            (1, -1),
            (1, 10)]
    assert main.correct_cons(5, cons) == [(1, 1)]
|
class TestNewStep(object):
    """new_step advances the whole set of alive cells by one generation."""

    def test_glader(self):
        # Glider-like pattern: the expected next generation is pinned.
        alive_cons = [(1, 2),
                      (2, 3),
                      (3, 1),
                      (3, 2),
                      (3, 3)]
        new_alive_cons = [(2, 1),
                          (2, 3),
                          (3, 2),
                          (3, 3),
                          (4, 2)]
        assert set(main.new_step(alive_cons)) == set(new_alive_cons)

    def test_flasher(self):
        # Blinker: a vertical line of three becomes horizontal.
        alive_cons = [(0, 1),
                      (1, 1),
                      (2, 1)]
        new_alive_cons = [(1, 0),
                          (1, 1),
                          (1, 2)]
        assert set(main.new_step(alive_cons)) == set(new_alive_cons)
iambibhas/django | django/views/csrf.py | Python | bsd-3-clause | 5,059 | 0.001186 | from django.conf import settings
from django.http import HttpResponseForbidden
from django.template import Context, Template
from django.utils.translation import ugettext as _
from django.utils.version import get_docs_version
# We include the template inline since we need to be able to reliably display
# this error message, especially for the sake of developers, and there isn't any
# other way of making it available independent of what is in the settings file.
# Only the text appearing with DEBUG=False is translated. Normal translation
# tags cannot be used with this inline templates as makemessages would not be
# able to discover the strings.
CSRF_FAILURE_TEMPLATE = """
<!DOCTYPE html>
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=utf-8">
<meta name="robots" content="NONE,NOARCHIVE">
<title>403 Forbidden</title>
<style type="text/css">
html * { padding:0; margin:0; }
body * { padding:10px 20px; }
body * * { padding:0; }
body { font:small sans-serif; background:#eee; }
body>div { border-bottom:1px solid #ddd; }
h1 { font-weight:normal; margin-bottom:.4em; }
h1 span { font-size:60%; color:#666; font-weight:normal; }
#info { background:#f6f6f6; }
#info ul { margin: 0.5em 4em; }
#info p, #summary p { padding-top:10px; }
#summary { background: #ffc; }
#explanation { background:#eee; border-bottom: 0px none; }
</style>
</head>
<body>
<div id="summary">
<h1>{{ title }} <span>(403)</span></h1>
<p>{{ main }}</p>
{% if no_referer %}
<p>{{ no_referer1 }}</p>
<p>{{ no_referer2 }}</p>
{% endif %}
{% if no_cookie %}
<p>{{ no_cookie1 }}</p>
<p>{{ no_cookie2 }}</p>
{% endif %}
</div>
{% if DEBUG %}
<div id="info">
<h2>Help</h2>
{% if reason %}
<p>Reason given for failure:</p>
<pre>
{{ reason }}
</pre>
{% endif %}
<p>In general, this can occur when there is a genuine Cross Site Request Forgery, or when
<a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/csrf/">Django's
CSRF mechanism</a> has not been used correctly. For POST forms, you need to
ensure:</p>
<ul>
<li>Your browser is accepting cookies.</li>
<li>The view function uses <a
href="https://docs.djangoproject.com/en/{{ docs_version }}/ref/templates/api/#subclassing-context-requestcontext"><code>RequestContext</code></a>
for the template, instead of <code>Context</code>.</li>
<li>In the template, there is a <code>{% templatetag openblock %} csrf_token
{% templatetag closeblock %}</code> template tag inside each POST form that
targets an internal URL.</li>
<li>If you are not using <code>CsrfViewMiddleware</code>, then you must use
<code>csrf_protect</code> on any views that use the <code>csrf_token</code>
template tag, as well as those that accept the POST data.</li>
</ul>
<p>You're seeing the help section of this page because you have <code>DEBUG =
True</code> in your Django settings file. Change that to <code>False</code>,
and only the initial error message will be displayed. </p>
<p>You can customize this page using the CSRF_FAILURE_VIEW setting.</p>
</div>
{% else %}
<div id="explanation">
<p><small>{{ more }}</small></p>
</div>
{% endif %}
</body>
</html>
"""
def csrf_failure(request, reason=""):
    """
    Default view used when request fails CSRF protection.

    Renders CSRF_FAILURE_TEMPLATE with translated explanatory text and
    returns a 403 response.  ``reason`` is the machine reason supplied by
    CsrfViewMiddleware and selects which help paragraphs are shown.
    """
    # Imported lazily to mirror the middleware's reason constants without a
    # module-level import cycle.
    from django.middleware.csrf import REASON_NO_REFERER, REASON_NO_CSRF_COOKIE
    # Booleans toggling the template's conditional help sections.
    missing_referer = reason == REASON_NO_REFERER
    missing_cookie = reason == REASON_NO_CSRF_COOKIE
    context = Context({
        'title': _("Forbidden"),
        'main': _("CSRF verification failed. Request aborted."),
        'reason': reason,
        'no_referer': missing_referer,
        'no_referer1': _(
            "You are seeing this message because this HTTPS site requires a "
            "'Referer header' to be sent by your Web browser, but none was "
            "sent. This header is required for security reasons, to ensure "
            "that your browser is not being hijacked by third parties."),
        'no_referer2': _(
            "If you have configured your browser to disable 'Referer' headers, "
            "please re-enable them, at least for this site, or for HTTPS "
            "connections, or for 'same-origin' requests."),
        'no_cookie': missing_cookie,
        'no_cookie1': _(
            "You are seeing this message because this site requires a CSRF "
            "cookie when submitting forms. This cookie is required for "
            "security reasons, to ensure that your browser is not being "
            "hijacked by third parties."),
        'no_cookie2': _(
            "If you have configured your browser to disable cookies, please "
            "re-enable them, at least for this site, or for 'same-origin' "
            "requests."),
        'DEBUG': settings.DEBUG,
        'docs_version': get_docs_version(),
        'more': _("More information is available with DEBUG=True."),
    })
    body = Template(CSRF_FAILURE_TEMPLATE).render(context)
    return HttpResponseForbidden(body, content_type='text/html')
|
nareshppts/account_chart_update | account_renumber/test/create_moves.py | Python | agpl-3.0 | 5,324 | 0.000752 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP - Account renumber wizard
# Copyright (C) 2009 Pexego Sistemas Informáticos. All Rights Reserved
# $Id$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
Script that creates large amounts of account moves on different days,
that can be used later for testing the renumber wizard.
"""
__author__ = "Borja López Soilán (Pexego)"
import sys
import re
import xmlrpclib
import socket
import logging
logger = logging.getLogger("create_lots_of_account_moves")
def create_lots_of_account_moves(dbname, user, passwd, howmany):
    """
    Small OpenERP function that will create lots of account moves
    on the selected database, that can later be used for
    testing the renumber wizard.

    Connects to a local OpenERP server over XML-RPC, then creates and
    validates ``howmany`` balanced journal moves (one 1000.0 credit line
    and one 1000.0 debit line each), spread over the days of January 2009.

    Note: The database must have demo data, and a fiscal year 2009 created.

    @param dbname: name of the target OpenERP database
    @param user: login of an OpenERP user
    @param passwd: password for that user
    @param howmany: number of account moves to create
    """
    url_template = "http://%s:%s/xmlrpc/%s"
    server = "localhost"
    port = 8069
    login_facade = xmlrpclib.ServerProxy(
        url_template % (server, port, 'common'))
    user_id = login_facade.login(dbname, user, passwd)
    object_facade = xmlrpclib.ServerProxy(
        url_template % (server, port, 'object'))
    # Bugfix: the original loop used range(1, howmany) and therefore created
    # only howmany - 1 moves; range(1, howmany + 1) creates exactly howmany.
    for i in range(1, howmany + 1):
        # Create one account move
        move_id = object_facade.execute(
            dbname, user_id, passwd,
            'account.move', 'create', _build_move_values(i), {})
        # Validate the move
        object_facade.execute(
            dbname, user_id, passwd,
            u'account.move', 'button_validate', [move_id], {})


def _build_move_line(account_id, credit, debit, name):
    """Return one (0, 0, vals) creation tuple for an account move line."""
    return (0, 0, {
        'analytic_account_id': False,
        'currency_id': False,
        'tax_amount': False,
        'account_id': account_id,
        'partner_id': False,
        'tax_code_id': False,
        'credit': credit,
        'date_maturity': False,
        'debit': debit,
        'amount_currency': False,
        'ref': False,
        'name': name,
    })


def _build_move_values(i):
    """Return the values dictionary for the i-th balanced test move."""
    return {
        'ref': 'Test%s' % i,
        'type': 'journal_voucher',
        'journal_id': 5,
        'line_id': [
            _build_move_line(2, 1000.0, False, 'Test_l1'),
            _build_move_line(4, False, 1000.0, 'Test_l2'),
        ],
        'period_id': 1,
        # Spread moves across January 2009; "(i % 31) or 1" avoids day 0.
        # NOTE(review): the day is not zero-padded ('2009-01-5'); OpenERP
        # apparently accepted this -- confirm before changing the format.
        'date': '2009-01-%s' % ((i % 31) or 1),
        'partner_id': False,
        'to_check': 0,
    }
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
# ------------------------------------------------------------------------
if __name__ == "__main__":
    # Command-line entry point: all four positional arguments are required
    # (database name, login, password, and the number of moves to create).
    if len(sys.argv) < 5:
        logger.info(u"Usage: %s <dbname> <user> <password> <howmany>" % sys.argv[0])
    else:
        create_lots_of_account_moves(
            sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]))
|
JakeCowton/invenio-data | invenio_data/modules/deposit/workflows/alice.py | Python | gpl-2.0 | 3,371 | 0.003856 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2012, 2013, 2014 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Publi | c License as
## published by the Fre | e Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
from flask.ext.login import current_user
from flask import render_template
from invenio.modules.deposit.models import DepositionType, Deposition
from invenio.modules.formatter import format_record
from invenio.modules.deposit.tasks import render_form, \
create_recid, \
prepare_sip, \
finalize_record_sip, \
upload_record_sip, \
prefill_draft,\
process_sip_metadata
from ..forms.alice import AliceDataAnalysisForm
__all__ = ['alice']
class alice(DepositionType):
    """Deposition type for ALICE data analysis submissions.

    Defines the workflow that takes a user-submitted form through SIP
    creation, record-id reservation, MARC generation and record upload
    (see the per-task comments in ``workflow`` below).
    """
    # Workflow tasks, run in the order listed.
    workflow = [
        # Pre-fill draft with values passed in from request
        prefill_draft(draft_id='default'),
        # Render form and wait for user to submit
        render_form(draft_id='default'),
        # Create the submission information package by merging form data
        # from all drafts (in this case only one draft exists).
        prepare_sip(),
        # Process metadata to match your JSONAlchemy record model. This will
        # call process_sip_metadata() on your subclass.
        process_sip_metadata(),
        # Reserve a new record id, so that we can provide proper feedback to
        # user before the record has been uploaded.
        create_recid(),
        # Generate MARC based on metadata dictionary.
        finalize_record_sip(is_dump=False),
        # Seal the SIP and write MARCXML file and call bibupload on it
        upload_record_sip(),
    ]

    # Labels shown in the deposit user interface.
    name = "ALICE Data Analysis"
    name_plural = "ALICE Data Analysis"
    group = "ALICE Data Analysis"
    enabled = True

    # A single draft, backed by the ALICE data-analysis form.
    draft_definitions = {
        'default': AliceDataAnalysisForm,
    }

    @classmethod
    def render_completed(cls, d):
        """
        Page to render when deposition was successfully completed.

        @param d: the completed Deposition instance
        @return: rendered 'deposit/completed.html' page
        """
        ctx = dict(
            deposition=d,
            # Default deposition types omit the explicit type identifier.
            deposition_type=(
                None if d.type.is_default() else d.type.get_identifier()
            ),
            uuid=d.id,
            my_depositions=Deposition.get_depositions(
                current_user, type=d.type
            ),
            sip=d.get_latest_sip(),
            format_record=format_record,
        )
        return render_template('deposit/completed.html', **ctx)

    @classmethod
    def process_sip_metadata(cls, deposition, metadata):
        """
        Implement this method in your subclass to process metadata prior to
        MARC generation.
        """
        # Split the author list into first author and additional authors,
        # the shape expected by the record model.
        if 'authors' in metadata and metadata['authors']:
            metadata['_first_author'] = metadata['authors'][0]
            metadata['_additional_authors'] = metadata['authors'][1:]
|
TheTimmy/spack | var/spack/repos/builtin/packages/pathfinder/package.py | Python | lgpl-2.1 | 2,190 | 0.000913 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the LICENSE file for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without | even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
############################################ | ##################################
from spack import *
class Pathfinder(MakefilePackage):
    """Proxy Application. Signature search."""

    homepage = "https://mantevo.org/packages/"
    url = "http://mantevo.org/downloads/releaseTarballs/miniapps/PathFinder/PathFinder_1.0.0.tgz"

    tags = ['proxy-app']

    version('1.0.0', '374269e8d42c305eda3e392444e22dde')

    # Build only the reference implementation, with the plain C compiler.
    build_targets = ['--directory=PathFinder_ref', 'CC=cc']

    def edit(self, spec, prefix):
        # Replace the hard-coded GCC OpenMP flag with whatever flag the
        # active compiler uses.
        FileFilter('PathFinder_ref/Makefile').filter(
            '-fopenmp', self.compiler.openmp_flag)

    def install(self, spec, prefix):
        # The Makefile has no install target, so copy artifacts by hand.
        mkdirp(prefix.bin)
        mkdirp(prefix.doc)
        for artifact in ('PathFinder_ref/PathFinder.x',
                         'PathFinder_ref/MicroTestData.adj_list'):
            install(artifact, prefix.bin)
        install('README', prefix.doc)
        install_tree('generatedData/', prefix.doc.generatedData)
        install_tree('scaleData/', prefix.doc.scaleData)
|
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sympy/concrete/tests/test_products.py | Python | agpl-3.0 | 1,362 | 0.002937 | from sympy import (symbols, product, factorial, rf, sqrt, cos,
Function, Product, Rational)
# Shared symbols for the tests below; declared integer so product() can
# evaluate the symbolic identities.
a, k, n = symbols('a,k,n', integer=True)
def test_simple_products():
    # Constant factor: multiplied once per index value, n - a + 1 times.
    assert product(2, (k, a, n)) == 2**(n-a+1)
    # prod k for k = 1..n is the definition of n!.
    assert product(k, (k, 1, n)) == factorial(n)
    assert product(k**3, (k, 1, n)) == factorial(n)**3
    # Shifted index: prod (k+1) for k = 0..n-1 is again n!.
    assert product(k+1, (k, 0, n-1)) == factorial(n)
    # General shift gives a rising factorial.
    assert product(k+1, (k, a, n-1)) == rf(1+a, n-a)
    # cos(0) == 1, so the k=0 factor drops out of the explicit product.
    assert product(cos(k), (k, 0, 5)) == cos(1)*cos(2)*cos(3)*cos(4)*cos(5)
    assert product(cos(k), (k, 3, 5)) == cos(3)*cos(4)*cos(5)
    # Non-integer upper limit: only the integer points 1 and 2 contribute.
    assert product(cos(k), (k, 1, Rational(5, 2))) == cos(1)*cos(2)
    # k**k has no closed form, so an unevaluated Product is returned.
    assert isinstance(product(k**k, (k, 1, n)), Product)
def test_rational_products():
    # Telescoping: prod (1 + 1/k) = (n+1)!/n! per factor, i.e. rf(2, n)/n!.
    assert product(1+1/k, (k, 1, n)) == rf(2, n)/factorial(n)
def test_special_products():
    # Wallis product (finite form): prod (2k)^2 / ((2k-1)(2k+1)), expressed
    # via rising factorials of the half-integers 1/2 and 3/2.
    assert product((4*k)**2 / (4*k**2-1), (k, 1, n)) == \
        4**n*factorial(n)**2/rf(Rational(1, 2), n)/rf(Rational(3, 2), n)
    # Euler's product formula for sin (finite form): the factors split as
    # (1 + sqrt(-a)/k)(1 - sqrt(-a)/k).
    assert product(1 + a/k**2, (k, 1, n)) == \
        rf(1 - sqrt(-a), n)*rf(1 + sqrt(-a), n)/factorial(n)**2
def test__eval_product():
    from sympy.abc import i, n
    # Issue 1710 (old tracker): constant factors are pulled out of an
    # otherwise unevaluated Product.
    a = Function('a')
    assert product(2*a(i), (i, 1, n)) == 2**n * Product(a(i), (i, 1, n))
    # Issue 1711 (old tracker): geometric-style product collapses the
    # exponents into a sum, 1 + 2 + ... + n = n/2 + n**2/2.
    assert product(2**i, (i, 1, n)) == 2**(n/2 + n**2/2)
|
FrodeSolheim/fs-uae-launcher | launcher/controllertest/ControllerTestWindow.py | Python | gpl-2.0 | 2,019 | 0 | import logging
from fsgamesys.input.inputservice import (
GamepadConnectedEvent,
GamepadDisconnectedEvent,
JoystickConnectedEvent,
JoystickDisconnectedEvent,
useInputService,
)
from launcher.fswidgets2.flexcontainer import FlexContainer
from launcher.fswidgets2.panel import Panel
from launcher.fswidgets2.textarea import TextArea
from launcher.i18n import gettext
from system.classes.window import Window
log = logging.getLogger(__name__)
class ControllerTestWindow(Window):
    """Window that logs controller connect/disconnect events.

    Subscribes to the input service's gamepad and joystick lifecycle
    events and appends a line to an on-screen text area for each one.
    """

    def __init__(self) -> None:
        log.info("Creating controller test window")
        super().__init__(parent=None, title=gettext("Controller Test"))
        self.setSize((800, 500))
        with FlexContainer(parent=self):
            self.controllersPanel = Panel()
            self.textArea = TextArea(style={"flexGrow": 1, "margin": 10})
        self.inputService = useInputService()
        # Subscribe to all four device lifecycle events.
        subscriptions = (
            (self.inputService.gamepadConnectedEvent,
             self.onGamepadConnected),
            (self.inputService.gamepadDisconnectedEvent,
             self.onGamepadDisconnected),
            (self.inputService.joystickConnectedEvent,
             self.onJoystickConnected),
            (self.inputService.joystickDisconnectedEvent,
             self.onJoystickDisconnected),
        )
        for event, handler in subscriptions:
            self.listen(event, handler)

    def append(self, message: str) -> None:
        """Append one line to the on-screen event log."""
        self.textArea.appendLine(message)

    def onGamepadConnected(self, event: GamepadConnectedEvent) -> None:
        self.append("Gamepad connected")

    def onGamepadDisconnected(self, event: GamepadDisconnectedEvent) -> None:
        self.append("Gamepad disconnected")

    def onJoystickConnected(self, event: JoystickConnectedEvent) -> None:
        self.append("Joystick connected")

    def onJoystickDisconnected(self, event: JoystickDisconnectedEvent) -> None:
        self.append("Joystick disconnected")
|
OpenMined/PySyft | tests/integration/smpc/store/crypto_primitive_provider_test.py | Python | apache-2.0 | 5,243 | 0.000954 | # # stdlib
# from typing import Any
# from typing import Dict
# from typing import Iterable
# from typing import List
# from typing import Tuple
# # third party
# import numpy as np
# import pytest
# # syft absolute
# from syft import Tensor
# # absolute
# from syft.core.smpc.store import CryptoPrimitiveProvider
# from syft.core.smpc.store import register_primitive_generator
# from syft.core.smpc.store import register_primitive_store_add
# from syft.core.smpc.store import register_primitive_store_get
# from syft.core.tensor import ShareTensor
# PRIMITIVE_NR_ELEMS = 4
# # Rasswanth : Fix tests after solving .get() issues
# @pytest.mark.skip
# @pytest.mark.smpc
# @register_primitive_generator("test")
# def provider_test(nr_parties: int, nr_instances: int) -> List[Tuple[int]]:
# """This function will generate the values:
# [((0, 0, 0, 0), (0, 0, 0, 0), ...), ((1, 1, 1, 1), (1, 1, 1, 1)),
# ...]
# """
# primitives = [
# tuple(
# tuple(
# ShareTensor(
# rank=i,
# nr_parties=nr_parties,
# value=Tensor(np.array([[4, 5], [7, 27]], dtype=np.int32)),
# )
# for _ in range(PRIMITIVE_NR_ELEMS)
# )
# for _ in range(nr_instances)
# )
# for i in range(nr_parties)
# ]
# return primitives
# @pytest.mark.skip
# @register_primitive_store_get("test")
# def provider_test_get(
# store: Dict[str, List[Any]], nr_instances: int
# ) -> List[Tuple[int]]:
# return [store["test_key"][i] for i in range(nr_instances)]
# @pytest.mark.skip
# @register_primitive_store_add("test")
# def provider_test_add(
# store: Dict[str, List[Any]], primitives: Iterable[Any]
# ) -> List[Tuple[int]]:
# store["test_key"] = primitives
# @pytest.mark.skip
# def test_exception_init() -> None:
# with pytest.raises(ValueError):
# CryptoPrimitiveProvider()
# @pytest.mark.skip
# def test_generate_primitive_exception() -> None:
# with pytest.raises(ValueError):
# CryptoPrimitiveProvider.generate_primitives(op_str="SMPC", parties=[])
# @pytest.mark.skip
# def test_transfer_primitives_type_exception() -> None:
# with pytest.raises(ValueError):
# """Primitives should be a list."""
# CryptoPrimitiveProvider._transfer_primitives_to_parties(
# op_str="test", primitives=50, parties=[], p_kwargs={}
# )
# @pytest.mark.skip
# def test_transfer_primitives_mismatch_len_exception() -> None:
# with pytest.raises(ValueError):
# """Primitives and Parties should have the sam | e len."""
# Crypto | PrimitiveProvider._transfer_primitives_to_parties(
# op_str="test", primitives=[1], parties=[], p_kwargs={}
# )
# @pytest.mark.skip
# def test_register_primitive() -> None:
# val = CryptoPrimitiveProvider.get_state()
# expected_providers = "test"
# assert expected_providers in val, "Test Provider not registered"
# @pytest.mark.skip
# @pytest.mark.parametrize("nr_instances", [1, 5, 100])
# @pytest.mark.parametrize("nr_parties", [2, 3, 4])
# def test_generate_primitive(get_clients, nr_parties: int, nr_instances: int) -> None:
# parties = get_clients(nr_parties)
# g_kwargs = {"nr_instances": nr_instances}
# res = CryptoPrimitiveProvider.generate_primitives(
# "test",
# parties=parties,
# g_kwargs=g_kwargs,
# p_kwargs=None,
# )
# assert isinstance(res, list)
# assert len(res) == nr_parties
# for i, primitives in enumerate(res):
# for primitive in primitives:
# assert primitive == tuple(
# ShareTensor(
# nr_parties=nr_parties,
# value=Tensor(np.array([[4, 5], [7, 27]], dtype=np.int32)),
# rank=i,
# )
# for _ in range(PRIMITIVE_NR_ELEMS)
# )
# @pytest.mark.skip
# @pytest.mark.parametrize(
# ("nr_instances", "nr_instances_retrieve"),
# [(1, 1), (5, 4), (5, 5), (100, 25), (100, 100)],
# )
# @pytest.mark.parametrize("nr_parties", [2, 3, 4])
# def test_generate_and_transfer_primitive(
# get_clients,
# nr_parties: int,
# nr_instances: int,
# nr_instances_retrieve: int,
# ) -> None:
# parties = get_clients(nr_parties)
# g_kwargs = {"nr_instances": nr_instances}
# CryptoPrimitiveProvider.generate_primitives(
# "test",
# parties=parties,
# g_kwargs=g_kwargs,
# p_kwargs={},
# )
# for i, party in enumerate(parties):
# remote_crypto_store = CryptoPrimitiveProvider.cache_store[party]
# primitives = remote_crypto_store.get_primitives_from_store(
# op_str="test", nr_instances=nr_instances_retrieve
# ).get()
# assert primitives == [
# tuple(
# ShareTensor(
# nr_parties=nr_parties,
# value=Tensor(np.array([[4, 5], [7, 27]], dtype=np.int32)),
# rank=i,
# )
# for _ in range(PRIMITIVE_NR_ELEMS)
# )
# for _ in range(nr_instances_retrieve)
# ]
|
robmcmullen/peppy | peppy/plugins/macro.py | Python | gpl-2.0 | 47,677 | 0.005013 | # peppy Copyright (c) 2006-2010 Rob McMullen
# Licenced under the GPLv2; see http://peppy.flipturn.org for more info
"""Simple macros created by recording actions
This plugin provides macro recording
"""
import os
import wx
from wx.lib.pubsub import Publisher
from peppy.yapsy.plugins import *
from peppy.actions import *
from peppy.actions.minibuffer import *
from peppy.major import MajorMode
from peppy.majormodematcher import MajorModeMatcherDriver
from peppy.minor import *
from peppy.sidebar import *
from peppy.lib.multikey import *
from peppy.debug import *
import peppy.vfs as vfs
from peppy.vfs.itools.vfs.memfs import MemFS, MemFile, MemDir, TempFile
class CharEvent(FakeCharEvent):
    """Fake character event used by L{RecordKeyboardAction} when generating
    scripted copies of an action list.
    """
    def __init__(self, key, unicode, modifiers):
        # Synthesized from a serialized macro, not from real user input, so
        # there is no originating wx event id or widget.
        self.id = -1
        self.event_object = None
        self.keycode = key
        self.unicode = unicode
        self.modifiers = modifiers
        # Quoted so the action processes the character directly rather than
        # relying on evt.Skip() and the EVT_CHAR handler.
        self.is_quoted = True

    @classmethod
    def getScripted(cls, evt):
        """Returns a string that represents the python code to instantiate
        the object.

        Used when serializing a L{RecordedKeyboardAction} to a python string

        @param evt: a live (or fake) character event providing GetKeyCode,
        GetUnicodeKey and GetModifiers
        @return: source text such as C{"CharEvent(97, 97, 0)"}
        """
        return "%s(%d, %d, %d)" % (cls.__name__, evt.GetKeyCode(), evt.GetUnicodeKey(), evt.GetModifiers())
class RecordedKeyboardAction(RecordedAction):
    """Subclass of L{RecordedAction} for keyboard events.
    """
    def __init__(self, action, evt, multiplier):
        RecordedAction.__init__(self, action, multiplier)
        # Snapshot the live wx event so it can be replayed later.
        self.evt = FakeCharEvent(evt)
        # Hack to force SelfInsertCommand to process the character, because
        # normally it uses the evt.Skip() to force the EVT_CHAR handler to
        # insert the character.
        self.evt.is_quoted = True

    def __str__(self):
        return "%s: %dx%s" % (self.actioncls.__name__, self.multiplier, self.evt.GetKeyCode())

    def performAction(self, system_state):
        """Re-instantiate the action for the current frame/mode and replay
        the stored keystroke."""
        action = self.actioncls(system_state.frame, mode=system_state.mode)
        action.actionKeystroke(self.evt, self.multiplier)

    def getScripted(self):
        """Return python source that reproduces this keystroke when exec'd
        with C{frame} and C{mode} in the local namespace."""
        return "%s(frame, mode).actionKeystroke(%s, %d)" % (self.actioncls.__name__, CharEvent.getScripted(self.evt), self.multiplier)
class RecordedMenuAction(RecordedAction):
    """Subclass of L{RecordedAction} for menu events.
    """
    def __init__(self, action, index, multiplier):
        RecordedAction.__init__(self, action, multiplier)
        # Index of the selected menu entry (for list-style actions).
        self.index = index

    def __str__(self):
        return "%s x%d, index=%s" % (self.actioncls.__name__, self.multiplier, self.index)

    def performAction(self, system_state):
        """Re-instantiate the action for the current frame/mode and invoke
        it with the recorded menu index."""
        action = self.actioncls(system_state.frame, mode=system_state.mode)
        action.action(self.index, self.multiplier)

    def getScripted(self):
        """Return python source that reproduces this menu action when exec'd
        with C{frame} and C{mode} in the local namespace."""
        return "%s(frame, mode).action(%d, %d)" % (self.actioncls.__name__, self.index, self.multiplier)
class ActionRecorder(AbstractActionRecorder, debugmixin):
    """Creates, maintains and plays back a list of actions recorded from the
    user's interaction with a major mode.
    """
    def __init__(self):
        # Chronological list of RecordedAction instances.
        self.recording = []

    def __str__(self):
        """Return a short, filesystem-safe summary of the recorded text."""
        summary = ''
        for recorded_item in self.recording:
            if hasattr(recorded_item, 'text'):
                summary += recorded_item.text + " "
            if len(summary) > 50:
                summary = summary[0:50] + "..."
        if len(summary) == 0:
            summary = "untitled"
        return MacroFS.escapeFileName(summary)

    def details(self):
        """Get a list of actions that have been recorded.

        Primarily used for debugging, there is no way to use this list to
        play back the list of actions.
        """
        return "\n".join(str(recorded_item) for recorded_item in self.recording)

    def recordKeystroke(self, action, evt, multiplier):
        """Record a keystroke-triggered action, if it allows recording."""
        if action.isRecordable():
            self.appendRecord(RecordedKeyboardAction(action, evt, multiplier))

    def recordMenu(self, action, index, multiplier):
        """Record a menu-triggered action, if it allows recording."""
        if action.isRecordable():
            self.appendRecord(RecordedMenuAction(action, index, multiplier))

    def appendRecord(self, record):
        """Utility method to add a recordable action to the current list

        This method checks for the coalescability of the record with the
        previous record, and it is merged if possible.

        @param record: L{RecordedAction} instance
        """
        self.dprint("adding %s" % record)
        if self.recording:
            last = self.recording[-1]
            if last.canCoalesceActions(record):
                self.recording.pop()
                record = last.coalesceActions(record)
                self.dprint("coalesced into %s" % record)
        self.recording.append(record)

    def getRecordedActions(self):
        """Return the chronological list of recorded actions."""
        return self.recording

    def playback(self, frame, mode, multiplier=1):
        """Replay the recorded actions against the given frame/mode.

        The whole playback is wrapped in a single undo group so one undo
        reverts the entire macro.

        @param multiplier: number of times to repeat the full recording
        """
        mode.BeginUndoAction()
        state = MacroPlaybackState(frame, mode)
        self.dprint(state)
        # Fix: removed leftover debug instrumentation that globally forced
        # SelectAction.debuglevel to 1 during playback (and to 0 after,
        # clobbering any previously configured level).
        try:
            while multiplier > 0:
                for recorded_action in self.getRecordedActions():
                    recorded_action.performAction(state)
                multiplier -= 1
        finally:
            # Always close the undo group, even if an action raises;
            # the original leaked the open group on error.
            mode.EndUndoAction()
class PythonScriptableMacro(MemFile):
"""A list of serialized SelectAction commands used in playing back macros.
This object contains python code in the form of text strings that
provide a way to reproduce the effects of a previously recorded macro.
Additionally, since they are in plain text, they may be carefully edited
by the user to provide additional functionality that is not possible only
using the record capability.
The generated python script looks like the following:
SelfInsertCommand(frame, mode).actionKeystroke(CharEvent(97, 97, 0), 1)
BeginningTextOfLine(frame, mode).actionKeystroke(CharEvent(65, 65, 2), 1)
SelfInsertCommand(frame, mode).actionKeystroke(CharEvent(98, 98, 0), 1)
ElectricReturn(frame, mode).actionKeystroke(CharEvent(13, 13, 0), 1)
where the actions are listed, one per line, by their python class name.
The statements are C{exec}'d in in the global namespace, but have a
constructed local namespace that includes C{frame} and C{mode} representing
the current L{BufferFrame} and L{MajorMode} instance, respectively.
"""
keyword_mapping = {
'key': 'key_binding',
}
def __init__(self, recorder=None, name=None):
"""Converts the list of recorded actions into python string form.
"""
if isinstance(recorder, str):
data = recorder
elif recorder:
name = str(recorder)
data = self.getScriptFromRecorder(recorder)
else:
data = ""
if name is None:
name = "untitled"
MemFile.__init__(self, data, name)
self.parseMacroForMetadata()
def __str__(self):
return self.name
def get_key_binding(self):
try:
return self._key_binding
except AttributeError:
self._key_binding = None
return None
def set_key_binding(self, binding):
self._key_binding = binding
key_binding = property(get_key_binding, set_key_binding)
def save(self, url):
"""Save this macro to the specified macro: url
"""
dprint("Saving to %s" % url)
self.rebuildMacroAndMetadata()
fh = vfs.open_write(url)
fh.write(self.data)
fh.close()
def rebuildMacroAndMetadata(self):
"""Reconstructs text of macro taking into account any changes in
the keybindings or other metadata
|
omargammoh/rpislave | website/proc/rebooter.py | Python | gpl-2.0 | 1,426 | 0.007714 | import datetime
import time
import website.processing
def get_laststatusloop():
    """Read the status loop's heartbeat timestamp.

    Parses /home/pi/data/laststatusloop, which contains a single
    '%Y%m%d%H%M%S' timestamp written by the status process.

    Returns the parsed datetime, or None if the file is missing,
    unreadable, or malformed.
    """
    try:
        # 'with' guarantees the handle is closed even if read/parse fails
        # (the original leaked the handle on a strptime error).
        with open('/home/pi/data/laststatusloop', 'r') as f:
            s = f.read()
        return datetime.datetime.strptime(s, '%Y%m%d%H%M%S')
    except Exception:
        # Narrowed from a bare 'except:' so KeyboardInterrupt/SystemExit
        # are no longer swallowed.
        print(">> rebooter: !!! failed to read last status loop")
        return None
def main():
    """Watchdog loop for the status process.

    Every ~5 minutes, compare the heartbeat timestamp written by the status
    loop against the current UTC time.  After 6 checks in which the
    heartbeat is more than 180 seconds stale, write a marker file and
    reboot the device via ``sudo reboot``.

    NOTE(review): deviation_counter is never reset when the status is fine,
    so six stale readings accumulated over the whole process lifetime will
    trigger a reboot -- confirm whether consecutive failures were intended.
    """
    deviation_counter = 0
    while True:
        try:
            dt = get_laststatusloop()
            # dt is None when the heartbeat file could not be read/parsed;
            # that case is silently skipped (no counter increment).
            if dt is not None:
                now = datetime.datetime.utcnow()
                sec = (now - dt).total_seconds()
                if sec > 180:
                    print ">> rebooter: deviation counter +1"
                    deviation_counter += 1
                    if deviation_counter >= 6:
                        # Leave a timestamped marker file explaining the
                        # reboot before actually triggering it.
                        print ">> rebooter: will reboot the device because the status process doesnt seem to have run in a while"
                        f = file('/home/pi/data/reboooter-' + now.strftime('%Y%m%d%H%M%S'),'w')
                        f.write('we have rebooted on %s because the status loop does not seem to have run for a while, %s x %s sec' %(str(now), sec, deviation_counter))
                        f.close()
                        website.processing.execute(cmd="sudo reboot")
                else:
                    print ">> rebooter: is fine"
        except:
            # NOTE(review): bare except keeps the watchdog alive at all
            # costs, but also hides real errors (and catches SystemExit).
            print ">> rebooter: !!! error"
            pass
        time.sleep(300)#500
|
msegado/edx-platform | common/test/acceptance/tests/lms/test_lms_user_preview.py | Python | agpl-3.0 | 5,975 | 0.001841 | # -*- coding: utf-8 -*-
"""
Tests the "preview" selector in the LMS that allows changing between Staff, Learner, and Content Groups.
"""
from textwrap import dedent
from common.test.acceptance.fixtures.course imp | ort CourseFixture, XBlockFixtureDesc
from common. | test.acceptance.pages.common.auto_auth import AutoAuthPage
from common.test.acceptance.pages.lms.courseware import CoursewarePage
from common.test.acceptance.pages.lms.staff_view import StaffCoursewarePage
from common.test.acceptance.tests.helpers import UniqueCourseTest, create_user_partition_json
from openedx.core.lib.tests import attr
from xmodule.partitions.partitions import ENROLLMENT_TRACK_PARTITION_ID, MINIMUM_STATIC_PARTITION_ID, Group
@attr(shard=20)
class StaffViewTest(UniqueCourseTest):
    """
    Tests that verify the staff view.

    Base class: installs a course (populated via the
    populate_course_fixture hook) and registers a global-staff user in it,
    so subclasses can open the courseware in Staff view.
    """
    USERNAME = "STAFF_TESTER"
    EMAIL = "johndoe@example.com"

    def setUp(self):
        super(StaffViewTest, self).setUp()
        self.courseware_page = CoursewarePage(self.browser, self.course_id)
        # Install a course with sections/problems, tabs, updates, and handouts
        self.course_fixture = CourseFixture(
            self.course_info['org'], self.course_info['number'],
            self.course_info['run'], self.course_info['display_name']
        )
        # Hook for subclasses to add content before the fixture is pushed.
        self.populate_course_fixture(self.course_fixture)
        self.course_fixture.install()
        # Auto-auth register for the course.
        # Do this as global staff so that you will see the Staff View
        AutoAuthPage(self.browser, username=self.USERNAME, email=self.EMAIL,
                     course_id=self.course_id, staff=True).visit()

    def _goto_staff_page(self):
        """
        Open staff page with assertion

        Visits the courseware and asserts the view-mode selector shows
        'Staff' (the default for a global-staff user).
        """
        self.courseware_page.visit()
        staff_page = StaffCoursewarePage(self.browser, self.course_id)
        self.assertEqual(staff_page.staff_view_mode, 'Staff')
        return staff_page
@attr(shard=20)
class CourseWithContentGroupsTest(StaffViewTest):
    """
    Verifies that changing the "View this course as" selector works properly for content groups.
    """
    def setUp(self):
        super(CourseWithContentGroupsTest, self).setUp()
        # pylint: disable=protected-access
        # Attach a cohort-scheme user partition with two content groups
        # (alpha / beta) to the course installed by StaffViewTest.setUp.
        self.course_fixture._update_xblock(self.course_fixture._course_location, {
            "metadata": {
                u"user_partitions": [
                    create_user_partition_json(
                        MINIMUM_STATIC_PARTITION_ID,
                        'Configuration alpha,beta',
                        'Content Group Partition',
                        [
                            Group(MINIMUM_STATIC_PARTITION_ID + 1, 'alpha'),
                            Group(MINIMUM_STATIC_PARTITION_ID + 2, 'beta')
                        ],
                        scheme="cohort"
                    )
                ],
            },
        })

    def populate_course_fixture(self, course_fixture):
        """
        Populates test course with chapter, sequential, and 3 problems.
        One problem is visible to all, one problem is visible only to Group "alpha", and
        one problem is visible only to Group "beta".
        """
        problem_data = dedent("""
            <problem markdown="Simple Problem" max_attempts="" weight="">
              <choiceresponse>
                <label>Choose Yes.</label>
                <checkboxgroup>
                  <choice correct="true">Yes</choice>
                </checkboxgroup>
              </choiceresponse>
            </problem>
        """)
        self.alpha_text = "VISIBLE TO ALPHA"
        self.beta_text = "VISIBLE TO BETA"
        self.audit_text = "VISIBLE TO AUDIT"
        self.everyone_text = "VISIBLE TO EVERYONE"
        course_fixture.add_children(
            XBlockFixtureDesc('chapter', 'Test Section').add_children(
                XBlockFixtureDesc('sequential', 'Test Subsection').add_children(
                    XBlockFixtureDesc('vertical', 'Test Unit').add_children(
                        # Restricted to the 'alpha' content group.
                        XBlockFixtureDesc(
                            'problem',
                            self.alpha_text,
                            data=problem_data,
                            metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 1]}}
                        ),
                        # Restricted to the 'beta' content group.
                        XBlockFixtureDesc(
                            'problem',
                            self.beta_text,
                            data=problem_data,
                            metadata={"group_access": {MINIMUM_STATIC_PARTITION_ID: [MINIMUM_STATIC_PARTITION_ID + 2]}}
                        ),
                        XBlockFixtureDesc(
                            'problem',
                            self.audit_text,
                            data=problem_data,
                            # Below 1 is the hardcoded group ID for "Audit"
                            metadata={"group_access": {ENROLLMENT_TRACK_PARTITION_ID: [1]}}
                        ),
                        # No group_access metadata: visible to everyone.
                        XBlockFixtureDesc(
                            'problem',
                            self.everyone_text,
                            data=problem_data
                        )
                    )
                )
            )
        )

    @attr('a11y')
    def test_course_page(self):
        """
        Run accessibility audit for course staff pages.
        """
        course_page = self._goto_staff_page()
        # Known issues are excluded from the audit; see the referenced
        # AC-* tickets for each rule.
        course_page.a11y_audit.config.set_rules({
            'ignore': [
                'aria-allowed-attr',  # TODO: AC-559
                'aria-roles',  # TODO: AC-559,
                'aria-valid-attr',  # TODO: AC-559
                'color-contrast',  # TODO: AC-559
                'link-href',  # TODO: AC-559
                'section',  # TODO: AC-559
                'region',  # TODO: AC-932
            ]
        })
        course_page.a11y_audit.check_for_accessibility_errors()
|
deepmind/deepmind-research | cs_gan/main_ode.py | Python | apache-2.0 | 12,654 | 0.008219 | # Copyright 2019 DeepMind Technologies Limited and Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Training script."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import app
from absl import flags
from absl import logging
import tensorflow.compat.v1 as tf
from cs_gan import file_utils
from cs_gan import gan
from cs_gan import image_metrics
from cs_gan import utils
# --- Training schedule / optimiser configuration ---
flags.DEFINE_integer(
    'num_training_iterations', 1200000,
    'Number of training iterations.')
flags.DEFINE_string(
    'ode_mode', 'rk4', 'Integration method.')
flags.DEFINE_integer(
    'batch_size', 64, 'Training batch size.')
flags.DEFINE_float(
    'grad_reg_weight', 0.02, 'Step size for latent optimisation.')
flags.DEFINE_string(
    'opt_name', 'gd', 'Name of the optimiser (gd|adam).')
flags.DEFINE_bool(
    'schedule_lr', True, 'The method to project z.')
flags.DEFINE_bool(
    'reg_first_grad_only', True, 'Whether only to regularise the first grad.')
flags.DEFINE_integer(
    'num_latents', 128, 'The number of latents')
# --- Logging / evaluation cadence ---
flags.DEFINE_integer(
    'summary_every_step', 1000,
    'The interval at which to log debug ops.')
flags.DEFINE_integer(
    'image_metrics_every_step', 1000,
    'The interval at which to log (expensive) image metrics.')
flags.DEFINE_integer(
    'export_every', 10,
    'The interval at which to export samples.')
# Use 50k to reproduce scores from the paper. Default to 10k here to avoid the
# runtime error caused by too large graph with 50k samples on some machines.
flags.DEFINE_integer(
    'num_eval_samples', 10000,
    'The number of samples used to evaluate FID/IS.')
# --- Data, output location and learning rates ---
flags.DEFINE_string(
    'dataset', 'cifar', 'The dataset used for learning (cifar|mnist).')
flags.DEFINE_string(
    'output_dir', '/tmp/ode_gan/gan', 'Location where to save output files.')
flags.DEFINE_float('disc_lr', 4e-2, 'Discriminator Learning rate.')
flags.DEFINE_float('gen_lr', 4e-2, 'Generator Learning rate.')
flags.DEFINE_bool(
    'run_real_data_metrics', False,
    'Whether or not to run image metrics on real data.')
flags.DEFINE_bool(
    'run_sample_metrics', True,
    'Whether or not to run image metrics on samples.')

FLAGS = flags.FLAGS

# Log info level (for Hooks).
tf.logging.set_verbosity(tf.logging.INFO)
def _copy_vars(v_list):
  """Return detached snapshots (via tf.identity) of every variable in v_list."""
  return [tf.identity(v) for v in v_list]
def _restore_vars(v_list, t_list):
"""Restore variables in v_list from t_list."""
ops = []
for v, t in zip(v_list, t_list):
ops.append(v.assign(t))
return ops
def _scale_vars(s, v_list):
"""Scale all variables in v_list by s."""
return [s * v for v in v_list]
def _acc_grads(g_sum, g_w, g):
"""Accumulate gradients in g, weighted by g_w."""
return [g_sum_i + g_w * g_i for g_sum_i, g_i in zip(g_sum, g)]
def _compute_reg_grads(gen_grads, disc_vars):
  """Compute gradients norm (this is an upper-bound of the full-batch norm)."""
  # Squared L2 norm of the generator gradients, accumulated across all tensors.
  gen_norm = tf.accumulate_n([tf.reduce_sum(u * u) for u in gen_grads])
  # Differentiate that norm w.r.t. the discriminator variables: this is the
  # gradient-penalty term added to the discriminator update.
  disc_reg_grads = tf.gradients(gen_norm, disc_vars)
  return disc_reg_grads
def run_model(prior, images, model, disc_reg_weight):
  """Run the model with new data and samples.

  Args:
    prior: the noise source as the generator input.
    images: images sampled from dataset.
    model: a GAN model defined in gan.py.
    disc_reg_weight: regularisation weight for discriminator gradients.

  Returns:
    debug_ops: statistics from the model, see gan.py for more details.
    disc_grads: discriminator gradients.
    gen_grads: generator gradients.
  """
  generator_inputs = prior.sample(FLAGS.batch_size)
  model_output = model.connect(images, generator_inputs)
  optimization_components = model_output.optimization_components

  disc_grads = tf.gradients(
      optimization_components['disc'].loss,
      optimization_components['disc'].vars)
  gen_grads = tf.gradients(
      optimization_components['gen'].loss,
      optimization_components['gen'].vars)

  if disc_reg_weight > 0.0:
    # Add the gradient-norm penalty (see _compute_reg_grads) to the
    # discriminator gradients, weighted by disc_reg_weight.
    reg_grads = _compute_reg_grads(gen_grads,
                                   optimization_components['disc'].vars)
    disc_grads = _acc_grads(disc_grads, disc_reg_weight, reg_grads)

  debug_ops = model_output.debug_ops

  return debug_ops, disc_grads, gen_grads
def update_model(model, disc_grads, gen_grads, disc_opt, gen_opt,
                 global_step, update_scale):
  """Update model with gradients.

  Every gradient is scaled by `update_scale` before being applied; this is
  how the fractional Runge-Kutta sub-steps are taken.  The
  `control_dependencies` block guarantees all gradients are computed before
  either player's variables change.
  """
  disc_vars, gen_vars = model.get_variables()
  with tf.control_dependencies(gen_grads + disc_grads):
    disc_update_op = disc_opt.apply_gradients(
        zip(_scale_vars(update_scale, disc_grads),
            disc_vars))
    gen_update_op = gen_opt.apply_gradients(
        zip(_scale_vars(update_scale, gen_grads),
            gen_vars),
        global_step=global_step)
    # Only the generator update advances global_step (once per full step).
    update_op = tf.group([disc_update_op, gen_update_op])
  return update_op
def main(argv):
del argv
utils.make_output_dir(FLAGS.output_dir)
data_processor = utils.DataProcessor()
# Compute the batch-size multiplier
if FLAGS.ode_mode == 'rk2':
batch_mul = 2
elif FLAGS.ode_mode == 'rk4':
batch_mul = 4
else:
batch_mul = 1
images = utils.get_train_dataset(data_processor, FLAGS.dataset,
int(FLAGS.batch_size * batch_mul))
image_splits = tf.split(images, batch_mul)
logging.info('Generator learning rate: %d', FLAGS.gen_lr)
logging.info('Discriminator learning rate: %d', FLAGS.disc_lr)
global_step = tf.train.get_or_create_global_step()
# Construct optimizers.
if FLAGS.opt_name == 'adam':
disc_opt = tf.train.AdamOptimizer(FLAGS.disc_lr, beta1=0.5, beta2=0.999)
gen_opt = tf.train.AdamOptimizer(FLAGS.gen_lr, beta1=0.5, beta2=0.999)
elif FLAGS.opt_name == 'gd':
if FLAGS.schedule_lr:
gd_disc_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.disc_lr / 4., FLAGS.disc_lr, FLAGS.disc_lr / 2.],
boundaries=[500, 400000])
gd_gen_lr = tf.train.piecewise_constant(
global_step,
values=[FLAGS.gen_lr / 4., FLAGS.gen_lr, FLAGS.gen_lr / 2.],
boundaries=[500, 400000])
else:
gd_disc_lr = FLAGS.disc_lr
gd_gen_lr = FLAGS.gen_lr
disc_opt = tf.train.GradientDescentOptimizer(gd_disc_lr)
gen_opt = tf.train.GradientDescentOptimizer(gd_gen_lr)
else:
raise ValueError('Unknown ODE mode!')
# Create the networks and models.
generator = utils.get_generator(FLAGS.dataset)
metric_net = utils.get_metric_net(FLAGS.dataset, use_sn=False)
model = gan.GAN(metric_net, generator)
prior = utils.make_prior(FLAGS.num_latents)
# Setup ODE parameters.
if FLAGS.ode_mode == 'rk2':
ode_grad_weights = [0.5, 0.5]
step_scale = [1.0]
elif FLAGS.ode_mode == 'rk4':
ode_grad_weights = [1. / 6., 1. / 3., 1. / 3., 1. / 6.]
step_scale = [0.5, 0.5, 1.]
elif FLAGS.ode_mode == 'euler':
# Euler update
ode_grad_weights = [1.0]
step_scale = []
else:
raise ValueError('Unknown ODE mode!')
# Extra steps for RK updates.
num_extra_steps = len(step_scale)
if FLAGS.reg_first_grad_only:
first_reg_weight = FLAGS.grad_reg_weight / ode_grad_weights[0]
other_reg_weight = 0.0
else:
first_reg_weight = FLAGS.grad_reg_weight
other_reg_weight = FLAGS.grad_reg_weight
debug_ops, disc_grads, gen_grads = run_model(prior, image_splits[0],
model, first_reg_weight)
disc_vars, gen_vars = model.get_variables()
final_disc_grads = _scale_vars(ode_grad_weights[0], disc_grads)
final_gen_grads = _s |
geraintpalmer/ASQ | asq/exit_node.py | Python | mit | 759 | 0.002635 | from __future__ import division
class ExitNode:
    """
    Sink node of the network: individuals that finish service end up here.
    """

    def __init__(self, max_simulation_time):
        """
        Create the node.  Its next event is pinned to the end of the
        simulation so it never wins the next-event race.
        """
        self.id_number = -1
        self.node_capacity = "Inf"
        self.next_event_date = max_simulation_time
        self.individuals = []

    def __repr__(self):
        """
        Human-readable name of the node.
        """
        return 'Exit Node'

    def accept(self, next_individual, current_time):
        """
        Record an individual leaving the system.
        """
        self.individuals.append(next_individual)

    def update_next_event_date(self):
        """
        No-op: the exit node's event date is fixed at construction time.
        """
        pass
angelmtenor/IDSFC | L2_Data_Wrangling/P8_get_hourly_entries_and_exits.py | Python | mit | 2,645 | 0.00794 | import pandas
def get_hourly_entries(df):
    """
    Turn the cumulative MTA turnstile counters into per-reading deltas.

    Adds two columns to ``df`` (in place) and returns it:

    1) ENTRIESn_hourly: ENTRIESn of the current row minus the previous row;
       the first (undefined) delta is filled with 1.
    2) EXITSn_hourly: the same for EXITSn, with the first delta filled with 0.

    ``df`` is assumed to contain the readings of a single turnstile machine
    (unique SCP, C/A and UNIT) in chronological order.
    """
    df['ENTRIESn_hourly'] = (df['ENTRIESn'] - df['ENTRIESn'].shift(1)).fillna(1)
    df['EXITSn_hourly'] = (df['EXITSn'] - df['EXITSn'].shift(1)).fillna(0)
    return df
# Smoke test: load the filtered turnstile dump and print the first rows with
# the new hourly columns.
# NOTE(review): 'turnslide' looks like a typo, but the string must match the
# actual file name on disk — confirm before changing it.
DIR = 'MTA_Subway_turnstile'
data_filename = DIR+'/'+'master_turnslide_filtered.txt'
data = pandas.read_csv(data_filename)
print(get_hourly_entries(data).head())
nikdoof/test-auth | app/groups/migrations/0002_groupinformation.py | Python | bsd-3-clause | 5,402 | 0.00759 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import DataMigration
from django.db import models
class Migration(DataMigration):
    """South data migration: backfill a GroupInformation row for every
    existing auth Group that does not have one yet."""

    def forwards(self, orm):
        # Idempotent: reuse the existing GroupInformation when present,
        # otherwise create one with default values.
        for group in orm['auth.Group'].objects.all():
            try:
                obj = orm.GroupInformation.objects.get(group=group)
            except orm.GroupInformation.DoesNotExist:
                obj = orm.GroupInformation(group=group)
            obj.save()

    def backwards(self, orm):
        # Destructive reverse: deletes ALL GroupInformation rows, including
        # any created outside this migration.
        orm.GroupInformation.objects.all().delete()
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'unique': 'True'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '30', 'unique': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'groups.groupinformation': {
'Meta': {'object_name': 'GroupInformation'},
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.User']", 'symmetrical': 'False'}),
'description': ('django.db.models.fields.TextField', [], {}),
'group': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.Group']", 'unique': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'requestable': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'groups.grouprequest': {
'Meta': {'object_name': 'GroupRequest'},
'changed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'requests'", 'to': "orm['auth.Group']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'reason': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'grouprequests'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['groups']
|
liquidm/druid | docs/_bin/generate-license-dependency-reports.py | Python | apache-2.0 | 2,924 | 0.004788 | #!/usr/bin/env python3
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import subprocess
import sys
# NOTE(review): never written or read anywhere in this script — looks like
# dead state; confirm before deleting.
existing_jar_dict_notice = {}
def main():
    """Entry point: validate the CLI arguments and kick off report generation."""
    if len(sys.argv) != 3:
        sys.stderr.write('usage: program <druid source path> <full tmp path>\n')
        sys.exit(1)

    druid_path, tmp_path = sys.argv[1], sys.argv[2]
    generate_reports(druid_path, tmp_path)
def generate_reports(druid_path, tmp_path):
    """Generate Maven license-dependency reports for the main distribution and
    every core extension, copying each generated site under
    <tmp_path>/license-reports.

    Args:
        druid_path: root of the Druid source checkout.
        tmp_path: scratch directory that will receive the reports.
    """
    license_main_path = tmp_path + "/license-reports"
    license_ext_path = tmp_path + "/license-reports/ext"
    os.mkdir(license_main_path)
    os.mkdir(license_ext_path)

    print("********** Generating main LICENSE report.... **********")
    os.chdir(druid_path)
    command = "mvn -Pdist -Ddependency.locations.enabled=false project-info-reports:dependencies"
    subprocess.check_output(command, shell=True)
    command = "cp -r distribution/target/site {}/site".format(license_main_path)
    subprocess.check_output(command, shell=True)
    # BUG FIX: a stray debugging sys.exit() used to sit here, which made
    # everything below unreachable — extension reports were never generated.

    print("********** Generating extension LICENSE reports.... **********")
    extension_dirs = os.listdir("extensions-core")
    for extension_dir in extension_dirs:
        full_extension_dir = druid_path + "/extensions-core/" + extension_dir
        if not os.path.isdir(full_extension_dir):
            continue

        print("--- Generating report for {}... ---".format(extension_dir))
        extension_report_dir = "{}/{}".format(license_ext_path, extension_dir)
        os.mkdir(extension_report_dir)

        os.chdir(full_extension_dir)
        try:
            command = "mvn -Ddependency.locations.enabled=false project-info-reports:dependencies"
            subprocess.check_output(command, shell=True)
            command = "cp -r target/site {}/site".format(extension_report_dir)
            subprocess.check_output(command, shell=True)
        except Exception:
            # BUG FIX: was a bare 'except:', which would also swallow
            # KeyboardInterrupt / SystemExit; best-effort behaviour is kept.
            print("Encountered error when generating report for: " + extension_dir)
        os.chdir("..")
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
print('Interrupted, closing.') |
ESSolutions/ESSArch_Core | ESSArch_Core/essxml/ProfileMaker/migrations/0007_auto_20160908_1428.py | Python | gpl-3.0 | 1,696 | 0 | """
ESSArch is an open source archiving and digi | tal pre | servation system
ESSArch
Copyright (C) 2005-2019 ES Solutions AB
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Contact information:
Web - http://www.essolutions.se
Email - essarch@essolutions.se
"""
# -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-09-08 14:28
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated schema migration for ProfileMaker: drop the
    finishedTemplate model and the 'generated' flag, and add the
    namespace/root_element fields to templatePackage."""

    dependencies = [
        ('ProfileMaker', '0006_auto_20160902_1308'),
    ]

    operations = [
        migrations.DeleteModel(
            name='finishedTemplate',
        ),
        migrations.RemoveField(
            model_name='templatepackage',
            name='generated',
        ),
        migrations.AddField(
            model_name='templatepackage',
            name='namespace',
            field=models.CharField(default='', max_length=20),
        ),
        migrations.AddField(
            model_name='templatepackage',
            name='root_element',
            field=models.CharField(default='', max_length=55),
        ),
    ]
|
poppogbr/genropy | packages/adm/model/connection.py | Python | lgpl-2.1 | 3,070 | 0.00456 | # encoding: utf-8
from __future__ import with_statement
from datetime import datetime
class Table(object):
    """adm.connection - audit table with one row per user connection/session."""

    def use_dbstores(self):
        # Connections are tracked only in the main store, never per dbstore.
        return False

    def config_db(self, pkg):
        """Declare the connection table: who connected, from where and when."""
        tbl = pkg.table('connection', pkey='id', name_long='!!Connection',
                        name_plural='!!Connections', broadcast='old')
        tbl.column('id', size='22', name_long='!!Connection id')
        tbl.column('userid', size=':32', name_long='!!Userid').relation('user.username')
        tbl.column('username', size=':32', name_long='!!Username')
        tbl.column('ip', size=':15', name_long='!!Ip number')
        tbl.column('start_ts', 'DH', name_long='!!Start TS')
        # BUG FIX: end_ts was labelled '!!Start TS', copy-pasted from above.
        tbl.column('end_ts', 'DH', name_long='!!End TS')
        tbl.column('end_reason', size=':12', name_long='!!End reason')
        tbl.column('user_agent', name_long='!!User agent')
        tbl.aliasColumn('user_fullname', relation_path='@userid.fullname', name_long='!!User fullname')

    def trigger_onUpdating(self, record, old_record=None):
        # When a connection is being closed, close its still-open pages too.
        if 'end_ts' in record and record['end_ts']:
            self.db.table('adm.served_page').closePendingPages(connection_id=record['id'],
                                                               end_ts=record['end_ts'],
                                                               end_reason=record['end_reason'])

    def getPendingConnections(self, userid=None):
        """Return all connections not yet closed, optionally for one user."""
        where = '$end_ts IS NULL'
        if userid:
            where = '%s AND %s' % ('$userid=:userid', where)
        return self.query(where=where, userid=userid).fetch()

    def closePendingConnections(self, end_ts=None, end_reason=None):
        """Close every open connection (e.g. on server startup/shutdown)."""
        end_ts = end_ts or datetime.now()
        for conn in self.getPendingConnections():
            self.closeConnection(conn['id'], end_ts=end_ts, end_reason=end_reason)

    def connectionLog(self, event, connection_id=None):
        """Record a lifecycle event: 'open' opens, anything else logs out."""
        if event == 'open':
            self.openConnection()
        else:
            self.closeConnection(connection_id=connection_id, end_reason='logout')

    def closeConnection(self, connection_id=None, end_ts=None, end_reason=None):
        """Mark one connection as ended (defaults to the current page's)."""
        page = self.db.application.site.currentPage
        connection_id = connection_id or page.connection_id
        with self.db.tempEnv(connectionName='system'):
            self.batchUpdate(dict(end_ts=end_ts or datetime.now(), end_reason=end_reason),
                             where='$id=:connection_id', connection_id=connection_id)
            self.db.commit()

    def openConnection(self):
        """Insert (or update) the row describing the current page's connection."""
        page = self.db.application.site.currentPage
        avatar = page.avatar
        new_connection_record = dict(id=page.connection_id, username=page.user,
                                     userid=avatar.userid, start_ts=datetime.now(),
                                     ip=page.request.remote_addr,
                                     user_agent=page.request.get_header('User-Agent'))
        with self.db.tempEnv(connectionName='system'):
            self.insertOrUpdate(new_connection_record)
            self.db.commit()
|
JPro173/cloudpie | client/main.py | Python | gpl-3.0 | 1,186 | 0.001686 | import htmlPy
import socket
import json
import os
# Connect to the local backend service.
# NOTE(review): the three recv() calls presumably discard a server banner
# sent on connect — confirm against the server protocol.
sock = socket.socket()
sock.connect(('localhost', 5002))
sock.send(b'')
sock.recv(1024)
sock.recv(1024)
sock.recv(1024)

# GUI application serving templates and static assets from ./html.
app = htmlPy.AppGUI(title=u"Python Best Ever", maximized=True)
app.template_path = os.path.abspath("./html")
app.static_path = os.path.abspath("./html")

# Currently displayed template and the context dict passed to it on render.
template_name = 'index.html'
app_data = {
    'val': '0'
}
def processor(response):
    """Handle a backend reply: JSON of the form {'message': 'cmd[@sub]#data'}.

    Currently only 'put@<key>#<value>' is understood: it stores <value> under
    <key> in app_data so the next render picks it up.
    """
    message = json.loads(str(response))['message']
    print(message)
    command, data = message.split('#')
    subcommand = None
    if '@' in command:
        command, subcommand = command.split('@')
    # BUG FIX: 'subcommand' could be referenced while unbound when a bare
    # 'put#...' (no '@') arrived; guard it explicitly.
    if command == 'put' and subcommand is not None:
        app_data[subcommand] = data
class App(htmlPy.Object):
    """Bridge object bound into the HTML GUI; its slots are callable from JS."""

    def __init__(self):
        super(App, self).__init__()

    @htmlPy.Slot(str)
    def link(self, url):
        """Switch the GUI to another template."""
        # BUG FIX: 'template_name' was assigned as a function local, so the
        # module-level name never changed and command() kept re-rendering the
        # original template after navigation.
        global template_name
        template_name = str(url)
        app.template = (template_name, app_data)

    @htmlPy.Slot(str)
    def command(self, cmd):
        """Send a command to the backend, apply its reply, and re-render."""
        # NOTE(review): bytes(str) needs an encoding on Python 3 — this code
        # appears to target Python 2; confirm the runtime.
        cmd = bytes(cmd)
        sock.send(cmd)
        response = sock.recv(1024)
        processor(response)
        app.template = (template_name, app_data)
# Initial render, expose App's slots to the page, then enter the GUI loop.
app.template = (template_name, app_data)
app.bind(App())
app.start()
|
rafasis1986/EngineeringMidLevel | flaskiwsapp/settings/devConfigTemplate.py | Python | mit | 539 | 0.001855 | '''
Created on Sep 22, 2016
@author: rtorres
'''
import os

from flaskiwsapp.settings.baseConfig import BaseConfig


class DevConfig(BaseConfig):
    """Development configuration template (copy and fill in the Auth0 secrets)."""

    ENV = 'dev'
    DEBUG = True
    DEBUG_TB_ENABLED = True
    SQLALCHEMY_DATABASE_URI = 'postgresql://localhost/example'
    AUTH0_CALLBACK_URL = 'http://localhost/auth/callback'
    AUTH0_CLIENT_ID = ''
    AUTH0_CLIENT_SECRET = ''
    AUTH0_DOMAIN = ''
    APP_DOMAIN = 'localhost'
    APP_URL = 'http://%s' % APP_DOMAIN
    # BUG FIX: was the typo 'locahost'.
    SERVER_NAME = 'localhost'
|
mkesicki/excel_validator | validator/NotBlankValidator.py | Python | mit | 401 | 0.009975 | from validator.BaseValidator impor | t BaseValidator
import re
class NotBlankValidator(BaseValidator):
    """Validator rejecting blank cells (None or '') when the cell is required."""

    required = True
    message = "Cell can not be blank"

    def validate(self, value):
        """Return True when the cell is acceptable: either not required, or
        holding a non-empty value (0 and False-y non-empty values pass)."""
        # Idiom fix: identity test for None instead of '!= None', and plain
        # truthiness instead of '== False' on the flag.
        if not self.required or (value is not None and value != ""):
            return True
        return False

    def __init__(self, params):
        super(NotBlankValidator, self).__init__(params)
|
AAT-SP2017/tweet-miner | tweet-miner/main.py | Python | mit | 684 | 0.005848 | import log
import taglistener
import settings

if __name__ == '__main__':
    # Bind before the try so the except clause can always reference it.
    # BUG FIX: 'listener' was unbound (NameError) if the KeyboardInterrupt
    # arrived before TagListener() finished constructing.
    listener = None
    try:
        import time

        logger = log.get_logger(__name__)
        listener = taglistener.TagListener()
        print('Started listening for {}'.format(settings.TAGS))
        print('Press CTRL+C to exit. It may take a minute or two for it to gracefully terminate.')
        while listener.is_running():
            time.sleep(2)
        listener.disconnect()
    except KeyboardInterrupt:
        print('\nExiting - it may take a little while until the last threaded request is being handled.')
        print('Be patient.')
        if listener is not None and listener.is_running():
            listener.disconnect()
|
chrislit/abydos | tests/fingerprint/test_fingerprint_consonant.py | Python | gpl-3.0 | 1,933 | 0 | # Copyright 2019-2020 by Christopher C. Little.
# This file is part of Abydos.
#
# Abydos is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Abydos is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Abydos. If not, see <http://www.gnu.org/licenses/>.
"""abydos.tests.fingerprint.test_fingerprint_consonant.
This module contains unit tests for abydos.fingerprint.Consonant
"""
import unittest
from abydos.fingerprint import Consonant
class ConsonantTestCases(unittest.TestCase):
    """Test Taft's consonant coding functions.

    abydos.fingerprint.Consonant
    """

    def test_consonant_fingerprint(self):
        """Test abydos.fingerprint.Consonant."""
        # Base case: the empty string fingerprints to the empty string.
        self.assertEqual(Consonant().fingerprint(''), '')

        # (constructor kwargs, input word, expected fingerprint) — the first
        # six rows come from the paper, the last exercises a custom vowel set.
        cases = [
            (dict(variant=1), 'ABRAMS', 'ABRMS'),
            (dict(variant=2), 'ARROW', 'ARR'),
            (dict(variant=3), 'ABRAHAM', 'ABRM'),
            (dict(variant=1, doubles=False), 'ARROW', 'ARW'),
            (dict(variant=2, doubles=False), 'ARROW', 'AR'),
            (dict(variant=3, doubles=False), 'GARRETH', 'GRT'),
            (dict(vowels='R'), 'ARROW', 'AOW'),
        ]
        for kwargs, word, expected in cases:
            self.assertEqual(Consonant(**kwargs).fingerprint(word), expected)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
neo5g/server-gsrp5 | server-gsrp5/orm/fields.py | Python | agpl-3.0 | 18,659 | 0.064902 | # -*- coding: utf-8 -*-
# Standard library
import base64
import datetime as DT
import decimal
from decimal import Decimal
import logging

# Third-party
import pytz
from psycopg2 import Binary
_logger = logging.getLogger(__name__)
def _set_symbol(symb):
if symb is None or symb == False:
return None
return str(symb)
class _column(object):
__slots__ = ('__dict__','label', 'readonly', 'priority', 'domain', 'context', 'required', 'size', 'on_delete', 'on_update', 'change_default', 'translate', 'selections', 'selectable','filtering','manual', 'help', 'unique','selectable','timezone','obj','rel','id1','id2','offset','limit','check')
def __init__(self, **kwargs):
if len(kwargs) == 0:
raise AttributesImpl
for key in kwargs.keys():
if key in self.__slots__ and key != "__dict__":
setattr(self,key,kwargs[key])
else:
self.__dict__[key] = kwargs[key]
def __contains__(self, name):
if name != "__dict__" and name in self.__slots__:
return True
else:
if name == "__dict__":
return True
else:
return False
def _get_symbol_c(self):
if hasattr(self, '_symbol_c'):
return self._symbol_set[0]
else:
raise AttributesImpl
def _get_symbol_f(self, value):
if hasattr(self, '_symbol_f'):
return self._symbol_set[1](value)
else:
raise AttributesImpl
def _get_attrs(self, attrs = []):
result = {}
if len(attrs) == 0:
la = list(self.__slots__[1:]) + self.__dict__.keys()
else:
la = attrs
for a in la:
if self.__dict__.has_key(a):
result[a] = self.__dict__[a]
else:
if hasattr(self,a):
result[a] = getattr(self,a)
return result
class char(_column):
    """Fixed-length string column (SQL ``character``), default width 32."""
    _classic_read =True
    _classic_write = True
    _prefetch = True
    _type = 'char'
    _db_type = 'character'
    _symbol_c = "%s"
    # Input conversion: None/False -> NULL, everything else -> str().
    _symbol_f = _set_symbol
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, size = 32, change_default = True, translate = False, selectable = False, filtering = None, domain=None, manual = False, help = False, unique = False, check = None):
        # NOTE(review): 'context = {}' is a shared mutable default; it is only
        # forwarded here, but confirm callers never mutate it in place.
        super(char, self).__init__(label=label, readonly=readonly, priority=priority, context=context, required = required, size = size, change_default = change_default, translate = translate, selectable = selectable, filtering=filtering, domain=domain, manual = manual, help=help, unique = unique, check = check)
class varchar(_column):
    """Variable-length string column (SQL ``character varying``)."""
    _classic_read =True
    _classic_write = True
    _prefetch = True
    _type = 'varchar'
    _db_type = 'character varying'
    _symbol_c = "%s"
    # Input conversion: None/False -> NULL, everything else -> str().
    _symbol_f = _set_symbol
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, size = None, change_default = True, translate = False, selectable = False , filtering = None, domain=None, manual = False, help = False, unique = None, check = None):
        super(varchar, self).__init__(label=label, readonly=readonly, priority=priority, context=context, required = required, size = size, change_default = change_default, translate = translate, selectable = selectable, filtering = filtering, domain=domain, manual = manual, help=help, unique = unique, check = check)
class text(_column):
    """Unbounded text column (SQL ``text``); not prefetched with the record."""
    _classic_read =True
    _classic_write = True
    _prefetch = False
    _type = 'text'
    _db_type = 'text'
    _symbol_c = "%s"
    _symbol_f = _set_symbol
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, change_default = True, translate = False, manual = False, help = False):
        super(text,self).__init__(label = label, readonly = readonly, priority = priority, context = context, required = required, change_default = change_default, translate = translate, manual = manual, help = help)
class xml(_column):
    """XML document column (SQL ``xml``); not prefetched with the record."""
    _classic_read =True
    _classic_write = True
    _prefetch = False
    _type = 'xml'
    _db_type = 'xml'
    _symbol_c = "%s"
    _symbol_f = _set_symbol
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, change_default = True, translate = False, manual = False, help = False):
        super(xml,self).__init__(label = label, readonly = readonly, priority = priority, context = context, required = required, change_default = change_default, translate = translate, manual = manual, help= help)
class boolean(_column):
    """Boolean column (SQL ``boolean``)."""
    _classic_read =True
    _classic_write = True
    _prefetch = True
    _type = 'boolean'
    _db_type = 'boolean'
    _symbol_c = "%s"
    # Any truthy value maps to the literal 'True', everything else (including
    # None) to 'False'.
    _symbol_f = lambda x: x and 'True' or 'False'
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = None

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, change_default = True, manual = False, help = False, selectable = None):
        super(boolean,self).__init__(label = label, readonly = readonly, priority = priority, context = context, change_default = change_default, manual = manual, help = help, selectable=selectable)
class integer(_column):
    """Integer column (SQL ``integer``); None/falsy values are coerced to 0."""
    _classic_read =True
    _classic_write = True
    _prefetch = True
    _type = 'integer'
    _db_type = 'integer'
    _symbol_c = "%s"
    _symbol_f = lambda x: int(x or 0)
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: x or 0

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, change_default = True, manual = False, help = False, check = None):
        super(integer,self).__init__(label = label, readonly = readonly, priority = priority, context = context, required = required, change_default= change_default, manual = manual, help = help, check = check)
class double(_column):
    """Floating-point column (SQL ``double precision``), default size (15,3)."""
    _classic_read =True
    _classic_write = True
    _prefetch = True
    _type = 'double'
    _db_type = 'double precision'
    _symbol_c = "%s"
    # NOTE(review): float(None) raises TypeError — unlike integer's 'x or 0'
    # guard, None input is not handled here; confirm whether that is intended.
    _symbol_f = lambda x: float(x) or 0.0
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: x or 0.0

    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, size = (15,3), change_default = True, manual = False, help = False, check = None):
        super(double,self).__init__(label = label, readonly = readonly, priority = priority, context = context, required = required, size = size, change_default = change_default, manual = manual, help = help, check = check)
class decimal(_column):
    """Fixed-point field (maps to a PostgreSQL ``decimal`` column)."""
    # NOTE(review): once this class is defined, the module-level name
    # ``decimal`` refers to this class, shadowing a plain ``import decimal``.
    # The lambdas below resolve ``decimal`` at *call* time, so they would then
    # look up ``Decimal`` on this class and fail with AttributeError unless
    # the stdlib module is made available under this name some other way --
    # TODO confirm how the enclosing module imports the decimal module.
    _classic_read =True
    _classic_write = True
    _prefetch = True
    _type = 'decimal'
    _db_type = 'decimal'
    _symbol_c = "%s"
    # Decimal('0.0') is falsy, so the ``or`` fallback only replaces falsy input.
    _symbol_f = lambda x: decimal.Decimal(x) or decimal.Decimal('0.0')
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: decimal.Decimal(x) or decimal.Decimal('0.0')
    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, size = (15,3), change_default = True, manual = False, help = False, check = None):
        super(decimal,self).__init__(label = label, readonly = readonly, priority = priority, context = context, required = required, size = size, change_default = change_default, manual = manual, help = help, check = check)
class numeric(_column):
    """Arbitrary-precision numeric field (PostgreSQL ``numeric`` column)."""
    _classic_read = True
    _classic_write = True
    _prefetch = True
    _type = 'numeric'
    _db_type = 'numeric'
    _symbol_c = "%s"
    # Bug fix: ``decimal/Decimal('0.0')`` was a division (a typo for attribute
    # access) and raised at call time; it must be ``decimal.Decimal('0.0')``,
    # matching the decimal column above.
    # NOTE(review): at call time ``decimal`` may resolve to the ``decimal``
    # *class* defined earlier in this module rather than the stdlib module --
    # confirm how the module imports the decimal library.
    _symbol_f = lambda x: decimal.Decimal(x) or decimal.Decimal('0.0')
    _symbol_set = (_symbol_c, _symbol_f)
    _symbol_get = lambda self, x: decimal.Decimal(x) or decimal.Decimal('0.0')
    def __init__(self, label = 'unknown', readonly = False, priority = 0, context = {}, required = False, size = (15,3), change_default = True, manual = False, help = False):
        super(numeric,self).__init__(label = label, readonly = readonly, priority = priority, context = context, required = required, size =size, change_default = change_default, manual = manual, help = help)
class selection(_column):
_classic_read =True
_classic_write = True
_prefetch = True
_type = 'selection'
_db_type = 'character varying'
_symbol_c = "%s"
_symbol_f = _set_symbol
_symbol_set = (_symbol_c, _symbol_f)
_symbol_get = None
def __init__(self, label = 'unknown', selections = [],readonly = False, priority = 0, context = {}, required = False, size = 32, change_default = True, translate |
participedia/api | migrations/utils/fake_users.py | Python | mit | 968 | 0.001033 | import csv
import json
fake_usernames = json.loads(open('members.json').read())
print '{} fake users'.format(len(fake_usernames))
csv_reader = csv.DictReader(open('../data-transport/migrations/users.csv'))
users = [user for user in csv_reader]
print '{} real users'.format(len(users))
print '{} merged'.format(len(zip(users, fake_usernames)))
newusers = [{
'name': fake['name'].encode('utf-8'),
'email': fake['email'].encode('utf-8'),
'accepted_date': real['accepted_date'],
'id': real['id'],
'language': real['language'],
'language_1': real['language_1'],
'last_access_date': real['last_access_date'],
'login': real['login']}
for (fak | e, real) in zip(fake_usernames, users)]
fieldnames = sorted(users[-1].keys())
with open('migrations/users.csv' | , 'w') as csvfile:
writer = csv.DictWriter(csvfile, fieldnames=fieldnames, extrasaction='ignore')
writer.writeheader()
for user in newusers:
writer.writerow(user)
|
mitchcapper/mythbox | resources/lib/mysql-connector-python/python2/mysql/connector/cursor.py | Python | gpl-2.0 | 24,598 | 0.005285 | # MySQL Connector/Python - MySQL driver written in Python.
# Copyright (c) 2009, 2012, Oracle and/or its affiliates. All rights reserved.
# MySQL Connector/Python is licensed under the terms of the GPLv2
# <http://www.gnu.org/licenses/old-licenses/gpl-2.0.html>, like most
# MySQL Connectors. There are special exceptions to the terms and
# conditions of the GPLv2 as it is applied to this software, see the
# FOSS License Exception
# <http://www.mysql.com/about/legal/licensing/foss-exception.html>.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Cursor classes
"""
import sys
import weakref
import re
import itertools
from mysql.connector import constants
from mysql.connector import errors
from mysql.connector import utils
RE_SQL_COMMENT = re.compile("\/\*.*\*\/")
RE_SQL_INSERT_VALUES = re.compile(
r'VALUES\s*(\(\s*(?:%(?:\(.*\)|)s\s*(?:,|)\s*)+\))',
re.I | re.M)
RE_SQL_INSERT_STMT = re.compile(r'INSERT\s+INTO', re.I)
RE_SQL_SPLIT_STMTS = re.compile(
r''';(?=(?:[^"'`]*["'`][^"'`]*["'`])*[^"'`]*$)''')
class CursorBase(object):
    """
    Base for defining MySQLCursor. This class is a skeleton and defines
    methods and members as required for the Python Database API
    Specification v2.0.
    It's better to inherit from MySQLCursor.
    """
    def __init__(self):
        # PEP-249 state: -1 rowcount means "nothing executed yet".
        self._description = None
        self._rowcount = -1
        self._last_insert_id = None
        self.arraysize = 1
    def callproc(self, procname, args=()):
        """Call a stored procedure (placeholder; implemented by subclasses)."""
        pass
    def close(self):
        """Close the cursor (placeholder)."""
        pass
    def execute(self, operation, params=()):
        """Execute a statement (placeholder)."""
        pass
    def executemany(self, operation, seqparams):
        """Execute a statement against all parameter sequences (placeholder)."""
        pass
    def fetchone(self):
        """Fetch the next row of the result set (placeholder)."""
        pass
    def fetchmany(self, size=1):
        """Fetch up to ``size`` rows (placeholder)."""
        pass
    def fetchall(self):
        """Fetch all remaining rows (placeholder)."""
        pass
    def nextset(self):
        """Skip to the next available result set (placeholder)."""
        pass
    def setinputsizes(self, sizes):
        """Predefine memory areas for parameters (no-op per PEP-249)."""
        pass
    def setoutputsize(self, size, column=None):
        """Set a column buffer size for large columns (no-op per PEP-249)."""
        pass
    def reset(self):
        """Reset cursor state (placeholder)."""
        pass
    @property
    def description(self):
        """Returns description of columns in a result
        This property returns a list of tuples describing the columns in
        in a result set. A tuple is described as follows::
                (column_name,
                 type,
                 None,
                 None,
                 None,
                 None,
                 null_ok,
                 column_flags)  # Addition to PEP-249 specs
        Returns a list of tuples.
        """
        return self._description
    @property
    def rowcount(self):
        """Returns the number of rows produced or affected
        This property returns the number of rows produced by queries
        such as a SELECT, or affected rows when executing DML statements
        like INSERT or UPDATE.
        Note that for non-buffered cursors it is impossible to know the
        number of rows produced before having fetched them all. For those,
        the number of rows will be -1 right after execution, and
        incremented when fetching rows.
        Returns an integer.
        """
        return self._rowcount
    @property
    def lastrowid(self):
        """Returns the value generated for an AUTO_INCREMENT column
        Returns the value generated for an AUTO_INCREMENT column by
        the previous INSERT or UPDATE statement or None when there is
        no such value available.
        Returns a long value or None.
        """
        return self._last_insert_id
class MySQLCursor | (CursorBase):
"""Default cursor for interacting with MySQL
This cursor will execute statements and handle the result. It will
not automatically fetch all rows.
MySQLCursor should be inherited whenever other functionallity is
required. An example would to change the fetch* member functions
to return dictionaries instead of lists of values.
Implements the Python Database API Specification v2.0 (PEP-249)
"""
    def __init__(self, connection=None):
        """Initialize the cursor; optionally bind it to *connection*."""
        CursorBase.__init__(self)
        self._connection = None
        self._stored_results = []
        # (next_row, end-of-file flag) cached by the fetch machinery.
        self._nextrow = (None, None)
        self._warnings = None
        self._warning_count = 0
        # Last executed statement (string) and its split sub-statements.
        self._executed = None
        self._executed_list = []
        if connection is not None:
            self._set_connection(connection)
    def __iter__(self):
        """
        Iteration over the result set which calls self.fetchone()
        and returns the next row.
        """
        # iter(callable, sentinel): keeps calling fetchone() until it
        # returns None (end of the result set).
        return iter(self.fetchone, None)
    def _set_connection(self, connection):
        """Store a weak proxy to *connection*.

        Raises InterfaceError (errno 2048) when the object is not a usable
        MySQL connection.
        """
        try:
            self._connection = weakref.proxy(connection)
            # Attribute probe: raises AttributeError if this is not a real
            # connection object (TypeError if it cannot be weak-referenced).
            self._connection._protocol
        except (AttributeError, TypeError):
            raise errors.InterfaceError(errno=2048)
def _reset_result(self):
self._rowcount = -1
self._lastrowid = None
self._nextrow = (None, None)
self._stored_results = []
self._warnings = None
self._warning_count = 0
self._description = None
self._executed = None
self._executed_list = []
self.reset()
def _have_unread_result(self):
"""Check whether there is an unread result"""
try:
return self._connection.unread_result
except AttributeError:
return False
    def next(self):
        """
        Used for iterating over the result set. Calls self.fetchone()
        to get the next row.
        """
        # Python 2 iterator protocol: translate both "no more rows" and a
        # dropped connection (InterfaceError) into StopIteration.
        try:
            row = self.fetchone()
        except errors.InterfaceError:
            raise StopIteration
        if not row:
            raise StopIteration
        return row
    def close(self):
        """Close the cursor

        Returns True when successful, otherwise False (already closed).
        """
        if self._connection is None:
            return False
        self._reset_result()
        # Drop the weak proxy; the cursor is unusable from here on.
        self._connection = None
        return True
    def _process_params_dict(self, params):
        """Convert, escape and quote each value of a pyformat parameter dict.

        Returns a dict mapping the original keys to SQL-ready values.
        Raises ProgrammingError when any conversion step fails.
        """
        try:
            # Bind the converter methods once; applied per value below.
            to_mysql = self._connection.converter.to_mysql
            escape = self._connection.converter.escape
            quote = self._connection.converter.quote
            res = {}
            for k,v in params.items():
                c = v
                c = to_mysql(c)
                c = escape(c)
                c = quote(c)
                res[k] = c
        except StandardError, e:
            raise errors.ProgrammingError(
                "Failed processing pyformat-parameters; %s" % e)
        else:
            return res
        return None  # unreachable: the try either raises or the else returns
    def _process_params(self, params):
        """
        Process the parameters which were given when self.execute() was
        called. It does following using the MySQLConnection converter:
        * Convert Python types to MySQL types
        * Escapes characters required for MySQL.
        * Quote values when needed.
        Returns a tuple (or a dict when *params* is a dict).
        """
        # Dict parameters (pyformat style) take a separate code path.
        if isinstance(params,dict):
            return self._process_params_dict(params)
        try:
            res = params
            res = map(self._connection.converter.to_mysql,res)
            res = map(self._connection.converter.escape,res)
            res = map(self._connection.converter.quote,res)
        except StandardError, e:
            raise errors.ProgrammingError(
                "Failed processing format-parameters; %s" % e)
        else:
            return tuple(res)
        return None  # unreachable: the try either raises or the else returns
def _row_to_python( |
ztenma/Swap | itertoolsExt.py | Python | mit | 5,830 | 0.025729 |
import random
import collections
from itertools import *
def take(n, iterable):
    """Return the first *n* items of *iterable* as a list."""
    head = islice(iterable, n)
    return list(head)
def tabulate(function, start=0):
    """Yield function(start), function(start+1), ... indefinitely."""
    return (function(index) for index in count(start))
def consume(iterator, n=None):
    """Advance the iterator n-steps ahead. If n is None, consume entirely.

    Fix: the docstring always promised the n-is-None behaviour but the
    signature had no default, so callers were forced to pass it explicitly;
    ``n=None`` is now the default (matching the itertools recipe).
    """
    # Use functions that consume iterators at C speed.
    if n is None:
        # feed the entire iterator into a zero-length deque
        collections.deque(iterator, maxlen=0)
    else:
        # advance to the empty slice starting at position n
        next(islice(iterator, n, n), None)
def nth(iterable, n, default=None):
    """Return the item at index *n*, or *default* when the iterable is shorter."""
    remainder = islice(iterable, n, None)
    return next(remainder, default)
def quantify(iterable, pred=bool):
    """Count how many times the predicate is true."""
    total = 0
    for item in iterable:
        total += pred(item)
    return total
def padnone(iterable):
    """Yield the sequence elements, then yield None indefinitely.

    Useful for emulating the behavior of the built-in map() function.
    """
    yield from iterable
    while True:
        yield None
def ncycles(iterable, n):
    """Return the full sequence repeated *n* times."""
    pool = tuple(iterable)
    return chain.from_iterable([pool] * n)
def dotproduct(vec1, vec2):
    """Return the sum of the elementwise products of *vec1* and *vec2*.

    Fix: the previous body used ``operator.mul`` but this module never
    imports ``operator``, so every call raised NameError.  A generator
    expression over zip needs no import and is equivalent.
    """
    return sum(a * b for a, b in zip(vec1, vec2))
def flatten(listOfLists):
    """Flatten one level of nesting."""
    return (item for sublist in listOfLists for item in sublist)
def repeatfunc(func, times=None, *args):
    """Repeat calls to func with specified arguments.

    When *times* is None the stream is infinite.
    Example: repeatfunc(random.random)
    """
    arg_stream = repeat(args) if times is None else repeat(args, times)
    return starmap(func, arg_stream)
def pairwise(iterable):
    """s -> (s0,s1), (s1,s2), (s2, s3), ..."""
    stream = iter(iterable)
    previous = next(stream, None)
    for current in stream:
        yield previous, current
        previous = current
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # A single shared iterator consumed n times per output tuple.
    shared = iter(iterable)
    return zip_longest(*([shared] * n), fillvalue=fillvalue)
def roundrobin(*iterables):
    """roundrobin('ABC', 'D', 'EF') --> A D E B F C

    Recipe credited to George Sakkis.
    """
    # Avoids shadowing the builtin ``next`` (the original loop variable).
    remaining = len(iterables)
    fetchers = cycle(iter(it).__next__ for it in iterables)
    while remaining:
        try:
            for fetch in fetchers:
                yield fetch()
        except StopIteration:
            # One input ran dry: rebuild the cycle without it.
            remaining -= 1
            fetchers = cycle(islice(fetchers, remaining))
def partition(pred, iterable):
    """Use a predicate to partition entries into false entries and true entries.

    partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
    """
    false_branch, true_branch = tee(iterable)
    return filterfalse(pred, false_branch), filter(pred, true_branch)
def powerset(iterable):
    """powerset([1,2,3]) --> () (1,) (2,) (3,) (1,2) (1,3) (2,3) (1,2,3)"""
    pool = list(iterable)
    by_size = (combinations(pool, size) for size in range(len(pool) + 1))
    return chain.from_iterable(by_size)
def unique_everseen(iterable, key=None):
    """List unique elements, preserving order. Remember all elements ever seen.

    unique_everseen('AAAABBBCCDAABBB') --> A B C D
    unique_everseen('ABBCcAD', str.lower) --> A B C D
    """
    seen = set()
    if key is None:
        for element in iterable:
            if element not in seen:
                seen.add(element)
                yield element
    else:
        for element in iterable:
            marker = key(element)
            if marker not in seen:
                seen.add(marker)
                yield element
def unique_justseen(iterable, key=None):
    """List unique elements, preserving order. Remember only the element just seen.

    unique_justseen('AAAABBBCCDAABBB') --> A B C D A B
    unique_justseen('ABBCcAD', str.lower) --> A B C A D

    Fix: the previous body used ``itemgetter`` without importing it from
    ``operator``, raising NameError on every call; taking the first element
    of each groupby run is equivalent and needs no import.
    """
    return (next(group) for _unused_key, group in groupby(iterable, key))
def iter_except(func, exception, first=None):
    """Call a function repeatedly until an exception is raised.

    Converts a call-until-exception interface to an iterator interface.
    Like builtins.iter(func, sentinel) but uses an exception instead
    of a sentinel to end the loop.

    Examples:
        iter_except(functools.partial(heappop, h), IndexError)  # priority queue iterator
        iter_except(d.popitem, KeyError)        # non-blocking dict iterator
        iter_except(d.popleft, IndexError)      # non-blocking deque iterator
        iter_except(q.get_nowait, Queue.Empty)  # loop over a producer Queue
        iter_except(s.pop, KeyError)            # non-blocking set iterator
    """
    try:
        if first is not None:
            yield first()  # For database APIs needing an initial cast to db.first()
        while True:
            yield func()
    except exception:
        return
def first_true(iterable, default=False, pred=None):
    """Returns the first true value in the iterable.

    If no true value is found, returns *default*.
    If *pred* is not None, returns the first item
    for which pred(item) is true.
    """
    for value in filter(pred, iterable):
        return value
    return default
def indexFalse(iterable, pred=bool):
    """Return the index of the first false value.

    If there is no false value, return the length of the iterable.
    """
    position = -1
    for position, value in enumerate(iterable):
        if not pred(value):
            return position
    # No false value found: position is the last index (or -1 when empty),
    # so position + 1 equals the iterable's length.
    return position + 1
def random_product(repeat=1, *args):
"Random selection from itertools.product(*args, **kwds)"
pools = [tuple(pool) for pool in args] * repeat
return tuple(random.choice(pool) for pool in pools)
def random_permutation(iterable, r=None):
    """Random selection from itertools.permutations(iterable, r)."""
    pool = tuple(iterable)
    size = len(pool) if r is None else r
    return tuple(random.sample(pool, size))
def random_combination(iterable, r):
    """Random selection from itertools.combinations(iterable, r)."""
    pool = tuple(iterable)
    # Sample r distinct positions, then sort so the result is a combination.
    chosen = sorted(random.sample(range(len(pool)), r))
    return tuple(pool[i] for i in chosen)
def random_combination_with_replacement(iterable, r):
    """Random selection from itertools.combinations_with_replacement(iterable, r)."""
    pool = tuple(iterable)
    size = len(pool)
    # Positions may repeat; sorting yields a combination-with-replacement.
    chosen = sorted(random.randrange(size) for _ in range(r))
    return tuple(pool[i] for i in chosen)
|
luuloe/python-duco | tests/test_enum_types.py | Python | mit | 569 | 0 | """Test methods in duco/modbus.py."""
import unittest
from duco.const import (DUCO_MODULE_TYPE_MASTER,
DUCO_MODULE_TYPE_ACTUATOR_PRINT)
from duco.enum_types import (ModuleType)
class TestModuleType(unittest.TestCase):
    """Unit tests for ModuleType.supported()."""

    def test_supported(self):
        """IDs from DUCO_MODULE_TYPE_MASTER up to 9 are supported; ids just outside are not."""
        for module_id in range(DUCO_MODULE_TYPE_MASTER, 10):
            self.assertTrue(ModuleType.supported(module_id), "msg")
        self.assertFalse(ModuleType.supported(DUCO_MODULE_TYPE_MASTER - 1), "")
        self.assertFalse(ModuleType.supported(
            DUCO_MODULE_TYPE_ACTUATOR_PRINT + 1), "")
|
danielrcardenas/ac-course-2017 | frameworks/pycellchem-2.0/src/NetFraglets.py | Python | apache-2.0 | 2,152 | 0.004647 | #---------------------------------------------------------------------------
#
# NetFraglets: networked examples of Fraglets, a chemically-inspired
# programming language for computer networks
#
# so far only one example available:
# CDP: confirmed delivery protocol
#
# Reference:
#
# C. Tschudin. Fraglets: A metabolistic execution model for communication
# protocols. Proc. 2nd Annual Symposium on Autonomous Intelligent Networks
# and Systems (AINS), July 2003.
#
# python implementation by Lidia Yamamoto, Belgium, October 2013
#
# - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#
# Copyright (C) 2015 Lidia A. R. Yamamoto
# Contact: http://www.artificial-chemistries.org/
#
# This file is part of PyCellChemistry.
#
# PyCellChemistry is free software: you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# version 3, as published by the Free Software Foundation.
#
# PyCellChemistry is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyCellChemistry, see file COPYING. If not, see
# http://www.gnu.org/licenses/
#
from Fraglets import *
from artchem.Cell import *
def cdp():
    """ CDP: confirmed delivery protocol (Python 2 demo).

    Builds two connected Fraglets nodes 'a' and 'b', injects the CDP rule
    molecule into 'a', then periodically injects data molecules and runs
    Gillespie iterations over the two-node network.
    """
    niter = 10
    n1 = Fraglets('a')
    n2 = Fraglets('b')
    # Bidirectional link between the two nodes.
    n1.add_cnx('b', n2)
    n2.add_cnx('a', n1)
    # CDP rule: match a 'c'-tagged molecule, send it to b, and split off a
    # confirmation sent back to a.
    n1.inject(n1.parse('matchp c send b split send a k *'))
    #n1.inject('cd')
    net = Cell()
    net.add(n1)
    net.add(n2)
    # net.run(10)
    print >> sys.stderr, "INIT:"
    n1.trace()
    n2.trace()
    cnt = 0
    for i in range(niter):
        print >> sys.stderr, "ITER=", i
        # Inject a fresh data molecule every 4th iteration.
        if i % 4 == 0:
            mol = 'cd' + str(cnt)
            n1.inject(mol)
            cnt = (cnt + 1) % 10
        net.propensity()
        net.gillespie()
    print >> sys.stderr, "END:"
    n1.trace()
    n2.trace()
if __name__ == '__main__':
    cdp()
|
probml/pyprobml | scripts/xcorr_demo.py | Python | mit | 1,136 | 0.003521 | # adpated from http://docs.scipy.org/doc/scipy-0.15.1/reference/generated/scipy.signal.correlate2d.html
import superimport

import matplotlib.pyplot as plt
import numpy as np
from scipy import signal
from scipy import misc

# Cross-correlation template-matching demo, adapted from the
# scipy.signal.correlate2d documentation example.
print("cross correlation demo")
face = misc.face() - misc.face().mean()
face = face.sum(-1)  # collapse RGB channels into one 2-D intensity image
template = np.copy(face[700:800, 310:380])  # right eye
template -= template.mean()
noisyface = face + np.random.randn(*face.shape) * 50  # add noise
corr = signal.correlate2d(noisyface, template, boundary='symm', mode='same')
# Bug fix: the flat index passed to unravel_index must be np.argmax(corr)
# itself -- the previous ``-1*np.argmax(corr)`` negated it, and
# np.unravel_index rejects negative flat indices.
y, x = np.unravel_index(np.argmax(corr), corr.shape)  # find the match
fig, ((ax_orig, ax_template), (ax_noisy, ax_corr)) = plt.subplots(2, 2)
ax_orig.imshow(face, cmap='gray')
ax_orig.set_title('Original')
ax_orig.set_axis_off()
ax_orig.plot(x, y, 'ro')
ax_template.imshow(template, cmap='gray')
ax_template.set_title('Template')
ax_template.set_axis_off()
ax_noisy.imshow(noisyface, cmap='gray')
ax_noisy.set_title('Noisy')
ax_noisy.set_axis_off()
ax_noisy.plot(x, y, 'ro')
ax_corr.imshow(corr, cmap='gray')
ax_corr.set_title('Cross-correlation')
ax_corr.set_axis_off()
fig.show()
openstack/designate | designate/common/policies/zone_transfer_request.py | Python | apache-2.0 | 4,669 | 0 | # All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import versionutils
from oslo_policy import policy
from designate.common.policies import base
# Deprecated (Wallaby-era) rules retained so operators overriding the legacy
# policy names keep working during the transition to scoped default roles.
DEPRECATED_REASON = """
The zone transfer request API now supports system scope and default roles.
"""
deprecated_create_zone_transfer_request = policy.DeprecatedRule(
    name="create_zone_transfer_request",
    check_str=base.RULE_ADMIN_OR_OWNER,
    deprecated_reason=DEPRECATED_REASON,
    deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_get_zone_transfer_request = policy.DeprecatedRule(
    name="get_zone_transfer_request",
    check_str=base.LEGACY_RULE_ZONE_TRANSFER,
    deprecated_reason=DEPRECATED_REASON,
    deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_get_zone_transfer_request_detailed = policy.DeprecatedRule(
    name="get_zone_transfer_request_detailed",
    check_str=base.RULE_ADMIN_OR_OWNER,
    deprecated_reason=DEPRECATED_REASON,
    deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_update_zone_transfer_request = policy.DeprecatedRule(
    name="update_zone_transfer_request",
    check_str=base.RULE_ADMIN_OR_OWNER,
    deprecated_reason=DEPRECATED_REASON,
    deprecated_since=versionutils.deprecated.WALLABY
)
deprecated_delete_zone_transfer_request = policy.DeprecatedRule(
    name="delete_zone_transfer_request",
    check_str=base.RULE_ADMIN_OR_OWNER,
    deprecated_reason=DEPRECATED_REASON,
    deprecated_since=versionutils.deprecated.WALLABY
)
# Policy rules for the zone transfer request API.
rules = [
    policy.DocumentedRuleDefault(
        name="create_zone_transfer_request",
        check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
        scope_types=['system', 'project'],
        # NOTE(review): description says "Accept" for the create-request
        # operation -- looks like a copy-paste from the accept policy; confirm
        # upstream before changing the user-visible string.
        description="Create Zone Transfer Accept",
        operations=[
            {
                'path': '/v2/zones/{zone_id}/tasks/transfer_requests',
                'method': 'POST'
            }
        ],
        deprecated_rule=deprecated_create_zone_transfer_request
    ),
    policy.DocumentedRuleDefault(
        name="get_zone_transfer_request",
        check_str=base.RULE_ZONE_TRANSFER,
        scope_types=['system', 'project'],
        description="Show a Zone Transfer Request",
        operations=[
            {
                'path': '/v2/zones/tasks/transfer_requests/{zone_transfer_request_id}',  # noqa
                'method': 'GET'
            }
        ],
        deprecated_rule=deprecated_get_zone_transfer_request
    ),
    policy.RuleDefault(
        name="get_zone_transfer_request_detailed",
        check_str=base.SYSTEM_OR_PROJECT_READER,
        scope_types=['system', 'project'],
        # Bug fix: previously referenced deprecated_create_zone_transfer_request
        # (copy-paste), leaving deprecated_get_zone_transfer_request_detailed
        # defined but unused.
        deprecated_rule=deprecated_get_zone_transfer_request_detailed
    ),
    policy.DocumentedRuleDefault(
        name="find_zone_transfer_requests",
        check_str=base.RULE_ANY,
        description="List Zone Transfer Requests",
        operations=[
            {
                'path': '/v2/zones/tasks/transfer_requests',
                'method': 'GET'
            }
        ],
    ),
    policy.RuleDefault(
        name="find_zone_transfer_request",
        check_str=base.RULE_ANY
    ),
    policy.DocumentedRuleDefault(
        name="update_zone_transfer_request",
        check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
        scope_types=['system', 'project'],
        description="Update a Zone Transfer Request",
        operations=[
            {
                'path': '/v2/zones/tasks/transfer_requests/{zone_transfer_request_id}',  # noqa
                'method': 'PATCH'
            }
        ],
        deprecated_rule=deprecated_update_zone_transfer_request
    ),
    policy.DocumentedRuleDefault(
        name="delete_zone_transfer_request",
        check_str=base.SYSTEM_ADMIN_OR_PROJECT_MEMBER,
        scope_types=['system', 'project'],
        description="Delete a Zone Transfer Request",
        operations=[
            {
                'path': '/v2/zones/tasks/transfer_requests/{zone_transfer_request_id}',  # noqa
                'method': 'DELETE'
            }
        ],
        deprecated_rule=deprecated_delete_zone_transfer_request
    )
]
def list_rules():
    """Return the zone transfer request policy rules for registration."""
    return rules
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.