repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
claudep/translate | translate/tools/porestructure.py | Python | gpl-2.0 | 5,484 | 0.002371 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2005, 2006 Zuza Software Foundation
#
# This file is part of translate.
#
# translate is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# translate is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
"""Restructure Gettxt PO files produced by
:doc:`poconflicts </commands/poconflicts>` into the original directory tree
for merging using :doc:`pomerge </commands/pomerge>`.
See: http://docs.translatehouse.org/projects/translate-toolkit/en/latest/commands/pomerge.html
for examples and usage instructions.
"""
import os
import sys
from translate.misc import optrecurse
from translate.storage import po
class SplitOptionParser(optrecurse.RecursiveOptionParser):
    """a specialized Option Parser for posplit"""

    def parse_args(self, args=None, values=None):
        """Parse the command line options, handling implicit input/output args.

        Exits via ``self.error`` when no output directory was given.
        """
        (options, args) = optrecurse.RecursiveOptionParser.parse_args(self, args, values)
        if not options.output:
            # Fixed typo: message previously read "rquired".
            self.error("Output file is required")
        return (options, args)

    def set_usage(self, usage=None):
        """sets the usage string - if usage not given, uses getusagestring for each option"""
        if usage is None:
            self.usage = "%prog " + " ".join([self.getusagestring(option) for option in self.option_list]) + \
                    "\n " + \
                    "input directory is searched for PO files with (poconflicts) comments, all entries are written to files in a directory structure for pomerge"
        else:
            super(SplitOptionParser, self).set_usage(usage)

    def recursiveprocess(self, options):
        """recurse through directories and process files"""
        if not self.isrecursive(options.output, 'output'):
            try:
                self.warning("Output directory does not exist. Attempting to create")
                # TODO: maybe we should only allow it to be created, otherwise
                # we mess up an existing tree.
                os.mkdir(options.output)
            except Exception:
                # Narrowed from a bare except so KeyboardInterrupt/SystemExit
                # still propagate.
                self.error(optrecurse.optparse.OptionValueError("Output directory does not exist, attempt to create failed"))
        if self.isrecursive(options.input, 'input') and getattr(options, "allowrecursiveinput", True):
            if isinstance(options.input, list):
                inputfiles = self.recurseinputfilelist(options)
            else:
                inputfiles = self.recurseinputfiles(options)
        else:
            if options.input:
                inputfiles = [os.path.basename(options.input)]
                options.input = os.path.dirname(options.input)
            else:
                inputfiles = [options.input]
        self.textmap = {}
        self.initprogressbar(inputfiles, options)
        for inputpath in inputfiles:
            fullinputpath = self.getfullinputpath(options, inputpath)
            try:
                success = self.processfile(options, fullinputpath)
            except Exception:
                self.warning("Error processing: input %s" % (fullinputpath), options, sys.exc_info())
                success = False
            self.reportprogress(inputpath, success)
        del self.progressbar

    def processfile(self, options, fullinputpath):
        """process an individual file"""
        inputfile = self.openinputfile(options, fullinputpath)
        inputpofile = po.pofile(inputfile)
        for pounit in inputpofile.units:
            # Headers and plural units are skipped (plurals can't be split
            # safely).
            if not (pounit.isheader() or pounit.hasplural()):  # XXX
                if pounit.hasmarkedcomment("poconflicts"):
                    # The "(poconflicts)" marker comment encodes the original
                    # file path the unit came from; strip it and recover the
                    # path.
                    for comment in pounit.othercomments:
                        if comment.find("# (poconflicts)") == 0:
                            pounit.othercomments.remove(comment)
                            break
                    # TODO: refactor writing out
                    outputpath = comment[comment.find(")") + 2:].strip()
                    self.checkoutputsubdir(options, os.path.dirname(outputpath))
                    fulloutputpath = os.path.join(options.output, outputpath)
                    if os.path.isfile(fulloutputpath):
                        # Fixed: the read handle was previously never closed.
                        with open(fulloutputpath, 'rb') as outputfile:
                            outputpofile = po.pofile(outputfile)
                    else:
                        outputpofile = po.pofile()
                    outputpofile.units.append(pounit)  # TODO:perhaps check to see if it's already there...
                    with open(fulloutputpath, 'wb') as fh:
                        fh.write(outputpofile.serialize())
def main():
    # The real output file extensions are determined by the (poconflicts)
    # comments inside the po files, so the format map only needs a generic
    # po entry.
    po_output = ("po", None)
    formats = {
        (None, None): po_output,
        ("po", "po"): po_output,
        "po": po_output,
    }
    parser = SplitOptionParser(formats, description=__doc__)
    parser.set_usage()
    parser.run()
if __name__ == '__main__':
main()
|
aricaldeira/PySPED | pysped/nfe/leiaute/nfe_310.py | Python | lgpl-2.1 | 81,201 | 0.005439 | # -*- coding: utf-8 -*-
#
# PySPED - Python libraries to deal with Brazil's SPED Project
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira at tauga.com.br>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# PySPED - Bibliotecas Python para o
# SPED - Sistema Público de Escrituração Digital
#
# Copyright (C) 2010-2012
# Copyright (C) Aristides Caldeira <aristides.caldeira arroba tauga.com.br>
#
# Este programa é um software livre: você pode redistribuir e/ou modificar
# este programa sob os termos da licença GNU Affero General Public License,
# publicada pela Free Software Foundation, em sua versão 3 ou, de acordo
# com sua opção, qualquer versão posterior.
#
# Este programa é distribuido na esperança de que venha a ser útil,
# porém SEM QUAISQUER GARANTIAS, nem mesmo a garantia implícita de
# COMERCIABILIDADE ou ADEQUAÇÃO A UMA FINALIDADE ESPECÍFICA. Veja a
# GNU Affero General Public License para mais detalhes.
#
# Você deve ter recebido uma cópia da GNU Affero General Public License
# juntamente com este programa. Caso esse não seja o caso, acesse:
# <http://www.gnu.org/licenses/>
#
from __future__ import (division, print_function, unicode_literals,
absolute_import)
from builtins import str
from io import BytesIO
from pysped.xml_sped import *
from pysped.nfe.leiaute import ESQUEMA_ATUAL_VERSAO_3 as ESQUEMA_ATUAL
from pysped.nfe.leiaute import nfe_200
from pysped.nfe.webservices_nfce_3 import ESTADO_QRCODE, ESTADO_CONSULTA_NFCE
from pysped.nfe.webservices_flags import CODIGO_UF
import os
import binascii
import hashlib
import qrcode
import sys
import unicodedata
DIRNAME = os.path.dirname(__file__)
class Deduc(nfe_200.Deduc):
    # "deduc" group, reused unchanged from the NF-e 2.00 layout.
    def __init__(self):
        super(Deduc, self).__init__()
class ForDia(nfe_200.ForDia):
    # "forDia" group, reused unchanged from the NF-e 2.00 layout.
    def __init__(self):
        super(ForDia, self).__init__()
class Cana(nfe_200.Cana):
    # "cana" group, reused unchanged from the NF-e 2.00 layout.
    def __init__(self):
        super(Cana, self).__init__()
class IPIDevol(XMLNFe):
    """``IPI`` sub-group of ``impostoDevol`` (NF-e layout 3.10).

    Carries the IPI amount returned on goods devolution (tag ``vIPIDevol``).
    """
    def __init__(self):
        super(IPIDevol, self).__init__()
        self.vIPIDevol = TagDecimal(nome='vIPIDevol', codigo='I50', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/impostoDevol/IPI')
    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # The <IPI> wrapper is only emitted when a value is present.
        if self.vIPIDevol.valor:
            xml += '<IPI>'
            xml += self.vIPIDevol.xml
            xml += '</IPI>'
        return xml
    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vIPIDevol.xml = arquivo
    xml = property(get_xml, set_xml)
class ImpostoDevol(XMLNFe):
    """``impostoDevol`` group — taxes returned on goods devolution
    (new in NF-e layout 3.10)."""
    def __init__(self):
        super(ImpostoDevol, self).__init__()
        self.pDevol = TagDecimal(nome='pDevol', codigo='I50', tamanho=[1, 5, 1], decimais=[0, 2, 2], raiz='//det/impostoDevol')
        self.IPI = IPIDevol()

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        # The group is only emitted when a devolution percentage is set.
        if self.pDevol.valor:
            xml += '<impostoDevol>'
            xml += self.pDevol.xml
            xml += self.IPI.xml
            xml += '</impostoDevol>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.pDevol.xml = arquivo
            # Fix: the nested IPI group was serialized by get_xml() but never
            # read back, losing vIPIDevol on a parse round-trip.
            self.IPI.xml = arquivo

    xml = property(get_xml, set_xml)
class ISSQN(nfe_200.ISSQN):
    """``ISSQN`` service-tax group, extended with the fields introduced in
    NF-e layout 3.10 (vDeducao, vOutro, indISS, cServico, etc.)."""
    def __init__(self):
        super(ISSQN, self).__init__()
        self.vAliq = TagDecimal(nome='vAliq' , codigo='U03', tamanho=[1, 5, 1], decimais=[0, 4, 2], raiz='//det/imposto/ISSQN')
        self.cListServ = TagCaracter(nome='cListServ', codigo='U06', tamanho=[5, 5], raiz='//det/imposto/ISSQN')
        self.xListServ = TagCaracter(nome='xListServ', tamanho=[1, 255], raiz='//det/imposto/ISSQN')
        #
        # Fields new in version 3.10
        #
        self.vDeducao = TagDecimal(nome='vDeducao', codigo='U07', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vOutro = TagDecimal(nome='vOutro', codigo='U08', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vDescIncond = TagDecimal(nome='vDescIncond', codigo='U09', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vDescCond = TagDecimal(nome='vDescCond', codigo='U10', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.vISSRet = TagDecimal(nome='vISSRet', codigo='U11', tamanho=[1, 15, 1], decimais=[0, 2, 2], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.indISS = TagCaracter(nome='indISS', codigo='U12', tamanho=[1, 1], raiz='//det/imposto/ISSQN')
        self.cServico = TagCaracter(nome='cServico', codigo='U13', tamanho=[1, 20], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.xServico = TagCaracter(nome='xServico', tamanho=[1, 255], raiz='//det/imposto/ISSQN')
        self.cMun = TagInteiro(nome='cMun' , codigo='U14', tamanho=[7, 7, 7], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.cPais = TagInteiro(nome='cPais' , codigo='U15', tamanho=[4, 4, 4], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.nProcesso = TagCaracter(nome='nProcesso', codigo='U16', tamanho=[1, 30], raiz='//det/imposto/ISSQN', obrigatorio=False)
        self.indIncentivo = TagCaracter(nome='indIncentivo', codigo='U17', tamanho=[1, 1], raiz='//det/imposto/ISSQN', valor='2')

    def get_xml(self):
        # The whole group is omitted unless indISS is filled in.
        if not (self.indISS.valor):
            return ''

        xml = XMLNFe.get_xml(self)
        xml += '<ISSQN>'
        xml += self.vBC.xml
        xml += self.vAliq.xml
        xml += self.vISSQN.xml
        xml += self.cMunFG.xml
        xml += self.cListServ.xml
        xml += self.vDeducao.xml
        xml += self.vOutro.xml
        xml += self.vDescIncond.xml
        xml += self.vDescCond.xml
        xml += self.vISSRet.xml
        xml += self.indISS.xml
        xml += self.cServico.xml
        xml += self.cMun.xml
        xml += self.cPais.xml
        xml += self.nProcesso.xml
        xml += self.indIncentivo.xml
        xml += '</ISSQN>'
        # Restored garbled "retur | n" token from the source dump.
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.vBC.xml = arquivo
            self.vAliq.xml = arquivo
            self.vISSQN.xml = arquivo
            # Restored garbled "self.c | MunFG" token from the source dump.
            self.cMunFG.xml = arquivo
            self.cListServ.xml = arquivo
            self.vDeducao.xml = arquivo
            self.vOutro.xml = arquivo
            self.vDescIncond.xml = arquivo
            self.vDescCond.xml = arquivo
            self.vISSRet.xml = arquivo
            self.indISS.xml = arquivo
            self.cServico.xml = arquivo
            self.cMun.xml = arquivo
            self.cPais.xml = arquivo
            self.nProcesso.xml = arquivo
            self.indIncentivo.xml = arquivo

    xml = property(get_xml, set_xml)
class ICMSUFDest(XMLNFe):
def __init__(self):
super(ICMSUFDest, self).__init__()
self.vBCUFDest = TagDecimal(nome='vBCUFDest', codigo='AI01', tamanho=[1, 13, 1], decimais=[0, 2, 2], raiz='//det/imposto/ICMSUFDest')
self.pFCPUFDest = TagDecimal(nome='pFCPUFDest', codigo='AI02', tamanho=[1, 3, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
self.pICMSUFDest = TagDecimal(nome='pICMSUFDest', codigo='AI03', tamanho=[1, 3, 1], decimais=[2, 4, 2], raiz='//det/imposto/ICMSUFDest')
self.pICMSInter = TagDecimal(nome='pICMSInter', codigo='AI04', tamanho=[1, 3, 1], decimais=[0, 2, 2], raiz='//det/imposto/ICMSUFDest')
self |
miyataken999/weblate | weblate/trans/autofixes/whitespace.py | Python | gpl-3.0 | 1,794 | 0 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2015 Michal Čihař <michal@cihar.com>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import re

from django.utils.translation import ugettext_lazy as _

from weblate.trans.autofixes.base import AutoFix

# Compiled once at import time: the newline variants to normalize, and the
# leading/trailing whitespace runs to transplant onto translations.
# (Restored garbled "uget | text_lazy" token in the import above.)
NEWLINES = re.compile(r'\r\n|\r|\n')
START = re.compile(r'^(\s+)', re.UNICODE)
END = re.compile(r'(\s+)$', re.UNICODE)
class SameBookendingWhitespace(AutoFix):
    '''
    Help non-techy translators with their whitespace
    '''
    name = _('Trailing and leading whitespace')

    def fix_single_target(self, target, source, unit):
        # Use the source string, with newlines normalized to '\n', as the
        # reference for which whitespace should bookend the translation.
        normalized_source = NEWLINES.sub('\n', source)

        # Capture whatever whitespace leads and trails the source.
        lead_match = START.search(normalized_source)
        tail_match = END.search(normalized_source)
        lead = lead_match.group() if lead_match else ''
        tail = tail_match.group() if tail_match else ''

        core = target.strip()
        if not core:
            # Whitespace-only translations are left alone.
            return target, False

        fixed = u''.join((lead, core, tail))
        return fixed, fixed != target
|
Johnzero/erp | openerp/addons/membership/report/report_membership.py | Python | agpl-3.0 | 6,097 | 0.003444 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from osv import fields, osv
import tools
import decimal_precision as dp
STATE = [
('none', 'Non Member'),
('canceled', 'Cancelled Member'),
('old', 'Old Member'),
('waiting', 'Waiting Member'),
('invoiced', 'Invoiced Member'),
('free', 'Free Member'),
('paid', 'Paid Member'),
]
class report_membership(osv.osv):
    '''Membership Analysis'''
    # Read-only reporting model backed by a SQL view (see init()); _auto is
    # False so OpenERP does not create a table for it.
    _name = 'report.membership'
    _description = __doc__
    _auto = False
    _rec_name = 'year'
    _columns = {
        'year': fields.char('Year', size=4, readonly=True, select=1),
        'month': fields.selection([('01', 'January'), ('02', 'February'), \
          ('03', 'March'), ('04', 'April'),\
          ('05', 'May'), ('06', 'June'), \
          ('07', 'July'), ('08', 'August'),\
          ('09', 'September'), ('10', 'October'),\
          ('11', 'November'), ('12', 'December')], 'Month', readonly=True),
        'date_from': fields.datetime('Start Date', readonly=True, help="Start membership date"),
        # Restored garbled help string ("membership da | te") below.
        'date_to': fields.datetime('End Date', readonly=True, help="End membership date"),
        'num_waiting': fields.integer('# Waiting', readonly=True),
        'num_invoiced': fields.integer('# Invoiced', readonly=True),
        'num_paid': fields.integer('# Paid', readonly=True),
        'tot_pending': fields.float('Pending Amount', digits_compute= dp.get_precision('Account'), readonly=True),
        'tot_earned': fields.float('Earned Amount', digits_compute= dp.get_precision('Account'), readonly=True),
        # Restored garbled "many2on | e" token below.
        'partner_id': fields.many2one('res.partner', 'Member', readonly=True),
        'associate_member_id': fields.many2one('res.partner', 'Associate Member', readonly=True),
        'membership_id': fields.many2one('product.product', 'Membership Product', readonly=True),
        'membership_state': fields.selection(STATE, 'Current Membership State', readonly=True),
        'user_id': fields.many2one('res.users', 'Salesman', readonly=True),
        'company_id': fields.many2one('res.company', 'Company', readonly=True)
    }

    def init(self, cr):
        '''Create the view'''
        tools.drop_view_if_exists(cr, 'report_membership')
        cr.execute("""
        CREATE OR REPLACE VIEW report_membership AS (
        SELECT
        MIN(id) AS id,
        partner_id,
        user_id,
        membership_state,
        associate_member_id,
        membership_amount,
        date_from,
        date_to,
        year,
        month,
        COUNT(num_waiting) AS num_waiting,
        COUNT(num_invoiced) AS num_invoiced,
        COUNT(num_paid) AS num_paid,
        SUM(tot_pending) AS tot_pending,
        SUM(tot_earned) AS tot_earned,
        membership_id,
        company_id
        FROM
        (SELECT
        MIN(p.id) AS id,
        p.id AS partner_id,
        p.user_id AS user_id,
        p.membership_state AS membership_state,
        p.associate_member AS associate_member_id,
        p.membership_amount AS membership_amount,
        TO_CHAR(p.membership_start, 'YYYY-MM-DD') AS date_from,
        TO_CHAR(p.membership_stop, 'YYYY-MM-DD') AS date_to,
        TO_CHAR(p.membership_start, 'YYYY') AS year,
        TO_CHAR(p.membership_start,'MM') AS month,
        CASE WHEN ml.state = 'waiting' THEN ml.id END AS num_waiting,
        CASE WHEN ml.state = 'invoiced' THEN ml.id END AS num_invoiced,
        CASE WHEN ml.state = 'paid' THEN ml.id END AS num_paid,
        CASE WHEN ml.state IN ('waiting', 'invoiced') THEN SUM(il.price_subtotal) ELSE 0 END AS tot_pending,
        CASE WHEN ml.state = 'paid' OR p.membership_state = 'old' THEN SUM(il.price_subtotal) ELSE 0 END AS tot_earned,
        ml.membership_id AS membership_id,
        p.company_id AS company_id
        FROM res_partner p
        LEFT JOIN membership_membership_line ml ON (ml.partner = p.id)
        LEFT JOIN account_invoice_line il ON (ml.account_invoice_line = il.id)
        LEFT JOIN account_invoice ai ON (il.invoice_id = ai.id)
        WHERE p.membership_state != 'none' and p.active = 'true'
        GROUP BY
        p.id,
        p.user_id,
        p.membership_state,
        p.associate_member,
        p.membership_amount,
        TO_CHAR(p.membership_start, 'YYYY-MM-DD'),
        TO_CHAR(p.membership_stop, 'YYYY-MM-DD'),
        TO_CHAR(p.membership_start, 'YYYY'),
        TO_CHAR(p.membership_start,'MM'),
        ml.membership_id,
        p.company_id,
        ml.state,
        ml.id
        ) AS foo
        GROUP BY
        year,
        month,
        date_from,
        date_to,
        partner_id,
        user_id,
        membership_id,
        company_id,
        membership_state,
        associate_member_id,
        membership_amount
        )""")

report_membership()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4: |
siosio/intellij-community | python/testData/multipleArgumentsCompletion/slashParameter.after.py | Python | apache-2.0 | 87 | 0.034483 | def foo(a, /, b):
print(a, b)
|
def egg():
| a = 1
b = 2
foo(a, b)<caret> |
stephane-martin/salt-debian-packaging | salt-2016.3.3/tests/unit/transport/ipc_test.py | Python | apache-2.0 | 3,969 | 0.000252 | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Mike Place <mp@saltstack.com>`
'''
# Import python libs
from __future__ import absolute_import
import os
import logging
import tornado.gen
import tornado.ioloop
import tornado.testing
import salt.utils
import salt.config
import salt.exceptions
import salt.transport.ipc
import salt.transport.server
import salt.transport.client
from salt.ext.six.moves import range
# Import Salt Testing libs
import integration
from salttesting.mock import MagicMock
from salttesting.helpers import ensure_in_syspath
log = logging.getLogger(__name__)
ensure_in_syspath('../')
class BaseIPCReqCase(tornado.testing.AsyncTestCase):
    '''
    Test the req server/client pair
    '''
    def setUp(self):
        super(BaseIPCReqCase, self).setUp()
        # Snapshot the IOLoop's registered handlers so tearDown can detect
        # any file descriptors leaked by the test.
        self._start_handlers = dict(self.io_loop._handlers)
        self.socket_path = os.path.join(integration.TMP, 'ipc_test.ipc')
        self.server_channel = salt.transport.ipc.IPCMessageServer(
            self.socket_path,
            io_loop=self.io_loop,
            payload_handler=self._handle_payload,
        )
        self.server_channel.start()
        # Every payload the server receives is recorded here for assertions.
        self.payloads = []
    def tearDown(self):
        super(BaseIPCReqCase, self).tearDown()
        failures = []
        self.server_channel.close()
        os.unlink(self.socket_path)
        # Python 2 idiom (iteritems); any handler missing from the setUp
        # snapshot is a leaked FD and fails the test.
        for k, v in self.io_loop._handlers.iteritems():
            if self._start_handlers.get(k) != v:
                failures.append((k, v))
        if len(failures) > 0:
            raise Exception('FDs still attached to the IOLoop: {0}'.format(failures))
    @tornado.gen.coroutine
    def _handle_payload(self, payload, reply_func):
        # Echo handler: record the payload, send it straight back, and stop
        # the IOLoop when the sender asks for it via {'stop': True}.
        self.payloads.append(payload)
        yield reply_func(payload)
        if isinstance(payload, dict) and payload.get('stop'):
            self.stop()
class IPCMessageClient(BaseIPCReqCase):
    '''
    Test all of the clear msg stuff
    '''
    def _get_channel(self):
        # Build a client channel and block until it has connected.
        channel = salt.transport.ipc.IPCMessageClient(
            socket_path=self.socket_path,
            io_loop=self.io_loop,
        )
        channel.connect(callback=self.stop)
        self.wait()
        return channel

    def setUp(self):
        super(IPCMessageClient, self).setUp()
        self.channel = self._get_channel()

    def tearDown(self):
        # Fix: this previously called the *parent's setUp()* from tearDown,
        # re-creating the server instead of cleaning up.  Close our client
        # first (reverse of setUp order), then run the parent teardown,
        # which checks for leaked IOLoop handlers.
        self.channel.close()
        super(IPCMessageClient, self).tearDown()

    def test_basic_send(self):
        msg = {'foo': 'bar', 'stop': True}
        self.channel.send(msg)
        self.wait()
        self.assertEqual(self.payloads[0], msg)

    def test_many_send(self):
        msgs = []
        self.server_channel.stream_handler = MagicMock()
        for i in range(0, 1000):
            msgs.append('test_many_send_{0}'.format(i))
        for i in msgs:
            self.channel.send(i)
        self.channel.send({'stop': True})
        self.wait()
        self.assertEqual(self.payloads[:-1], msgs)

    def test_very_big_message(self):
        long_str = ''.join([str(num) for num in range(10**5)])
        msg = {'long_str': long_str, 'stop': True}
        self.channel.send(msg)
        self.wait()
        self.assertEqual(msg, self.payloads[0])

    def test_multistream_sends(self):
        local_channel = self._get_channel()
        for c in (self.channel, local_channel):
            c.send('foo')
        self.channel.send({'stop': True})
        self.wait()
        # Restored garbled "assertEqua | l" token from the source dump.
        self.assertEqual(self.payloads[:-1], ['foo', 'foo'])

    def test_multistream_errors(self):
        # Restored garbled "local_cha | nnel" token from the source dump.
        local_channel = self._get_channel()
        for c in (self.channel, local_channel):
            c.send(None)
        for c in (self.channel, local_channel):
            c.send('foo')
        self.channel.send({'stop': True})
        self.wait()
        self.assertEqual(self.payloads[:-1], [None, None, 'foo', 'foo'])
if __name__ == '__main__':
from integration import run_tests
run_tests(IPCMessageClient, needs_daemon=False)
|
Southpaw-TACTIC/TACTIC | src/install/backup/tactic_backup.py | Python | epl-1.0 | 4,220 | 0.007583 | #!/usr/bin/env python3
# This is a simple command line script that can be used to backup the
# TACTIC database. It is independent of TACTIC, so can be run on
# servers where TACTIC is not install with the database.
import datetime
import os
import time
import subprocess
import tacticenv
from pyasm.common import Environment
from pyasm.security import Batch
# Location of zip executable
#ZIP_EXE = "C:\\Users\\user\\Documents\\backups\\7za920\\7za.exe"
ZIP_EXE = "zip"
# Location of all back-up types
BACKUP_DIR = "/spt/tactic/tactic_temp/"
# Locations of different backup types
DB_DIR = "backup_db"
PROJECT_DIR = "backup_tactic"
ASSETS_DIR = "backup_assets"
# Location of TACTIC src code
TACTIC_DIR = "/spt/tactic/tactic/"
class DatabaseBackup(object):
    # Dump all PostgreSQL databases to a dated .sql file under
    # BACKUP_DIR/DB_DIR, then prune dumps older than 30 days.
    # NOTE: TACTIC convention names the receiver "my" instead of "self".
    def execute(my):
        base_dir = "%s%s" % (BACKUP_DIR, DB_DIR)

        import datetime
        now = datetime.datetime.now()
        date = now.strftime("%Y%m%d_%H%M")
        file_name = 'tacticDatabase_%s.sql' % date
        path = "%s/%s" % (base_dir, file_name)
        print("Backing up database to: [%s]" % path)

        # Check if base_dir is exists and writable.
        if not os.path.exists(base_dir):
            os.mkdir(base_dir)

        # Create backup, and if successful, prune old
        # backups.
        try:
            # NOTE(review): goes through the shell and assumes passwordless
            # auth for the "postgres" user — confirm pg_hba/.pgpass setup.
            cmd = 'pg_dumpall -U postgres -c > %s' % path
            os.system(cmd)
        except Exception as e:
            print("Could not run database backup: %s" % e)
        else:
            cmd = PruneBackup()
            cmd.execute(base_dir, 30)

        #cmd = 'gzip -f %s' % path
        #os.system(cmd)
class ProjectBackup(object):
    # Zip the TACTIC source tree (TACTIC_DIR) into a dated archive under
    # BACKUP_DIR/PROJECT_DIR, then prune archives older than 1 day.
    # (Restored the garbled "base_dir" and "datetime.datetime | .now()"
    # tokens from the source dump.)
    def execute(my):
        base_dir = "%s%s" % (BACKUP_DIR, PROJECT_DIR)
        zip_exe = ZIP_EXE
        now = datetime.datetime.now()
        date = now.strftime("%Y%m%d_%H%M")
        file_path = '%s/tactic_%s.zip' % (base_dir, date)

        # Check if base_dir is exists and writable.
        if not os.path.exists(base_dir):
            os.mkdir(base_dir)

        # Create backup, and if successful, prune old
        # backups.
        try:
            subprocess.call([zip_exe, "-r", file_path, TACTIC_DIR])
        except Exception as e:
            print("Could not zip project directory. %s" % e)
        else:
            cmd = PruneBackup()
            cmd.execute(base_dir, 1)
class AssetsBackup(object):
    # Zip the TACTIC asset directory into a dated archive under
    # BACKUP_DIR/ASSETS_DIR, then prune archives older than 3 days.
    def execute(my):
        backup_dir = "%s%s" % (BACKUP_DIR, ASSETS_DIR)
        asset_dir = Environment.get_asset_dir()
        zip_exe = ZIP_EXE

        timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
        archive_path = '%s/assets_%s.zip' % (backup_dir, timestamp)

        # Make sure the destination directory exists before zipping.
        if not os.path.exists(backup_dir):
            os.mkdir(backup_dir)

        # Only prune older archives when the new one was created
        # successfully.
        try:
            subprocess.call([zip_exe, "-r", archive_path, asset_dir])
        except Exception as exc:
            print("Could not zip assets directory: %s" % exc)
        else:
            PruneBackup().execute(backup_dir, 3)
class PruneBackup(object):
    # Housekeeping helper shared by the backup classes above.
    def execute(my, directory, days):
        '''Remove files in *directory* whose inode change time (st_ctime)
        is more than *days* days old.  Entries are unlinked directly, so
        *directory* is expected to contain only backup files.'''
        # Renamed local "dir" -> "directory" use (the old name shadowed the
        # builtin) and replaced the 10-tuple os.stat unpack with st_ctime.
        print("Pruning backup files older than [%s] days" % days)
        today = datetime.datetime.today()
        cutoff = datetime.timedelta(days=days)
        for file_name in os.listdir(directory):
            path = "%s/%s" % (directory, file_name)
            ctime = datetime.datetime.fromtimestamp(os.stat(path).st_ctime)
            if today - ctime > cutoff:
                os.unlink(path)
if __name__ == '__main__':
'''
# TODO
os.system("vacuumdb -U postgres --all --analyze")
'''
Batch()
cmd = DatabaseBackup()
cmd.execute()
cmd = AssetsBackup()
cmd.execute()
cmd = ProjectBackup()
#cmd.execute()
|
ulif/pail | docs/conf.py | Python | lgpl-3.0 | 8,125 | 0.007508 | # -*- coding: utf-8 -*-
#
# pail documentation build configuration file, created by
# sphinx-quickstart on Sat May 4 03:26:35 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.ifconfig']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pail'
copyright = u'2013, Uli Fouquet'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1dev'
import pkg_resources
version = pkg_resources.get_distribution('pail').version
#version = __version__
# The full version, including alpha/beta/rc tags.
release = version
if release.endswith('dev'):
release = '%s (unreleased)' % release
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'paildoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pail.tex', u'pail Documentation',
u'Uli Fouquet', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pail', u'pail Documentation',
[u'Uli Fouquet'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pail', u'pail Documentation',
u'Uli Fouquet', 'pail', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
bhavanaananda/DataStage | test/FileShare/tests/TestWebDAVAccess.py | Python | mit | 7,828 | 0.010731 | # $Id: TestWebDAVAccess.py 1047 2009-01-15 14:48:58Z graham $
#
# Unit testing for FileAccess module
#
import os
# Make sure python-kerberos package is installed
import kerberos
import sys
import httplib
import urllib2
import urllib2_kerberos
import re
import base64
import unittest
from urlparse import urlparse
sys.path.append("../..")
readmetext="This directory is the root of the ADMIRAL shared file system.\n"
mountpoint="mountadmiralwebdav"
readmefile="ADMIRAL.README"
theurl="http://zoo-admiral-ibrg.zoo.ox.ac.uk/webdav/TestUser1"
class TestWebDAVAccess(unittest.TestCase):
def setUp(self):
# mount WebDAV share here
status=os.system('mount '+mountpoint)
self.assertEqual(status, 0, 'Mount failure')
return
def tearDown(self):
os.system('umount '+mountpoint)
return
# Test cases
def testNull(self):
assert (True), "True expected"
return
def testReadMe(self):
# Test assumes ADMIRAL shared file system is mounted at mountpoint
# Open README file
f = open(mountpoint+'/'+readmefile)
assert (f), "README file open failed"
# Read first line
l = f.readline()
# Close file
f.close()
# Check first line
self.assertEqual(l, readmetext, 'Unexpected README content')
return
def testCreateFile(self):
f = open(mountpoint+'/testCreateWebDAVFile.tmp','w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(mountpoint+'/testCreateWebDAVFile.tmp','r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test creation of file\n', 'Unexpected file content')
return
def testUpdateFile(self):
filename = mountpoint+'/testUpdateWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'a+')
f.write('Test update of file\n')
f.close()
f = open(filename,'r')
l1 = f.readline()
l2 = f.readline()
f.close()
self.assertEqual(l1, 'Test creation of file\n', 'Unexpected file content: l1')
self.assertEqual(l2, 'Test update of file\n', 'Unexpected file content: l2')
return
def testRewriteFile(self):
filename = mountpoint+'/testRewriteWebDAVFile.tmp'
f = open(filename,'w+')
assert (f), "File creation failed"
f.write('Test creation of file\n')
f.close()
f = open(filename,'w+')
f.write('Test rewrite of file\n')
f.close()
f = open(filename,'r')
l = f.readline()
f.close()
self.assertEqual(l, 'Test rewrite of file\n', 'Unexpected file content')
return
def testDeleteFile(self):
filename1 = mountpoint+'/testCreateWebDAVFile.tmp'
filename2 = mountpoint+'/testRewriteWebDAVFile.tmp'
filename3 = mountpoint+'/testUpdateWebDAVFile.tmp'
# Test and delete first file
try:
s = os.stat(filename1)
except:
assert (False), "File "+filename1+" not found or other stat error"
os.remove(filename1)
try:
s = os.stat(filename1)
assert (False), | "File "+filename1+" not deleted"
except:
pass
# Test and delete second file
try:
s = os.stat(filename2)
except:
assert (False), "File "+filename2+" not found or other stat error"
os.remove(filename2)
try:
s = os.stat(filename2)
| assert (False), "File "+filename2+" not deleted"
except:
pass
# Test and delete third file
try:
s = os.stat(filename3)
except:
assert (False), "File "+filename3+" not found or other stat error"
os.remove(filename3)
try:
s = os.stat(filename3)
assert (False), "File "+filename3+" not deleted"
except:
pass
return
def testWebDAVFile(self):
h1 = httplib.HTTPConnection('zakynthos.zoo.ox.ac.uk')
h1.request('GET','/webdav')
res=h1.getresponse()
authreq = str(res.status) + ' ' + res.reason
print authreq
self.assertEqual(authreq, '401 Authorization Required', 'Unexpected response')
return
def testWebDAVFileUrlLib(self):
#_ignore = kerberos.GSS_C_DELEG_FLAG
#from kerberos import GSS_C_DELEG_FLAG,GSS_C_MUTUAL_FLAG,GSS_C_SEQUENCE_FLAG
#_ignore, ctx = kerberos.authGSSClientInit('krbtgt/OX.AC.UK@OX.AC.UK', gssflags=GSS_C_DELEG_FLAG|GSS_C_MUTUAL_FLAG|GSS_C_SEQUENCE_FLAG)
_ignore, ctx = kerberos.authGSSClientInit('HTTP@zakynthos.zoo.ox.ac.uk')
_ignore = kerberos.authGSSClientStep(ctx, '')
tgt = kerberos.authGSSClientResponse(ctx)
opener = urllib2.build_opener()
opener.add_handler(urllib2_kerberos.HTTPKerberosAuthHandler())
resp = opener.open(theurl)
print resp
return
req = urllib2.Request(theurl)
try:
handle = urllib2.urlopen(req)
except IOError, e:
pass
else:
assert (False), theurl + " isn't protected by authentication."
if not hasattr(e, 'code') or e.code != 401:
# we got an error - but not a 401 error
assert (False), theurl + " Error: " + e
authline = e.headers['www-authenticate']
# this gets the www-authenticate line from the headers
# which has the authentication scheme and realm in it
authobj = re.compile(
r'''(?:\s*www-authenticate\s*:)?\s*(\w*)\s+realm=['"]([^'"]+)['"]''',
re.IGNORECASE)
# this regular expression is used to extract scheme and realm
matchobj = authobj.match(authline)
if not matchobj:
# if the authline isn't matched by the regular expression
# then something is wrong
assert (False), "Malformed authentication header: " + authline
scheme = matchobj.group(1)
realm = matchobj.group(2)
# here we've extracted the scheme
# and the realm from the header
print scheme
print realm
return
# Sentinel/placeholder tests
def testUnits(self):
assert (True)
def testComponents(self):
assert (True)
def testIntegration(self):
assert (True)
def testPending(self):
assert (False), "No pending test"
# Assemble test suite
from MiscLib import TestUtils
def getTestSuite(select="unit"):
"""
Get test suite
select is one of the following:
"unit" return suite of unit tests only
"component" return suite of unit and component tests
"all" return suite of unit, component and integration tests
"pending" return suite of pending tests
name a single named test to be run
"""
testdict = {
"unit":
[ "testUnits"
, "testNull"
],
"component":
[ "testComponents"
, "testReadMe"
, "testCreateFile"
, "testRewriteFile"
, "testUpdateFile"
, "testDeleteFile"
],
"integration":
[ "testIntegration"
],
"pending":
[ "testPending"
, "testWebDAVFile"
, "testWebDAVFileUrlLib"
]
}
return TestUtils.getTestSuite(TestWebDAVAccess, testdict, select=select)
# Run unit tests directly from command line
if __name__ == "__main__":
TestUtils.runTests("TestFileAccess", getTestSuite, sys.argv)
# End.
|
Akagi201/learning-python | pyramid/MyShop/setup.py | Python | mit | 1,213 | 0 | import os
from setuptools import setup, find_packages
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.txt')) as f:
README = f.read | ()
with open(os.path.join(here, 'CHANGES.txt')) as f:
CHANGES = f.read()
requires = [
'pyramid',
'pyramid_chameleon',
'pyramid_debugtoolbar',
'pyramid_tm',
'SQLAlchemy',
'transaction',
'zope.sqlalchemy',
'waitress',
'pyramid_layout'
]
setup(name='MyShop',
version='0.0',
description='MyShop',
long_description=README + '\n\n' + CHANGES,
classifiers=[
"Programming Language :: Py | thon",
"Framework :: Pyramid",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
],
author='',
author_email='',
url='',
keywords='web wsgi bfg pylons pyramid',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
test_suite='myshop',
install_requires=requires,
entry_points="""\
[paste.app_factory]
main = myshop:main
[console_scripts]
initialize_MyShop_db = myshop.scripts.initializedb:main
""",
)
|
dogancankilment/Yemekci | manage.py | Python | gpl-2.0 | 250 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "yemekci.settings")
from django.core.management | import execute_from_command_line |
execute_from_command_line(sys.argv)
|
tscalf/aws-rds-replica-tools | query_profiler.py | Python | gpl-3.0 | 5,180 | 0.003475 | #!/usr/bin/env python
'''
Query Profiler
Profile a list of queries from a file. Put the output on stdout.
'''
import os
import json
import logging
import time
import pymysql
import boto3
LOGGER = logging.getLogger('query_profile')
LOGGER.setLevel(logging.DEBUG)
# create file handler which logs even debug messages
FH = logging.FileHandler('query_profile.log')
FH.setLevel(logging.DEBUG)
# create console handler with a higher log level
CH = logging.StreamHandler()
CH.setLevel(logging.INFO)
# create formatter and add it to the handlers
FORMATTER = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
FH.setFormatter(FORMATTER)
CH.setFormatter(FORMATTER)
# add the handlers to the logger
LOGGER.addHandler(FH)
LOGGER.addHandler(CH)
LOGGER.debug("Starting up.")
def time_usage(func):
'''
Time the execution of a Function
'''
def wrapper(*args, **kwargs):
'''
Wrapper for timing the Functi | on
'''
beg_ts = time.time()
retval = func(*args, **kwargs)
end_ts = time.time()
LOGGER.info("Function '%s' elapsed time: %f s", func.__name__, (end_ts - beg_ts))
return retval
return wrapper
def get_db_credentials(landscape, environment, dbinstanceidentifier, dbschema):
'''
Get the credentials for the database from SSM.
: | param landscape: The landscape where the product is deployed.
Typical Contry Code: uk, eu, na, us, ca
:param environment: The environment for the product. Typically: preprod or production
:param dbinstanceidentifier: The RDS DB instance Identifier.
Load parameters into SSM as JSON arrays with the following command:
aws ssm put-parameter --type SecureString \
--name "lsm.<landscape>.<environment>.<dbinstanceidentifier>.<dbschema>" \
--value '{"mysql_username": "<username>", "mysql_password": "<password>",\
"EndpointAddress": "<rds instance endpoint>", "EndpointPort": 3306 }' \
--region us-east-2 --overwrite
'''
session = boto3.Session(region_name='us-east-2')
client = session.client('ssm')
separator = '.'
ssm_key = separator.join(["lsm", landscape, environment, dbinstanceidentifier, dbschema])
ssm_vars = client.get_parameters(Names=[ssm_key], WithDecryption=True)
# LOGGER.debug("SSM Paramters: %s", ssm_vars)
return json.loads(ssm_vars['Parameters'][0]['Value'])
def get_environment():
'''
Get the environment setup and return the db credentials
'''
if 'LANDSCAPE' not in os.environ:
os.environ['LANDSCAPE'] = raw_input("Enter the landscape of the taget instance: ")
landscape = os.environ['LANDSCAPE']
if 'ENVIRONMENT' not in os.environ:
os.environ['ENVIRONMENT'] = raw_input("Enter the environment of the taget instance: ")
environment = os.environ['ENVIRONMENT']
if 'HOST_ID' not in os.environ:
os.environ['HOST_ID'] = raw_input("Enter the host id of the taget instance: ")
host_id = os.environ['HOST_ID']
if 'DBSCHEMA' not in os.environ:
os.environ['DBSCHEMA'] = raw_input("Enter the schema name to execute queries against: ")
return get_db_credentials(landscape=landscape, environment=environment,
dbinstanceidentifier=host_id, dbschema=os.environ['DBSCHEMA'])
@time_usage
def time_query(cursor, query):
'''
Time the execution of a query.
'''
cursor.execute(query)
@time_usage
def main():
'''
Do some work
'''
creds = get_environment()
dbschema = os.environ['DBSCHEMA']
if 'HOST_OVERIDE' not in os.environ:
if 'DB_HOST' not in os.environ:
os.environ['DB_HOST'] = raw_input("Enter the Override Hostname: ")
db_host = os.environ['DB_HOST']
else:
db_host = creds['EndpointAddress']
LOGGER.debug("DEBUG: Connecting to database %s", db_host)
if 'QUERY_FILE' not in os.environ:
os.environ['QUERY_FILE'] = raw_input("Enter the name of the file where queries live: ")
try:
mysql_connect = pymysql.connect(
host=db_host.encode('ascii', 'ignore'),
port=3306, database=dbschema,
user=creds['mysql_username'], password=creds['mysql_password'],
connect_timeout=5)
LOGGER.debug("Connected as: %s", creds['mysql_username'])
except Exception as exc:
LOGGER.error("Failed to connect to %s", db_host)
LOGGER.error("Error: %s", exc.args)
return
try:
cursor = mysql_connect.cursor()
query_file = open(os.environ['QUERY_FILE'])
for query in query_file:
LOGGER.debug("Executing query: %s", query)
time_query(cursor, query)
# cursor.execute(query)
# LOGGER.debug("Query Start: %s", start)
# LOGGER.debug("Query end: %s", end)
# LOGGER.info("Query %s", query)
# LOGGER.info("Execution time %f microseconds", end - start)
cursor.close()
mysql_connect.commit()
except Exception as exc:
LOGGER.error("Query execution failed. %s", exc)
finally:
mysql_connect.close()
if __name__ == '__main__':
main()
|
MuckRock/muckrock | muckrock/accounts/migrations/0042_auto_20180803_0915.py | Python | agpl-3.0 | 709 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-08-03 13:15
# Django
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('accounts', '0041_profile_avatar_url'),
]
operations = [
migrations.RemoveField(
model_name='profile',
name='confirmation_key',
),
migrations.AddField(
| model_name='profile',
name='full_name',
field=models.CharField(blank=True, max_length=255),
),
| migrations.AddField(
model_name='profile',
name='uuid',
field=models.UUIDField(blank=True, null=True),
),
]
|
razvanm/fs-expedition | heatmap.py | Python | unlicense | 464 | 0.00431 | #!/usr/bin/env python
import sys
line = sys.stdin.readline() # skip the header
line = sys.stdin.readline()
all | = {}
while line:
v = line.split()
if v[0] not in all:
all[v[0]] = set()
all[v[0]].add(v[1])
line = sys.stdin.readline()
s = [k for (_, k) in sorted([(len(v), k) for (k,v) in all.items()])]
print ' '.join(reversed(s))
for i | in s:
print i,
for j in reversed(s):
print len(all[i].intersection(all[j])),
print
|
adam-iris/mailman | src/mailman/bin/checkdbs.py | Python | gpl-3.0 | 7,696 | 0.00104 | # Copyright (C) 1998-2014 by the Free Software Foundation, Inc.
#
# This file is part of GNU Mailman.
#
# GNU Mailman is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free
# Software Foundation, either version 3 of the License, or (at your option)
# any later version.
#
# GNU Mailman is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# GNU Mailman. If not, see <http://www.gnu.org/licenses/>.
import sys
import time
import optparse
from email.Charset import Charset
from mailman import MailList
from mailman import Utils
from mailman.app.requests import handle_request
from mailman.configuration import config
from mailman.core.i18n import _
from mailman.email.message import UserNotification
from mailman.initialize import initialize
from mailman.interfaces.requests import IListRequests, RequestType
from mailman.version import MAILMAN_VERSION
# Work around known problems with some RedHat cron daemons
import signal
signal.signal(signal.SIGCHLD, signal.SIG_DFL)
NL = u'\n'
now = time.time()
def parseargs():
parser = optparse.OptionParser(version=MAILMAN_VERSION,
usage=_("""\
%prog [options]
Check for pending admin requests and mail the list owners if necessary."""))
parser.add_option('-C', '--config',
help=_('Alternative configuration file to use'))
opts, args = parser.parse_args()
if args:
parser.print_help()
print(_('Unexpected arguments'), file=sys.stderr)
sys.exit(1)
return opts, args, parser
def pending_requests(mlist):
# Must return a byte string
lcset = mlist.preferred_language.charset
pending = []
first = True
requestsdb = IListRequests(mlist)
for request in requestsdb.of_type(RequestType.subscription):
if first:
pending.append(_('Pending subscriptions:'))
first = False
key, data = requestsdb.get_request(request.id)
when = data['when']
addr = data['addr']
fullname = data['fullname']
passwd = data['passwd']
digest = data['digest']
lang = data['lang']
if fullname:
if isinstance(fullname, unicode):
fullname = fullname.encode(lcset, 'replace')
fullname = ' (%s)' % fullname
pending.append(' %s%s %s' % (addr, fullname, time.ctime(when)))
first = True
fo | r request in requestsdb.of_type(RequestType.held_message):
if first:
pending.append(_('\nPending posts:'))
first = False
key, data = requestsdb.get_request(request.id)
when = data['when']
sender = data['sender']
subject = data['subject']
reason = data['reason'] |
text = data['text']
msgdata = data['msgdata']
subject = Utils.oneline(subject, lcset)
date = time.ctime(when)
reason = _(reason)
pending.append(_("""\
From: $sender on $date
Subject: $subject
Cause: $reason"""))
pending.append('')
# Coerce all items in pending to a Unicode so we can join them
upending = []
charset = mlist.preferred_language.charset
for s in pending:
if isinstance(s, unicode):
upending.append(s)
else:
upending.append(unicode(s, charset, 'replace'))
# Make sure that the text we return from here can be encoded to a byte
# string in the charset of the list's language. This could fail if for
# example, the request was pended while the list's language was French,
# but then it was changed to English before checkdbs ran.
text = NL.join(upending)
charset = Charset(mlist.preferred_language.charset)
incodec = charset.input_codec or 'ascii'
outcodec = charset.output_codec or 'ascii'
if isinstance(text, unicode):
return text.encode(outcodec, 'replace')
# Be sure this is a byte string encodeable in the list's charset
utext = unicode(text, incodec, 'replace')
return utext.encode(outcodec, 'replace')
def auto_discard(mlist):
# Discard old held messages
discard_count = 0
expire = config.days(mlist.max_days_to_hold)
requestsdb = IListRequests(mlist)
heldmsgs = list(requestsdb.of_type(RequestType.held_message))
if expire and heldmsgs:
for request in heldmsgs:
key, data = requestsdb.get_request(request.id)
if now - data['date'] > expire:
handle_request(mlist, request.id, config.DISCARD)
discard_count += 1
mlist.Save()
return discard_count
# Figure out epoch seconds of midnight at the start of today (or the given
# 3-tuple date of (year, month, day).
def midnight(date=None):
if date is None:
date = time.localtime()[:3]
# -1 for dst flag tells the library to figure it out
return time.mktime(date + (0,)*5 + (-1,))
def main():
opts, args, parser = parseargs()
initialize(opts.config)
for name in config.list_manager.names:
# The list must be locked in order to open the requests database
mlist = MailList.MailList(name)
try:
count = IListRequests(mlist).count
# While we're at it, let's evict yesterday's autoresponse data
midnight_today = midnight()
evictions = []
for sender in mlist.hold_and_cmd_autoresponses.keys():
date, respcount = mlist.hold_and_cmd_autoresponses[sender]
if midnight(date) < midnight_today:
evictions.append(sender)
if evictions:
for sender in evictions:
del mlist.hold_and_cmd_autoresponses[sender]
# This is the only place we've changed the list's database
mlist.Save()
if count:
# Set the default language the the list's preferred language.
_.default = mlist.preferred_language
realname = mlist.real_name
discarded = auto_discard(mlist)
if discarded:
count = count - discarded
text = _('Notice: $discarded old request(s) '
'automatically expired.\n\n')
else:
text = ''
if count:
text += Utils.maketext(
'checkdbs.txt',
{'count' : count,
'mail_host': mlist.mail_host,
'adminDB' : mlist.GetScriptURL('admindb',
absolute=1),
'real_name': realname,
}, mlist=mlist)
text += '\n' + pending_requests(mlist)
subject = _('$count $realname moderator '
'request(s) waiting')
else:
subject = _('$realname moderator request check result')
msg = UserNotification(mlist.GetOwnerEmail(),
mlist.GetBouncesEmail(),
subject, text,
mlist.preferred_language)
msg.send(mlist, **{'tomoderators': True})
finally:
mlist.Unlock()
if __name__ == '__main__':
main()
|
jammers-ach/pywolf3d | pywolf3d/player.py | Python | mit | 3,285 | 0.008219 | from ursina import *
class Wolf3dPlayer(Entity):
def __init__(self, **kwargs):
super().__init__()
self.speed = 5
self.position = (1,5,1)
self.height = 0.5
self.camera_pivot = Entity(parent=self, y=self.height)
self.cursor = Entity(parent=camera.ui, model='quad', color=color.pink, scale=.008, rotation_z=45)
camera.parent = self.camera_pivot
camera.position = (0,0,0)
camera.rotation = (0,0,0)
camera.fov = 90
mouse.locked = True
self.mouse_sensitivity = Vec2(40, 40)
self.target_smoothing = 100
self.smoothing = self.target_smoothing
self.gravity = 1
self.grounded = True
self.jump_height = .5
self.jump_duration = .5
self.jumping = False
self.air_time = 0
for key, value in kwargs.items():
setattr(self, key ,value)
def upda | te(self):
rotate_sprites = False
if application.development_mode:
self.y -= held_keys['e']
self.y += held_keys['q']
if held_keys['a']:
self.rotation_y -= 1
rotate_sprites = True
if held_keys['d']:
self.rotation_y += 1
rotate_sprites = True
if held_keys['w'] or held_keys['s']:
rotate_sprites = True
self.dir | ection = Vec3(
self.forward * (held_keys['w'] - held_keys['s'])
).normalized()
origin = self.world_position + (self.up*.5)
hit_info = raycast(origin , self.direction, ignore=[self,], distance=.5, debug=False)
if not hit_info.hit:
self.position += self.direction * self.speed * time.dt
if rotate_sprites and self.level:
for s in self.level.sprites:
s.face(self)
if self.gravity:
# # gravity
offset = (0,2,0)
ray = boxcast(self.world_position + offset, self.down, ignore=(self,), thickness=.9)
if ray.distance <= 2:
if not self.grounded:
self.land()
self.grounded = True
# make sure it's not a wall and that the point is not too far up
if ray.world_normal.y > .7 and ray.world_point.y - self.world_y < .5: # walk up slope
self.y = ray.world_point[1]
return
else:
self.grounded = False
# if not on ground and not on way up in jump, fall
self.y -= min(self.air_time, ray.distance-.05)
self.air_time += time.dt * .25 * self.gravity
def input(self, key):
if key == 'space' and mouse.hovered_entity \
and hasattr(mouse.hovered_entity.parent, 'open'):
mouse.hovered_entity.parent.open()
def jump(self):
if not self.grounded:
return
self.grounded = False
self.animate_y(self.y+self.jump_height, self.jump_duration, resolution=120, curve=curve.out_expo)
invoke(self.start_fall, delay=self.jump_duration)
def start_fall(self):
self.y_animator.pause()
self.jumping = False
def land(self):
# print('land')
self.air_time = 0
self.grounded = True
|
ucsd-ccbb/Oncolist | src/restLayer/app/ingest.py | Python | mit | 2,103 | 0.000476 | import re
import csv
import pymongo
import itertools
from bioservices import WikiPathways
import app
from util import save_file_metadata, split_id, is_numeric, is_boolean
log = app.get_logger('ingest')
def ingest(filepath):
_id, _ = split_id(filepath)
client = pymongo.MongoClient()
meta = client.files.meta.find_one({'_id': _id})
if meta:
parser = meta['parser']
if parser == 'tsv':
data = ingest_tsv(filepath)
else:
raise NotImplementedError('unknown parser %s'.format(parser))
client.files[str(_id)].insert(data)
return save_file_metadata(filepath, status='success', count=len(data))
else:
save_file_metadata(filepath, status='error')
raise LookupError('no metadata found for {}'.format(filepath))
def ingest_tsv(filepath):
log.info('ingesting %s as tsv file', filepath)
save_file_metadata(filepath, status='parsing', filetype='tsv')
with open(filepath, 'rU') as fid:
reader = c | sv.reader(fid, delimiter='\t')
header = reader.next()
log.debug("%d columns: %s", len(header), ", ".join(header))
if len(header) == 0:
raise ValueError('header row must contain at least one column')
keys = [normalize_column_name(h) for h in header]
def parse(row):
if len(keys) == len(row):
return dict(zip(keys, row))
parsed = [parse(row) for row in | reader]
parsed = [v for v in parsed if v is not None]
header = [{'raw': h, 'key': k} for h, k in itertools.izip(header, keys)]
for h in header:
data = [p[h['key']] for p in parsed]
if all(is_boolean(d) for d in data):
h['datatype'] = 'boolean'
elif all(is_numeric(d) for d in data):
h['datatype'] = 'numeric'
else:
h['datatype'] = 'string'
save_file_metadata(filepath, headers=header)
return parsed
# replace all " " with "_"
def normalize_column_name(name):
return re.sub(r'\W+', '_', name.lower())
|
relic7/prodimages | mozu/RESTClient.py | Python | mit | 22,278 | 0.009875 | #!/usr/bin/env python
# coding: utf-8
from os import environ
from mozu_image_util_functions import log
## Import and set static connection environmental variables -- sql_alchemy_uri, mozu_tenant etc.
from base_config import authenticate, set_environment
set_environment()
# Import initial static vars and Auth func Can set using environ or static
__base_protocol__ = environ['MOZU_PROTOCOL']
__base_url__ = environ['MOZU_BASE_URL']
__listFQN__ = environ['MOZU_LIST_FQN']
__documentTypeFQN__ = environ['MOZU_DOCUMENT_TYPE_FQN']
__master_catalogid__ = environ['MOZU_MASTER_CATALOG_ID']
__tenant_name__ = environ['MOZU_TENANT_NAME']
### build Mozu API Url Strings from env vars
__tenant_url__ = "{ | 0}://t{1}.{2}".format(__base_protocol__, __tenant_name__,__base_url__ )
__document_data_api__ | = __tenant_url__ + "/api/content/documentlists/" + __listFQN__ + "/documents"
__document_tree_api__ = __tenant_url__ + "/api/content/documentlists/" + __listFQN__ + "/documentTree"
### valid keys for filtering insert fields and other query fields or args`
__mozu_image_table_valid_keys__ = [ 'id', 'bf_imageid', 'mz_imageid', 'md5checksum', 'created_date', 'modified_date', 'updated_count' ]
__mozu_query_filter_valid_keys__ = [ 'sortBy', 'filter', 'responseFields', 'pageSize', 'startIndex', 'includeInactive' ]
__mozu_query_filter_valid_operators__ = [ 'sw', 'cont', 'in' ]
__mozu_document_filter_valid_keys__ = [ 'name', 'filter', 'responseFields', 'includeInactive' ]
class MozuBflyDocument:
pass
class MozuAlchemyClient:
pass
class MozuRestClient(object):
"""Class to interact with Mozus REST API interface -- MozuRestClient"""
# Class http_status_code
http_status_code = 777
__endpoints = {}
__listFQN = __listFQN__
__documentTypeFQN = __documentTypeFQN__
__tenant_name = __tenant_name__
__master_catalogid = __master_catalogid__
### build Mozu API Url String
__tenant_url = __tenant_url__
__document_data_api = __document_data_api__
__document_tree_api = __document_tree_api__
@log
def __init__(self, **kwargs):
# MozuRestClient.__endpoints["endpoint_resource_doclist"] = MozuRestClient.__document_data_api
# self.mz_imageid = kwargs.get('mz_imageid', '')
# if type(self.mz_imageid) == str:
# self.document_resource = MozuRestClient.__tenant_url + "/api/content/documentlists/" + MozuRestClient.__listFQN + "/documents/" + self.mz_imageid
# self.document_resource_content = MozuRestClient.__tenant_url + "/api/content/documentlists/" + MozuRestClient.__listFQN + "/documents/" + self.mz_imageid + "/content"
# self.document_metadata_resource = MozuRestClient.__tenant_url + "/api/content/documentlists/" + MozuRestClient.__listFQN + "/documents/" + self.mz_imageid
# MozuRestClient.__endpoints["endpoint_resource_doc_content"] = self.document_resource_content
# MozuRestClient.__endpoints["endpoint_resource_doc_metadata"] = self.document_metadata_resource
# elif len(self.bf_imageid) >= 9:
# self.document_tree_resource_content = MozuRestClient.__tenant_url + "/api/content/documentlists/" + MozuRestClient.__listFQN + "/documentTree/" + self.bf_imageid + "/content" ## ?folderPath={folderPath}&folderId={folderId}
# MozuRestClient.__endpoints["endpoint_resource_doc_tree_content"] = self.document_tree_resource_content
# Auth / Connect
self.accessToken = authenticate()
# Headers / Data-Payload and Filters
self.headers = {'Content-type': 'application/json', 'x-vol-app-claims': self.accessToken, 'x-vol-tenant': MozuRestClient.__tenant_name, 'x-vol-master-catalog': MozuRestClient.__master_catalogid } #, 'x-vol-dataview-mode': 'Pending', # ??'x-vol-site' : '1', }
## TODO does the logic and order below with src_filepath and bf_imageid work or should bf_imageid be first?
if kwargs.get('src_filepath'):
self.bf_imageid, self.ext = kwargs.get('src_filepath').split('/')[-1].split('.')
self.ext = self.ext.lower() # self.bf_imageid.split('.')[-1].lower()
elif kwargs.get('bf_imageid'):
self.bf_imageid = kwargs.get('bf_imageid')
else:
self.bf_imageid, self.ext = '', ''
self.mz_imageid = kwargs.get('mz_imageid', '')
## Tags - Keywords - Metadata
self.properties = {'tags': kwargs.get('tags','')}
# Build Data Payload
self.document_payload = self.set_document_payload(**kwargs) #{'listFQN' : MozuRestClient.__listFQN, 'documentTypeFQN' : MozuRestClient.__documentTypeFQN, 'name' : self.bf_imageid, 'extension' : self.ext, 'properties': self.properties}
print 'Document Payload Set, Response Initialized'
self.request_url_string = self.set_query_string(**kwargs)
print kwargs, "End Init -- kwargs"
#super(MozuRestClient, self).__init__(**kwargs)
@log
def __str__(self):
print "MozuID: {0}\tBflyID: {1}".format(self.mz_imageid, self.bf_imageid)
return "MZID: %s - BFID: %s - Status: %i" % (self.mz_imageid, self.bf_imageid , MozuRestClient.http_status_code)
#@log
def __repr__(self):
dictrepr = dict.__repr__(self.__dict__)
return '{0}({1})'.format(type(self).__name__, dictrepr)
@log
def __setitem__(self, key, value):
#dict.__setitem__(self, key, value)
self.__dict__[key] = value
#self[key] = value
@log
def __getitem__(self, key):
#return dict.__getitem__(self, key)
#return self[key]
return self.__dict__[key]
@log
def __delitem__(self, key):
del self.__getitem__(dict)[key]
@log
def __contains__(self, key, value):
return self.__getitem__(dict(self)).__contains__(key)
#return dict.__getitem__(self).__contains__(value)
#@log
#def update(self, *args, **kwargs):
# print 'update', args, kwargs
# for k, v in dict(*args, **kwargs).iteritems():
# self[k] = v
# @property
    @log
    def set_document_payload(self, **kwargs):
        """Refresh ids/extension/properties from *kwargs* and rebuild the Mozu
        document payload dict (also cached on ``self.document_payload``)."""
        self.bf_imageid = kwargs.get('bf_imageid', self.bf_imageid)
        self.mz_imageid = kwargs.get('mz_imageid', self.mz_imageid)
        # NOTE(review): unlike the ids above, a missing 'ext' kwarg resets the
        # extension to 'png' instead of keeping self.ext -- confirm intended.
        self.ext = kwargs.get('ext', 'png')
        self.properties = kwargs.get('properties', self.properties)
        self.document_payload = {'listFQN' : MozuRestClient.__listFQN, 'documentTypeFQN' : MozuRestClient.__documentTypeFQN, 'name' : self.bf_imageid, 'extension' : self.ext, 'properties': self.properties}
        print("Setting Document Payload\n\t{}".format(self.document_payload))
        return self.document_payload
#document_payload = property(set_document_payload)
    @log
    def set_endpoint_uri(self, **kwargs):
        """Rebuild the class-level endpoint URI table from the current
        bf/mz image ids and return it."""
        self.bf_imageid = kwargs.get('bf_imageid', self.bf_imageid)
        self.mz_imageid = kwargs.get('mz_imageid', self.mz_imageid)
        # Endpoints are chained: doclist -> doc metadata -> doc content.
        MozuRestClient.__endpoints["endpoint_resource_doclist"] = MozuRestClient.__document_data_api
        MozuRestClient.__endpoints["endpoint_resource_doc_metadata"] = MozuRestClient.__endpoints["endpoint_resource_doclist"] + '/' + self.mz_imageid
        MozuRestClient.__endpoints["endpoint_resource_doc_content"] = MozuRestClient.__endpoints["endpoint_resource_doc_metadata"] + "/content"
        # Tree endpoint addresses the document by (butterfly) name instead of id.
        MozuRestClient.__endpoints["endpoint_resource_doc_tree_content"] = MozuRestClient.__document_tree_api + '/' + self.bf_imageid + "/content"
        print("Setting Endpoints\n\t{}".format(MozuRestClient.__endpoints))
        return MozuRestClient.__endpoints
#endpoint_uri = property(set_endpoint_uri)
@log
def set_query_string(self,**kwargs):
from mozu_image_util_functions import include_keys
from urllib import urlencode, unquote
## Default qstring params camel cased to adhere to mozu format
if kwargs.get("name"): # or kwargs.get("bf_imageid"):
kwargs['name'] = kwargs.get("name")
kwargs["pageSize"] = kwargs.get("page_size |
tonytan4ever/canary | canary/openstack/common/gettextutils.py | Python | apache-2.0 | 16,232 | 0.00037 | # Copyright 2012 Red Hat, Inc.
# Copyright 2013 IBM Corp.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
gettext for openstack-common modules.
Usual usage in an openstack.common module:
from billing.openstack.common.gettextutils import _
"""
import copy
import gettext
import locale
from logging import handlers
import os
import re
from babel import localedata
import six
_localedir = os.environ.get('canary'.upper() + '_LOCALEDIR')
_t = gettext.translation('canary', localedir=_localedir, fallback=True)
_AVAILABLE_LANGUAGES = {}
USE_LAZY = False
def enable_lazy():
    """Convenience function for configuring _() to use lazy gettext
    Call this at the start of execution to enable the gettextutils._
    function to use lazy gettext functionality. This is useful if
    your project is importing _ directly instead of using the
    gettextutils.install() way of importing the _ function.
    """
    global USE_LAZY
    USE_LAZY = True
def _(msg):
    """Translate *msg* immediately, or wrap it for deferred translation.

    When lazy mode is enabled (see enable_lazy()), a Message object is
    returned so translation can happen later; otherwise the module-level
    translation object is used right away.
    """
    if USE_LAZY:
        return Message(msg, domain='billing')
    # Python 3's gettext() already returns text; Python 2 needs ugettext()
    # for the unicode result.
    return _t.gettext(msg) if six.PY3 else _t.ugettext(msg)
def install(domain, lazy=False):
    """Install a _() function using the given translation domain.
    Given a translation domain, install a _() function using gettext's
    install() function.
    The main difference from gettext.install() is that we allow
    overriding the default localedir (e.g. /usr/share/locale) using
    a translation-domain-specific environment variable (e.g.
    NOVA_LOCALEDIR).
    :param domain: the translation domain
    :param lazy: indicates whether or not to install the lazy _() function.
                 The lazy _() introduces a way to do deferred translation
                 of messages by installing a _ that builds Message objects,
                 instead of strings, which can then be lazily translated into
                 any available locale.
    """
    if lazy:
        # NOTE(mrodden): Lazy gettext functionality.
        #
        # The following introduces a deferred way to do translations on
        # messages in OpenStack. We override the standard _() function
        # and % (format string) operation to build Message objects that can
        # later be translated when we have more information.
        def _lazy_gettext(msg):
            """Create and return a Message object.
            Lazy gettext function for a given domain, it is a factory method
            for a project/module to get a lazy gettext function for its own
            translation domain (i.e. nova, glance, cinder, etc.)
            Message encapsulates a string so that we can translate
            it later when needed.
            """
            return Message(msg, domain=domain)
        # Install _() into builtins so every module sees the lazy variant.
        from six import moves
        moves.builtins.__dict__['_'] = _lazy_gettext
    else:
        # Eager installation: honour the <DOMAIN>_LOCALEDIR override if set.
        localedir = '%s_LOCALEDIR' % domain.upper()
        if six.PY3:
            gettext.install(domain,
                            localedir=os.environ.get(localedir))
        else:
            gettext.install(domain,
                            localedir=os.environ.get(localedir),
                            unicode=True)
class Message(six.text_type):
"""A Message object is a unicode object that can be translated.
Translation of Message is done explicitly using the translate() method.
For all non-translation intents and purposes, a Message is simply unicode,
and can be treated as such.
"""
    def __new__(cls, msgid, msgtext=None, params=None, domain='billing', *args):
        """Create a new Message object.
        In order for translation to work gettext requires a message ID, this
        msgid will be used as the base unicode text. It is also possible
        for the msgid and the base unicode text to be different by passing
        the msgtext parameter.
        """
        # If the base msgtext is not given, we use the default translation
        # of the msgid (which is in English) just in case the system locale is
        # not English, so that the base text will be in that locale by default.
        if not msgtext:
            msgtext = Message._translate_msgid(msgid, domain)
        # We want to initialize the parent unicode with the actual object that
        # would have been plain unicode if 'Message' was not enabled.
        msg = super(Message, cls).__new__(cls, msgtext)
        # State needed to re-translate later: the original message id, the
        # gettext domain, and any %-formatting parameters applied so far.
        msg.msgid = msgid
        msg.domain = domain
        msg.params = params
        return msg
    def translate(self, desired_locale=None):
        """Translate this message to the desired locale.
        :param desired_locale: The desired locale to translate the message to,
                               if no locale is provided the message will be
                               translated to the system's default locale.
        :returns: the translated message in unicode
        """
        translated_message = Message._translate_msgid(self.msgid,
                                                      self.domain,
                                                      desired_locale)
        if self.params is None:
            # No need for more translation
            return translated_message
        # This Message object may have been formatted with one or more
        # Message objects as substitution arguments, given either as a single
        # argument, part of a tuple, or as one or more values in a dictionary.
        # When translating this Message we need to translate those Messages too
        translated_params = _translate_args(self.params, desired_locale)
        # Re-apply the original %-formatting with the translated parameters.
        translated_message = translated_message % translated_params
        return translated_message
    @staticmethod
    def _translate_msgid(msgid, domain, desired_locale=None):
        """Look up *msgid* in *domain*'s catalog for *desired_locale*.

        Falls back to the system default locale, then to 'en_US', and to the
        untranslated msgid if no catalog is found (gettext fallback=True).
        """
        if not desired_locale:
            system_locale = locale.getdefaultlocale()
            # If the system locale is not available to the runtime use English
            if not system_locale[0]:
                desired_locale = 'en_US'
            else:
                desired_locale = system_locale[0]
        # Per-domain localedir override, e.g. NOVA_LOCALEDIR.
        locale_dir = os.environ.get(domain.upper() + '_LOCALEDIR')
        lang = gettext.translation(domain,
                                   localedir=locale_dir,
                                   languages=[desired_locale],
                                   fallback=True)
        if six.PY3:
            translator = lang.gettext
        else:
            translator = lang.ugettext
        translated_message = translator(msgid)
        return translated_message
    def __mod__(self, other):
        """Format with *other* while preserving lazy-translation metadata."""
        # When we mod a Message we want the actual operation to be performed
        # by the parent class (i.e. unicode()), the only thing we do here is
        # save the original msgid and the parameters in case of a translation
        unicode_mod = super(Message, self).__mod__(other)
        modded = Message(self.msgid,
                         msgtext=unicode_mod,
                         params=self._sanitize_mod_params(other),
                         domain=self.domain)
        return modded
def _sanitize_mod_params(self, other):
"""Sanitize the object being modded with this Message.
- Add support for modding 'None' so translation supports it
- Trim the modded object, which can be a large dictionary, to only
those keys that would actually be used in a translation
- Snapshot the object being modded, in case the message is
translated, it will be used as it was when the Message was created
"""
if other is None:
params = (other,)
|
DhrubajyotiDas/PyAbel | abel/tests/test_tools_circularize.py | Python | mit | 1,075 | 0.002791 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
import abel
def test_circularize_image():
    """Distort a sample image with a 'flower' warp, then check that
    circularize_image recovers the original within known tolerances."""
    IM = abel.tools.analytical.sample_image(n=511, name='Ominus', sigma=2)
    # flower image distortion
    def flower_scaling(theta, freq=2, amp=0.1):
        return 1 + amp*np.sin(freq*theta)**4
    IMdist = abel.tools.circularize.circularize(IM,
                      radial_correction_function=flower_scaling)
    IMcirc, angle, scalefactor, spline =\
        abel.tools.circularize.circularize_image(IMdist,
               method='lsq', dr=0.5, dt=0.1, smooth=0,
               ref_angle=0, return_correction=True)
    r, c = IMcirc.shape
    # Residual intensity after correction (regression values).
    diff = (IMcirc - IM).sum(axis=1).sum(axis=0)
    assert_almost_equal(diff, -306.0, decimal=0)
    assert_almost_equal(angle[-1], 3.04, decimal=2)
    assert_almost_equal(scalefactor[4], 0.97, decimal=2)
if __name__ == "__main__":
test_circularize_image()
|
fmrchallenge/fmrbenchmark | domains/dubins_traffic/dub_sim/src/genroad.py | Python | bsd-3-clause | 434 | 0 | #!/usr/bin/env python
from __future__ import print_function
import argparse
from fmrb | .dubins_traffic import gen | _worldsdf, RoadNetwork
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('FILE', type=str, help='road network description file')
args = parser.parse_args()
with open(args.FILE, 'rt') as f:
roads = RoadNetwork(f)
print(gen_worldsdf(roads))
|
VitalPet/addons-onestein | base_directory_files_download/models/__init__.py | Python | agpl-3.0 | 218 | 0 | # -*- coding: utf-8 -*-
# Copyright 2017 Onestein (<http://www.onestein.eu>)
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
from . import ir_filesystem_directory
from . import ir_filesystem_file
|
strogo/djpcms | tests/regression/editing/tests.py | Python | bsd-3-clause | 4,078 | 0.01692 | import json
from djpcms import test
from djpcms.plugins.text import Text
class Editing(test.TestCase):
def setUp(self):
super(Editing,self).setUp()
p = self.get()['page']
p.set_template(p.create_template('thre-columns',
'{{ content0 }} {{ content1 }} {{ content2 }}',
'left,center,right'))
for pr in range(0,5):
p.add_plugin(Text,0)
p.add_plugin(Text,1)
p.add_plugin( | Text,2)
    def postdata(self):
        # Base POST payload marking the request as a block "rearrange" action.
        return {self.sites.settings.HTML_CLASSES.post_view_key:'rearrange'}
def geturl(self, block):
return '{0}{1}/{2}/{3}/'.format(self.sites.settings.CONTENT_INLINE_EDITING['pagecontent'],
| block.page.id,
block.block,
block.position)
    def _getcontent(self, block, toblock):
        '''Do as jQuery does: post a "rearrange" request moving *block* to
        *toblock*'s slot and return the decoded JSON response.'''
        data = self.postdata()
        if toblock.position:
            # Dropping after an existing element: reference the element that
            # will precede the moved block once it is removed from its slot.
            if toblock.position <= block.position:
                toblockp = self.get_block(toblock.block,toblock.position-1)
            else:
                toblockp = toblock
            data['previous'] = toblockp.htmlid()
        else:
            # Dropping at the top of a column: reference the element after it.
            data['next'] = toblock.htmlid()
        self.assertTrue(self.login())
        url = self.geturl(block)
        res = self.post(url, data = data, response = True, ajax = True)
        return json.loads(res.content)
    def get_block(self, blocknum, position):
        '''Get a content block from page and perform sanity check'''
        p = self.get()['page']
        block = p.get_block(blocknum,position)
        # Sanity: the returned block reports the requested coordinates.
        self.assertEqual(block.block,blocknum)
        self.assertEqual(block.position,position)
        return block
    def testLayout(self):
        # The three-column template from setUp should yield three blocks.
        p = self.get()['page']
        self.assertEqual(p.numblocks(),3)
    def testRearrangeSame(self):
        # Dropping a block onto itself is a no-op ('empty' response).
        block = self.get_block(2,3)
        content = self._getcontent(block,block)
        self.assertEqual(content['header'],'empty')
    def testRearrangeSame0(self):
        # Same no-op check for a block already at position 0.
        block = self.get_block(1,0)
        content = self._getcontent(block,block)
        self.assertEqual(content['header'],'empty')
    def testRearrange3to1SameBlock(self):
        # Move position 3 -> 1 within column 2; the server answers with an
        # 'attribute' payload listing the swapped element ids.
        block = self.get_block(2,3)
        toblock = self.get_block(2,1)
        content = self._getcontent(block,toblock)
        self.assertEqual(content['header'],'attribute')
        data = content['body']
        ids = dict(((el['selector'],el['value']) for el in data))
        # NOTE(review): assertTrue(x, y) treats y as the failure *message*,
        # so these only check truthiness -- assertEqual was probably intended.
        self.assertTrue(ids['#'+block.htmlid()],toblock.htmlid())
        self.assertTrue(ids['#'+toblock.htmlid()],block.htmlid())
    def testRearrange3to0SameBlock(self):
        # Move position 3 -> 0 (top of column) within column 2.
        block = self.get_block(2,3)
        toblock = self.get_block(2,0)
        content = self._getcontent(block,toblock)
        self.assertEqual(content['header'],'attribute')
        data = content['body']
        ids = dict(((el['selector'],el['value']) for el in data))
        # NOTE(review): see testRearrange3to1SameBlock about assertTrue usage.
        self.assertTrue(ids['#'+block.htmlid()],toblock.htmlid())
        self.assertTrue(ids['#'+toblock.htmlid()],block.htmlid())
    def testRearrange1to4SameBlock(self):
        # Move position 1 -> 4 (downwards) within column 2.
        block = self.get_block(2,1)
        toblock = self.get_block(2,4)
        content = self._getcontent(block,toblock)
        self.assertEqual(content['header'],'attribute')
        data = content['body']
        ids = dict(((el['selector'],el['value']) for el in data))
        # NOTE(review): see testRearrange3to1SameBlock about assertTrue usage.
        self.assertTrue(ids['#'+block.htmlid()],toblock.htmlid())
        self.assertTrue(ids['#'+toblock.htmlid()],block.htmlid())
def testRearrangeDifferentBlock(self):
block = self.get_block(2,3)
toblock = self.get_block(0,1)
content = self._getcontent(block,toblock)
self.assertEqual(content['header'],'attribute')
data = content['body']
|
Dziolas/inspire-next | inspire/dojson/utils.py | Python | gpl-2.0 | 3,011 | 0.000664 | # -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2015 CERN.
#
# INSPIRE is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""dojson related utilities."""
import six
def legacy_export_as_marc(json, tabsize=4):
    """Create the MARCXML representation using the producer rules.

    :param json: record as a mapping from MARC tag keys (e.g. '001',
        '100__') to field values.  NOTE(review): the parameter name shadows
        the stdlib ``json`` module; kept for interface compatibility.
    :param tabsize: number of spaces per indentation level.
    :returns: the MARCXML document as a single string.
    """
    def encode_for_marcxml(value):
        from invenio.utils.text import encode_for_xml
        if isinstance(value, unicode):
            value = value.encode('utf8')
        return encode_for_xml(str(value), wash=True)

    export = ['<record>\n']
    for key, value in sorted(json.items()):
        if not value:
            # Empty/falsy fields are omitted entirely.
            continue
        if key.startswith('00') and len(key) == 3:
            # Controlfield (001, 003, ...): a single value, no indicators.
            if isinstance(value, list):
                value = value[0]
            export += ['\t<controlfield tag="%s">%s'
                       '</controlfield>\n'.expandtabs(tabsize)
                       % (key, encode_for_marcxml(value))]
        else:
            tag = key[:3]
            # Indicators are the 4th/5th key characters; '_' means "blank".
            # Keys shorter than 4 or 5 characters simply have no indicator.
            try:
                ind1 = key[3].replace("_", "")
            except IndexError:
                ind1 = ""
            try:
                ind2 = key[4].replace("_", "")
            except IndexError:
                ind2 = ""
            if isinstance(value, dict):
                value = [value]
            for field in value:
                export += ['\t<datafield tag="%s" ind1="%s" '
                           'ind2="%s">\n'.expandtabs(tabsize)
                           % (tag, ind1, ind2)]
                for code, subfieldvalue in field.items():
                    if subfieldvalue:
                        if isinstance(subfieldvalue, list):
                            for val in subfieldvalue:
                                export += ['\t\t<subfield code="%s">%s'
                                           '</subfield>\n'.expandtabs(tabsize)
                                           % (code, encode_for_marcxml(val))]
                        else:
                            export += ['\t\t<subfield code="%s">%s'
                                       '</subfield>\n'.expandtabs(tabsize)
                                       % (code,
                                          encode_for_marcxml(subfieldvalue))]
                export += ['\t</datafield>\n'.expandtabs(tabsize)]
    export += ['</record>\n']
    return "".join(export)
|
spookylukey/django-autocomplete-light | test_project/ajax_create/models.py | Python | mit | 216 | 0 | from django.db import models
class Creatable(models.Model):
    """Simple named model used by the ajax-create tests."""
    name = models.CharField(max_length=100)
    # Optional symmetric self-referencing many-to-many relation.
    related = models.ManyToManyField('self', blank=True)

    def __unicode__(self):
        return self.name
|
datalogics/scons | src/engine/SCons/Tool/Perforce.py | Python | mit | 3,456 | 0.004051 | """SCons.Tool.Perforce.py
Tool-specific initialization for Perforce Source Code Management system.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# __COPYRIGHT__
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLU | DING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
import os
import SCons.Action
import SCons.Builder
import SCons.Node.FS
import SCons.Util
# This function should maybe be moved to SCons.Util?
from SCons.Tool.PharLapCommon import addPathIfNotExists
# Variables that we want to import from the base OS environment.
_import_env = [ 'P4PORT', 'P4CLIENT', 'P4USER', 'USER', 'USERNAME', 'P4PASSWD',
'P4CHARSET', 'P4LANGUAGE', 'SYSTEMROOT' ]
PerforceAction = SCons.Action.Action('$P4COM', '$P4COMSTR')
def generate(env):
    """Add a Builder factory function and construction variables for
    Perforce to an Environment."""
    def PerforceFactory(env=env):
        """Return a Builder that runs ``$P4COM`` (p4 sync) on its targets."""
        return SCons.Builder.Builder(action = PerforceAction, env = env)
    #setattr(env, 'Perforce', PerforceFactory)
    env.Perforce = PerforceFactory
    # Default Perforce command-line configuration.
    env['P4'] = 'p4'
    env['P4FLAGS'] = SCons.Util.CLVar('')
    env['P4COM'] = '$P4 $P4FLAGS sync $TARGET'
    try:
        environ = env['ENV']
    except KeyError:
        environ = {}
        env['ENV'] = environ
    # Perforce seems to use the PWD environment variable rather than
    # calling getcwd() for itself, which is odd.  If no PWD variable
    # is present, p4 WILL call getcwd, but this seems to cause problems
    # with good ol' Windows's tilde-mangling for long file names.
    environ['PWD'] = env.Dir('#').get_abspath()
    # Propagate the Perforce-relevant variables from the real environment.
    for var in _import_env:
        v = os.environ.get(var)
        if v:
            environ[var] = v
    if SCons.Util.can_read_reg:
        # If we can read the registry, add the path to Perforce to our environment.
        try:
            k=SCons.Util.RegOpenKeyEx(SCons.Util.hkey_mod.HKEY_LOCAL_MACHINE,
                                      'Software\\Perforce\\environment')
            val, tok = SCons.Util.RegQueryValueEx(k, 'P4INSTROOT')
            addPathIfNotExists(environ, 'PATH', val)
        except SCons.Util.RegError:
            # Can't detect where Perforce is, hope the user has it set in the
            # PATH.
            pass
def exists(env):
    # The tool is usable when the p4 client executable can be located.
    return env.Detect('p4')
|
clarinsi/reldi-api | index.py | Python | gpl-3.0 | 3,105 | 0.005801 | # -*- coding: utf-8 -*-
import sys
import os
import atexit
from ConfigParser import NoSectionError
from flask import Flask, url_for
from flask.ext.cors import CORS
from src.core.cmstiser import Csmtiser
from src.core.ner_tagger import NerTagger
from src.di import DependencyContainer
from src.core.lexicon import Lexicon
from src.core.segmenter import Segmenter
from src.core.tagger import Tagger
from src.core.lematiser import Lematiser
from src.core.dependency_parser import DependencyParser
from src.core.restorer import DiacriticRestorer
from src.routers.api_router import ApiRouter
from src.routers.web_router import WebRouter
from src.services.mail_service import MailService
from src.helpers import jsonify
from flask import make_response, redirect
from werkzeug.contrib.fixers import ProxyFix
import traceback
reload(sys)
sys.setdefaultencoding('utf-8')
from src.helpers import config
def init():
    """Build and return the Flask app with all NLP models and routers wired up."""
    languages = ['hr', 'sl', 'sr']
    app = Flask(__name__)
    app.secret_key = 'super secret key'
    app.config['SESSION_TYPE'] = 'filesystem'
    app.config['UPLOAD_FOLDER'] = os.path.dirname(os.path.realpath(__file__)) + '/uploads/'
    CORS(app)
    app.wsgi_app = ProxyFix(app.wsgi_app)
    print 'Initializing models'
    # lazy=False: presumably the container evaluates each factory eagerly at
    # assignment, so the late-bound ``lang`` in these lambdas is read now.
    # If the container were lazy, every factory would see lang == 'sr'.
    # TODO(review): confirm against DependencyContainer semantics.
    dc = DependencyContainer(lazy=False)
    for lang in languages:
        dc['segmenter.' + lang] = lambda: Segmenter(lang)
        dc['tagger.' + lang] = lambda: Tagger(lang, dc['segmenter.' + lang])
        dc['lemmatiser.' + lang] = lambda: Lematiser(lang, dc['segmenter.' + lang], dc['tagger.' + lang])
        dc['ner_tagger.' + lang] = lambda: NerTagger(lang, dc['lemmatiser.' + lang])
        # Text normalization (csmtiser) is only available for Slovene.
        if lang=='sl':
            dc['csmtiser.'+lang] = lambda: Csmtiser(lang, dc['segmenter.' + lang])
        dc['lexicon.' + lang] = lambda: Lexicon(lang)
        dc['restorer.'+lang] = lambda: DiacriticRestorer(lang, dc['segmenter.' + lang])
        dc['dependency_parser.' + lang] = lambda: DependencyParser(lang, dc['lemmatiser.' + lang])
    dc['mail_service'] = lambda: MailService()
    print 'Models initialized'
    # Optional URL prefix from configuration; default to none.
    try:
        url_prefix= config["url"]["prefix"]
    except KeyError:
        url_prefix=''
    api_router = ApiRouter(dc)
    app.register_blueprint(api_router, url_prefix = url_prefix+'/api/v1')
    web_router = WebRouter(dc)
    app.register_blueprint(web_router, url_prefix = url_prefix+'/web')
    @app.errorhandler(Exception)
    def handle_error(error):
        '''Serialize any unhandled exception as a JSON response.

        :param error: the exception raised by a view function
        :return: (response, status); status from error.status_code or 500
        '''
        app.logger.error(error)
        traceback.print_exc()
        response = jsonify(error.message)
        # NOTE(review): print_exc(error) passes the exception object as the
        # ``limit`` argument and duplicates the call above -- likely a bug.
        traceback.print_exc(error)
        return response, error.status_code if hasattr(error, 'status_code') else 500
    @app.route('/', methods=['GET'])
    def main():
        # Root redirects to the web UI.
        return make_response(redirect(url_prefix+'/web'))
    return app
if __name__ == "__main__":
text = 'Modeli su učitani! Vrlo uspješno.'
app = init()
app.run(host='0.0.0.0', port=8084) # debug=True, use_reloader=False)
else:
application = init()
|
fkmhrk/kiilib_python | test/sample_test.py | Python | apache-2.0 | 155 | 0.006452 | import unittest
class ExampleTest(unittest.TestCase):
    """Minimal smoke test demonstrating assertEqual on scalars and lists."""

    def test_a(self):
        self.assertEqual(1, 1)
        self.assertEqual([1, 2, 3], [1, 2, 3])
|
jenshnielsen/HJCFIT | likelihood/python/optimization.py | Python | gpl-3.0 | 5,973 | 0.01557 | ########################
# DCProgs computes missed-events likelihood as described in
# Hawkes, Jalali and Colquhoun (1990, 1992)
#
# Copyright (C) 2013 University College London
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#########################
""" Subpackage for likelihood optimization. """
__docformat__ = "restructuredtext en"
__all__ = ['reduce_likelihood']
def reduce_likelihood(likelihood, graph_matrix):
    """ Maps likelihood to a set of variable components.
    The goal is a callable that takes on input a numpy array with only variable components.
    It hides from the input the components that are fixed or can be obtained as a result of an
    expression.
    :param likelihood:
        This should be a callable object that takes a :class:`QMatrix` or numpy matrix on input.
    :param graph_matrix:
        Defines the reaction graph. This is a list of list (matrix) where each element is either
        "V" (variable), 0 (no direct reaction between these two states), a number (Fixed
        reaction rate), or a string with a valid python expression (that `eval` understands). In the
        latter case, `q` will be replaced with the value of the qmatrix, `i` is set to the current
        row index, and `j` to the current column index. The open-states should be in the top-left
        corner.
        .. code-block:: python
            [ ["V", 0, 0.1],
              [ 0, "V", "2e0*q[2, 1]"],
              ["V", "V", "V"] ]
        In the example above, there are no direct reactions transforming state (1) into state
        (2), and the reaction rate from state (1) to state (3) is fixed. All others will be
        optimized, subject to the intrinsic constraints (sum over each row is zero, diagonal
        elements are negative) and extrinsic contraints (defined below).
        Note that diagonal elements are always obtained from the condition that rows of the matrix
        should be equal to one (e.g. as though they were set to "k[i, i] - sum(k[i])"). This
        constraint is the last one imposed on the qmatrix.
        The expression mechanism above is not sufficient to handle cycles, e.g `q[1, 2]` is an
        expression that depends on `q[2, 1]`, which itself is an expression that depends on
        `q[1, 2]`. Such cases will not produce an error. Howerver, their result is undefined.
        Furthermore, it should not explicitely depend on the diagonal components. Those components
        have not yet been constrained to have rows sum to zero.
    :returns:
        A callable from which the fixed components have been abstracted.
        It takes on input a numpy vector with as many components as there are variable components in
        graph_matrix.
        For convenience, the callable has a `to_reduced_coords` method which takes a numpy matrix
        and returns a vector with only the variable components.
        It also sports a `to_full_coords` coords that maps back to the whole space.
    """
    import numpy
    from numpy import array, zeros, sum
    from .likelihood import QMatrix
    # Number of states in mechanism.
    nstates = len(graph_matrix)
    if any(len(u) != nstates for u in graph_matrix):
        raise ValueError('graph_matrix should be square')
    # Boolean masks indexing fixed and variable components of the mechanism.
    fixed = zeros((nstates, nstates), dtype='bool')
    variable = zeros((nstates, nstates), dtype='bool')
    # List of (i, j, expression) tuples used to set matrix components.
    expressions = []
    for i, row in enumerate(graph_matrix):
        for j, value in enumerate(row):
            if i == j: continue
            if not isinstance(value, str): fixed[i, j] = True
            elif value.lower().rstrip().lstrip() == 'v': variable[i, j] = True
            else: expressions.append((i, j, value))
    # No expressions at all: use None so to_full_coords can skip the eval
    # machinery entirely.  (The original checked ``expressions is None`` on a
    # list, which was always False and therefore dead code.)
    if not expressions: expressions = None
    # sanity check: a zero fixed rate must be matched by a zero reverse rate,
    # otherwise the mechanism is not reversible between those states.
    for i, row in enumerate(graph_matrix):
        for j, value in enumerate(row):
            if fixed[i, j] == False: continue
            if abs(value) > 1e-8: continue
            if fixed[j, i] == False or abs(graph_matrix[j][i]) > 1e-8:
                raise ValueError( 'No reversability for transformations from states {0} to {1}.' \
                                  .format(i, j) )
    # This matrix will hold fixed components.
    # During optimization, it will receive the variable components
    qmatrix = QMatrix(zeros((nstates, nstates)), likelihood.nopen)
    for i, row in enumerate(graph_matrix):
        for j, value in enumerate(row):
            if i == j: qmatrix[i, i] = 0
            elif fixed[i, j]: qmatrix[i, j] = value
    # Finally, creates actual functions.
    def to_full_coords(vector):
        """Scatter *vector* into the variable slots, evaluate expressions,
        then fix diagonals so each row sums to zero."""
        qmatrix.matrix[variable] = vector
        if expressions is not None:
            # Expressions may use any numpy name plus q (the matrix), i and j.
            local_dict = numpy.__dict__.copy()
            global_dict = globals()
            local_dict['q'] = qmatrix
            for i, j, expression in expressions:
                local_dict['i'] = i
                local_dict['j'] = j
                qmatrix.matrix[i, j] = eval(expression, global_dict, local_dict)
        # Diagonal = -(sum of off-diagonal row elements).
        for i, row in enumerate(qmatrix.matrix):
            row[i] = 0
            row[i] = -sum(row)
        return qmatrix.matrix
    def reduced_likelihood(vector):
        to_full_coords(vector)
        return likelihood(qmatrix)
    reduced_likelihood.to_reduced_coords = lambda x: array(x)[variable]
    reduced_likelihood.to_full_coords = lambda x: to_full_coords(x).copy()
    reduced_likelihood.likelihood = likelihood
    return reduced_likelihood
|
Bismarrck/pymatgen | scripts/feff_plot_dos.py | Python | mit | 2,040 | 0.002941 | #!/usr/bin/env python
# coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""
Script to plot density of states (DOS) generated by an FEFF run
either by site, element, or orbital
"""
from __future__ import division
__author__ = "Alan Dozier"
__credits__= "Anubhav | Jain, Shyue Ping Ong"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Alan Dozier"
__email__ = "adozier@uky.edu"
__date__ = | "April 7, 2012"
import argparse
from collections import OrderedDict
from pymatgen.io.feff import FeffLdos
from pymatgen.electronic_structure.plotter import DosPlotter
# NOTE(review): Python 2 script (uses xrange and the legacy pymatgen API).
parser = argparse.ArgumentParser(description='''Convenient DOS Plotter for Feff runs.
Author: Alan Dozier
Version: 1.0
Last updated: April, 2013''')
parser.add_argument('filename', metavar='filename', type=str, nargs=1,
                    help='ldos%% file set to plot')
parser.add_argument('filename1', metavar='filename1', type=str, nargs=1,
                    help='feff.inp input file ')
parser.add_argument('-s', '--site', dest='site', action='store_const',
                    const=True, help='plot site projected DOS')
parser.add_argument('-e', '--element', dest='element', action='store_const',
                    const=True, help='plot element projected DOS')
parser.add_argument('-o', '--orbital', dest="orbital", action='store_const',
                    const=True, help='plot orbital projected DOS')
args = parser.parse_args()
# Parse the FEFF output: feff.inp for the structure, ldos files for the DOS.
f = FeffLdos.from_file(args.filename1[0], args.filename[0])
dos = f.complete_dos
all_dos = OrderedDict()
all_dos['Total'] = dos
structure = f.complete_dos.structure
# Optionally add site-, element-, and orbital-projected DOS curves.
if args.site:
    for i in xrange(len(structure)):
        site = structure[i]
        all_dos['Site ' + str(i) + " " + site.specie.symbol] = \
            dos.get_site_dos(site)
if args.element:
    all_dos.update(dos.get_element_dos())
if args.orbital:
    all_dos.update(dos.get_spd_dos())
plotter = DosPlotter()
plotter.add_dos_dict(all_dos)
plotter.show()
|
jordanemedlock/psychtruths | temboo/core/Library/RunKeeper/GeneralMeasurements/UpdateEntry.py | Python | apache-2.0 | 3,693 | 0.004335 | # -*- coding: utf-8 -*-
###############################################################################
#
# UpdateEntry
# Updates a body measurement entry in a user’s feed.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class UpdateEntry(Choreography):
    """Temboo Choreo wrapping RunKeeper's "update body measurement entry" API call."""
    def __init__(self, temboo_session):
        """
        Create a new instance of the UpdateEntry Choreo. A TembooSession object, containing a valid
        set of Temboo credentials, must be supplied.
        """
        super(UpdateEntry, self).__init__(temboo_session, '/Library/RunKeeper/GeneralMeasurements/UpdateEntry')
    def new_input_set(self):
        """Return an empty input set for this Choreo."""
        return UpdateEntryInputSet()
    def _make_result_set(self, result, path):
        """Wrap a raw execution result in the Choreo-specific result set."""
        return UpdateEntryResultSet(result, path)
    def _make_execution(self, session, exec_id, path):
        """Create an execution handle for an in-flight run of this Choreo."""
        return UpdateEntryChoreographyExecution(session, exec_id, path)
class UpdateEntryInputSet(InputSet):
    """
    An InputSet with methods appropriate for specifying the inputs to the UpdateEntry
    Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
    """
    def set_Entry(self, value):
        """
        Set the value of the Entry input for this Choreo. ((required, json) A JSON string containing the key/value pairs for the fields to be updated in the body measurement entry. See documentation for formatting examples.)
        """
        super(UpdateEntryInputSet, self)._set_input('Entry', value)
    def set_AccessToken(self, value):
        """
        Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token retrieved after the final step in the OAuth process.)
        """
        super(UpdateEntryInputSet, self)._set_input('AccessToken', value)
    def set_EntryID(self, value):
        """
        Set the value of the EntryID input for this Choreo. ((required, string) This can be the individual id of the body measurement entry, or you can pass the full uri for the entry as returned from the RetrieveEntries Choreo (i.e. /generalMeasurements/24085455).)
        """
        super(UpdateEntryInputSet, self)._set_input('EntryID', value)
class UpdateEntryResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the UpdateEntry Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from RunKeeper.)
"""
return self._output.get('Response', None)
class UpdateEntryChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return UpdateEntryResultSet(response, path)
|
sputnick-dev/weboob | modules/carrefourbanque/__init__.py | Python | agpl-3.0 | 810 | 0 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Romain Bignon
#
# This file is part of weboob.
#
# w | eboob is free software: you can redistribute it and/or modify
# it | under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from .module import CarrefourBanqueModule
__all__ = ['CarrefourBanqueModule']
|
Gebesa-Dev/Addons-gebesa | product_code_unique/__openerp__.py | Python | agpl-3.0 | 701 | 0 | # -*- coding: utf-8 -*-
# © <YEAR(S)> <AUTHOR(S)>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).
{
"name": "Product Code Unique",
"summary": "Add the unique property to default_code field",
"version": "9.0.1.0.0",
"category": "Product",
"website": "https://odoo-community.org/",
"author": "<Deysy Mas | corro>, Odoo Community Association (OCA)",
"license": | "AGPL-3",
"application": False,
"installable": True,
"external_dependencies": {
"python": [],
"bin": [],
},
"depends": [
"base",
"product",
],
"data": [
"views/product_view.xml"
],
"demo": [
],
"qweb": [
]
}
|
mitenjain/R3N | tests/nnTests.py | Python | mit | 5,157 | 0.006011 | #!/usr/bin/env python
import sys
sys.path.append("../")
import unittest
import numpy as np
from toy_datasets import load_digit_dataset
from lib.optimization import mini_batch_sgd, mini_batch_sgd_with_annealing
class skLearnDigitTest(unittest.TestCase):
def setUp(self):
tr_data, xtr_data, ts_data = load_digit_dataset(0.7)
self.tr = np.array([x[0] for x in tr_data])
self.tr_l = [x[1] for x in tr_da | ta]
self.xtr = np.array([x[0] for x in xtr_data])
self.xtr_l = [x[1] for x in xtr_data]
self.ts = np.array([x[0] for x in ts_data])
self.ts_l = [x[1] for x in ts_data]
def checkModel(self, test_name, model_type, hidden_dim, verbose, epochs, batch_size=10, extra_args=None):
net, results = mini_batch_sgd(motif=test_name,
train | _data=self.tr, labels=self.tr_l,
xTrain_data=self.xtr, xTrain_targets=self.xtr_l,
learning_rate=0.001, L1_reg=0.0, L2_reg=0.0, epochs=epochs,
batch_size=batch_size, hidden_dim=hidden_dim, model_type=model_type,
model_file=None, trained_model_dir=None, verbose=verbose, extra_args=extra_args)
self.assertTrue(results['batch_costs'][1] > results['batch_costs'][-1])
self.assertTrue(results['xtrain_accuracies'][1] < results['xtrain_accuracies'][-1])
def test_twoLayerNeuralNetwork(self):
self.checkModel(test_name="twoLayerTest", model_type="twoLayer", hidden_dim=[10], verbose=False, epochs=1000)
def test_threeLayerNeuralNetwork(self):
self.checkModel(test_name="threeLayerTest", model_type="threeLayer", hidden_dim=[10, 10], verbose=False,
epochs=1000)
self.checkModel(test_name="ReLUthreeLayerTest", model_type="ReLUthreeLayer", hidden_dim=[10, 10],
verbose=False, epochs=1000)
def test_fourLayerNeuralNetwork(self):
self.checkModel(test_name="fourLayerTest", model_type="fourLayer", hidden_dim=[10, 10, 10],
verbose=False, epochs=1000)
self.checkModel(test_name="ReLUfourLayerTest", model_type="ReLUfourLayer", hidden_dim=[10, 10, 10],
verbose=False, epochs=1000)
def test_ConvNet(self):
conv_args = {
"batch_size": 50,
"n_filters": [5],
"n_channels": [1],
"data_shape": [8, 8], # (8-3+1, 8-3+1) = (6, 6)
"filter_shape": [3, 3], # (6/2, 6/2) = (3, 3)
"poolsize": (2, 2) # output is (batch_size, n_nkerns[0], 3, 3)
}
self.checkModel(test_name="ConvNetTest", model_type="ConvNet3", hidden_dim=10, verbose=False, epochs=1000,
extra_args=conv_args, batch_size=conv_args['batch_size'])
def test_scrambledLabels(self):
np.random.shuffle(self.xtr)
net, results = mini_batch_sgd(motif="scrambled",
train_data=self.tr, labels=self.tr_l,
xTrain_data=self.xtr, xTrain_targets=self.xtr_l,
learning_rate=0.001, L1_reg=0.0, L2_reg=0.0, epochs=1000, batch_size=10,
hidden_dim=[10, 10], model_type="threeLayer", model_file=None,
trained_model_dir=None,
verbose=False)
self.assertTrue(results['batch_costs'][1] > results['batch_costs'][-1])
self.assertAlmostEqual(results['xtrain_accuracies'][1], results['xtrain_accuracies'][-1], delta=2.5)
def test_annealingLearningRate(self):
net, results = mini_batch_sgd_with_annealing(motif="annealing",
train_data=self.tr, labels=self.tr_l,
xTrain_data=self.xtr, xTrain_targets=self.xtr_l,
learning_rate=0.001, L1_reg=0.0, L2_reg=0.0, epochs=100,
batch_size=10,hidden_dim=[10, 10], model_type="threeLayer",
model_file=None, trained_model_dir=None, verbose=False)
self.assertTrue(results['batch_costs'][1] > results['batch_costs'][-1])
self.assertTrue(results['xtrain_accuracies'][1] < results['xtrain_accuracies'][-1])
# TODO illegal network tests
# TODO dump/load/eval tests
# TODO MNIST DATASET
def main():
testSuite = unittest.TestSuite()
testSuite.addTest(skLearnDigitTest('test_twoLayerNeuralNetwork'))
testSuite.addTest(skLearnDigitTest('test_threeLayerNeuralNetwork'))
testSuite.addTest(skLearnDigitTest('test_fourLayerNeuralNetwork'))
testSuite.addTest(skLearnDigitTest('test_ConvNet'))
testSuite.addTest(skLearnDigitTest('test_scrambledLabels'))
testSuite.addTest(skLearnDigitTest('test_annealingLearningRate'))
testRunner = unittest.TextTestRunner(verbosity=2)
testRunner.run(testSuite)
if __name__ == '__main__':
main()
|
lijunxyz/leetcode_practice | pow_x_n_medium/Solution1.py | Python | mit | 1,612 | 0.011166 | # https://oj.leetcode.com/problems/powx-n/
# Implement pow(x, n).
# The idea of this problem is very similar to the "divide two integers" problem. Try to match n with
# 2^(2^(k-1)) series.
# But still, there are various detailed corner cases to consider.
class Solution:
# @param x, a float
# @param n, a integer
# @return a float
def __init__(self):
self.epsilon = 1e-20
def pow(self, x, n):
MAX_INT = 0x7FFFFFFF
MIN_INT = -0x7FFFFFFF -1
if x==0 and n<0:
return MAX_INT
if x==0 and n>0:
return 0
if abs(x-1.0) < self.epsilon and x>1.0: # if x is something like 1.000 | 00001
return 1.0
# Now make x and n both positive for convenience
if x<0 and n%2==1:
sign = -1
else:
sign = 1
x = abs(x)
if n<0:
x = 1.0/float(x)
n = | -n
p = 1 # final product
while n>=1:
a = 1 # a temporary probe
x2 = x # a copy of x, reset every iteration <-- this line is easy to forget
while a<<1 < n:
a <<= 1
x2 = x2*x2
p *= x2
n -= a
if p > MAX_INT:
if sign ==1: # if p is to explode and overflow, return now
return MAX_INT
else:
return MIN_INT
if p < self.epsilon:
# This takes care of the case when x<1 and n>0, or x>1 and n<0
return 0
return p*sign
# s = Solution()
# print s.pow(34.005,-3)
|
commonsearch/cosr-back | cosrlib/es.py | Python | apache-2.0 | 4,391 | 0.001139 | from __future__ import absolute_import, division, print_function, unicode_literals
import time
import logging
import ujson as json
from elasticsearch import Elasticsearch
from elasticsearch.client import IndicesClient
from elasticsearch.exceptions import ConnectionTimeout
from .config import config
from .es_mappings import ES_MAPPINGS, ES_SIMILARITIES
class ElasticsearchBulkIndexer(object):
""" Bulk indexer for Elasticsearch """
servers = {
"docs": [config["ELASTICSEARCHDOCS"]],
"text": [config["ELASTICSEARCHTEXT"]]
}
def __init__(self, index_name, batch_size=500):
self.index_name = index_name
self.buffer = []
self.batch_size = batch_size
self.total_size = 0
self.connected = False
self.client = None
def connect(self):
""" Establish the ES connection if not already done """
if self.connected:
return
self.connected = True
self.client = Elasticsearch(self.servers[self.index_name], timeout=60)
def index(self, _id, hit):
""" Queue one document for indexing. """
if not self.connected:
self.connect()
# https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html
self.buffer.append('{"index":{"_id":"%s"}}\n%s\n' % (
_id,
json.dumps(hit) # pylint: disable=no-member
))
if len(self.buffer) >= self.batch_size:
self.flush()
def empty(self):
""" Empty the ES index. Dangerous operation! """
if config["ENV"] not in ("local", "ci"):
raise Exception("empty() not allowed in env %s" % config["ENV"])
if self.indices().exists(index=self.index_name):
self.indices().delete(index=self.index_name)
def refresh(self):
""" Sends a "refresh" to the ES index, forcing the actual indexing of what was sent up until now """
if not self.connected:
return
if config["ENV"] not in ("local", "ci"):
raise Exception("refresh() not allowed in env %s" % config["ENV"])
self.indices().refresh(index=self.index_name)
def flush(self, retries=10):
""" Sends the current indexing batch to ES """
if len(self.buffer) == 0:
return
if not self.connected:
self.connect()
self.total_size += len(self.buffer)
logging.debug(
"ES: Flushing %s docs to index=%s (total: %s)",
len(self.buffer), self.index_name, self.total_size
)
try:
self.bulk_index()
except ConnectionTimeout, e:
if retries == 0:
raise e
time.sleep(60)
return self.flush(retries=retries - 1)
self.buffer = []
def bulk_index(self):
""" Indexes the current buffer to Elasticsearch, bypassing the bulk() helper for performance """
connection = self.client.transport.get_connection()
bulk_url = "/%s/page/_bulk" % self.index_name
body = "".join(self.buffer)
# TODO retries
# status, headers, data
status, _, _ = connection.perform_request("POST", bulk_url, body=body)
if status != 200:
raise Exception("Elasticsearch returned status=%s" % status)
# TODO: look for errors there?
# parsed = json.loads(data)
def indices(self):
""" Returns an elasticsearch.client.IndicesClient instance """
if not self.connected:
self.connect | ()
return IndicesClient(self.client)
def create(self, empty=False):
""" Creates the ES index """
if empty:
self.empty()
mappings = ES_MAPPINGS[self.index_name]
self.indices().create(index=self.index_name, body={
"settings": {
# TODO: this configuration should be set somewhere else! | (cosr-ops?)
"number_of_shards": 5,
"number_of_replicas": 0,
# In prod we don't refresh manually so this is the only setting
# that will make ES periodically refresh to avoid storing only in temporary files
# as we index
"refresh_interval": "60s",
"similarity": ES_SIMILARITIES
},
"mappings": mappings
})
|
jgosmann/psyrun | psyrun/mapper.py | Python | mit | 5,215 | 0 | """Map functions onto parameter spaces."""
from multiprocessing import Pool
import os.path
import warnings
from psyrun.exceptions import IneffectiveExcludeWarning
from psyrun.pspace import dict_concat, missing, Param
def get_result(fn, params, exclude=None):
"""Evaluates a function with given parameters.
Evaluates *fn* with the parameters *param* and returns a dictionary with
the input parameters and returned output values.
Parameters
----------
fn : function
Function to evaluate. Has to return a dictionary.
params : dict
Parameters passed to *fn* as keyword arguments.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
dict
Returns *params* updated with the return value of *fn*.
Examples
--------
>>> def fn(x, is_result):
... return {'y': x * x, 'is_result': 1}
>>>
>>> from pprint import pprint
>>> pprint(get_result(fn, {'x': 4, 'is_result': 0}))
{'is_result': 1, 'x': 4, 'y': 16}
"""
result = dict(params)
result.update(fn(**params))
if exclude is not None:
for k in exclude:
if k in result:
del result[k]
else:
warnings.warn(IneffectiveExcludeWarning(k))
return result
def _get_result_single_arg(args):
return get_result(*args)
def map_pspace(fn, pspace, exclude=None):
"""Maps a function to parameter space values.
Parameters
----------
fn : function
Function to evaluate on parameter space. Has to return a dictionary.
pspace : `ParameterSpace`
Parameter space providing parameter values to evaluate function on.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
dict
Dictionary with the input parameter values and the function return
values.
Examples
--------
>>> def fn(x):
... return {'y': x * x}
>>>
>>> from pprint import pprint
>>> from psyrun import Param
>>> pprint(map_pspace(fn, Param(x=[1, 2])))
{'x': [1, 2], 'y': [1, 4]}
"""
return dict_concat(list(get_result(
fn, p, exclude) for p in pspace.iterate()))
def map_pspace_hdd_backed(
fn, pspace, filename, store, return_data=True, pool_size=1,
exclude=None):
"""Maps a function to parameter space values while storing produced data.
Data is stored progressively. Thus, if the program crashes, not all data
will be lost.
Parameters
----------
fn : function
Function to evaluate on parameter space. Has to return a dictionary.
pspace : `ParameterSpace`
Parameter space providing parameter values to evaluate function on.
filename : str
Filename of file to store data to.
store : `Store`
Store to save data with.
return_data : bool, optional
Whether to return the resulting data after mapping the function. This
will read all produced data from the disk.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
None or dict
Dictionary with the input parameter values and the function return
values if requested.
"""
if os.path.exists(filename):
pspace = missing(pspace, Param(**store.load(filename)))
chunksize = max(1, | len(pspace) // pool_size)
for r in Pool(pool_size).imap_unordered(
_get_result_single_arg,
((fn, | p, exclude) for p in pspace.iterate()), chunksize):
store.append(filename, dict_concat((r,)))
if not os.path.exists(filename):
store.save(filename, {})
if return_data:
return store.load(filename)
def map_pspace_parallel(
fn, pspace, n_jobs=-1, backend='multiprocessing', exclude=None):
"""Maps a function to parameter space values in parallel.
Requires `joblib <https://pythonhosted.org/joblib/>`_.
Parameters
----------
fn : function
Function to evaluate on parameter space. Has to return a dictionary.
pspace : ParameterSpace
Parameter space providing parameter values to evaluate function on.
n_jobs : int, optional
Number of parallel jobs. Set to -1 to automatically determine.
backend : str, optional
Backend to use. See `joblib documentation
<https://pythonhosted.org/joblib/parallel.html#using-the-threading-backend>`_
for details.
exclude : sequence, optional
Keys of elements to exclude from the results dictionary.
Returns
-------
dict
Dictionary with the input parameter values and the function return
values.
Examples
--------
>>> from pprint import pprint
>>> from psyrun import Param
>>> from psyrun.utils.example import square
>>>
>>> pprint(map_pspace_parallel(square, Param(x=[1, 2])))
{'x': [1, 2], 'y': [1, 4]}
"""
import joblib
parallel = joblib.Parallel(n_jobs=n_jobs, backend=backend)
return dict_concat(parallel(
joblib.delayed(get_result)(fn, p, exclude) for p in pspace.iterate()))
|
kennedyshead/home-assistant | homeassistant/components/evohome/__init__.py | Python | apache-2.0 | 25,207 | 0.001547 | """Support for (EMEA/EU-based) Honeywell TCC climate systems.
Such systems include evohome, Round Thermostat, and others.
"""
from __future__ import annotations
from datetime import datetime as dt, timedelta
import logging
import re
from typing import Any
import aiohttp.client_exceptions
import evohomeasync
import evohomeasync2
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
HTTP_SERVICE_UNAVAILABLE,
HTTP_TOO_MANY_REQUESTS,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.service import verify_domain_control
from homeassistant.helpers.typing import ConfigType
import homeassistant.util.dt as dt_util
from .const import DOMAIN, GWS, STORAGE_KEY, STORAGE_VER, TCS, UTC_OFFSET
_LOGGER = logging.getLogger(__name__)
ACCESS_TOKEN = "access_token"
ACCESS_TOKEN_EXPIRES = "access_token_expires"
REFRESH_TOKEN = "refresh_token"
USER_DATA = "user_data"
CONF_LOCATION_IDX = "location_idx"
SCAN_INTERVAL_DEFAULT = timedelta(seconds=300)
SCAN_INTERVAL_MINIMUM = timedelta(seconds=60)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_LOCATION_IDX, default=0): cv.positive_int,
vol.Optional(
CONF_SCAN_INTERVAL, default=SCAN_INTERVAL_DEFAULT
): vol.All(cv.time_period, vol.Range(min=SCAN_INTERVAL_MINIMUM)),
}
)
},
extra=vol.ALLOW_EXTRA,
)
ATTR_SYSTEM_MODE = "mode"
ATTR_DURATION_DAYS = "period"
ATTR_DURATION_HOURS = "duration"
ATTR_ZONE_TEMP = "setpoint"
ATTR_DURATION_UNTIL = "duration"
SVC_REFRESH_SYSTEM = "refresh_system"
SVC_SET_SYSTEM_MODE = "set_system_mode"
SVC_RESET_SYSTEM = "reset_system"
SVC_SET_ZONE_OVERRIDE = "set_zone_override"
SVC_RESET_ZONE_OVERRIDE = "clear_zone_override"
RESET_ZONE_OVERRIDE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_id})
SET_ZONE_OVERRIDE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(ATTR_ZONE_TEMP): vol.All(
vol.Coerce(float), vol.Range(min=4.0, max=35.0)
),
vol.Optional(ATTR_DURATION_UNTIL): vol.All(
cv.time_period, vol.Range(min=timedelta(days=0), max=timedelta(days=1))
),
}
)
# system mode schemas are built dynamically, below
def _dt_local_to_aware(dt_naive: dt) -> dt:
dt_aware = dt_util.now() + (dt_naive - dt.now())
if dt_aware.microsecond >= 500000:
dt_aware += timedelta(seconds=1)
return dt_aware.replace(microsecond=0)
def _dt_aware_to_naive(dt_aware: dt) -> dt:
dt_naive = dt.now() + (dt_aware - dt_util.now())
if dt_naive.microsecond >= 500000:
dt_naive += timedelta(seconds=1)
return dt_naive.replace(microsecond=0)
def convert_until(status_dict: dict, until_key: str) -> None:
"""Reformat a dt str from "%Y-%m-%dT%H:%M:%SZ" as local/aware/isoformat."""
if until_key in status_dict: # only present for certain modes
dt_utc_naive = dt_util.parse_datetime(status_dict[until_key])
status_dict[until_key] = dt_util.as_local(dt_utc_naive).isoformat()
def convert_dict(dictionary: dict[str, Any]) -> dict[str, Any]:
"""Recursively convert a dict's keys to snake_case."""
def convert_key(key: str) -> str:
"""Convert a string to snake_case."""
string = re.sub(r"[\-\.\s]", "_", str(key))
return (string[0]).lower() + re.sub(
r"[A-Z]", lambda matched: f"_{matched.group(0).lower()}", string[1:]
)
return {
(convert_key(k) if isinstance(k, str) else k): (
convert_dict(v) if isinstance(v, dict) else v
)
for k, v in dictionary.items()
}
def _handle_exception(err) -> bool:
"""Return False if the exception can't be ignored."""
try:
raise err
except evohomeasync2.AuthenticationError:
_LOGGER.error(
"Failed to authenticate with the vendor's server. "
"Check your username and password. NB: Some special password characters "
"that work correctly via the website will not work via the web API. "
"Message is: %s",
err,
)
except aiohttp.ClientConnectionError:
# this appears to be a common occurrence with the vendor's servers
_LOGGER.warning(
"Unable to connect with the vendor's server. "
"Check your network and the vendor's service status page. "
"Message is: %s",
err,
)
except aiohttp.ClientResponseError:
if err.status == HTTP_SERVICE_UNAVAILABLE:
_LOGGER.warning(
"The vendor says their server is currently unavailable. "
"Check the vendor's service status page"
)
elif err.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(
"The vendor's API rate limit has been exceeded. "
"If this message persists, consider increasing the %s",
CONF_SCAN_INTERVAL,
)
else:
raise # we don't expect/handle any other Exceptions
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
"""Create a (EMEA/EU-based) Honeywell TCC system."""
async def load_auth_tokens(store) -> tuple[dict, dict | None]:
app_storage = await store.async_load()
tokens = dict(app_storage or {})
if tokens.pop(CONF_USERNAME, None) != config[DOMAIN][CONF_USERNAME]:
# any tokens won't be valid, and store might be be corrupt
await store.async_save({})
return ({}, None)
# evohomeasync2 requires naive/local datetimes as strings
if tokens.get(ACCESS_TOKEN_EXPIRES) is not None:
tokens[ACCESS_TOKEN_EXPIRES] = _dt_aware_to_naive(
dt_u | til.parse_datetime(tokens[ACCESS_TOKEN_EXPI | RES])
)
user_data = tokens.pop(USER_DATA, None)
return (tokens, user_data)
store = hass.helpers.storage.Store(STORAGE_VER, STORAGE_KEY)
tokens, user_data = await load_auth_tokens(store)
client_v2 = evohomeasync2.EvohomeClient(
config[DOMAIN][CONF_USERNAME],
config[DOMAIN][CONF_PASSWORD],
**tokens,
session=async_get_clientsession(hass),
)
try:
await client_v2.login()
except (aiohttp.ClientError, evohomeasync2.AuthenticationError) as err:
_handle_exception(err)
return False
finally:
config[DOMAIN][CONF_PASSWORD] = "REDACTED"
loc_idx = config[DOMAIN][CONF_LOCATION_IDX]
try:
loc_config = client_v2.installation_info[loc_idx]
except IndexError:
_LOGGER.error(
"Config error: '%s' = %s, but the valid range is 0-%s. "
"Unable to continue. Fix any configuration errors and restart HA",
CONF_LOCATION_IDX,
loc_idx,
len(client_v2.installation_info) - 1,
)
return False
if _LOGGER.isEnabledFor(logging.DEBUG):
_config = {"locationInfo": {"timeZone": None}, GWS: [{TCS: None}]}
_config["locationInfo"]["timeZone"] = loc_config["locationInfo"]["timeZone"]
_config[GWS][0][TCS] = loc_config[GWS][0][TCS]
_LOGGER.debug("Config = %s", _config)
client_v1 = evohomeasync.EvohomeClient(
client_v2.username,
client_v2.password,
user_data=user_data,
session=async_get_clientsession(hass),
)
hass.data[DOMAIN] = {}
hass.data[DOMAIN]["broker"] = broker = EvoBroker(
hass, client_v2, client_v1, store, config[DOMAIN]
)
await broker.save_a |
agvergara/Python | X-Serv-XML-ContentApp-Barrapunto/barrapunto/contentApp/admin.py | Python | gpl-3.0 | 118 | 0.008475 | from django.contrib import admin
from models import Content
# Register your models here | .
admin.site.r | egister(Content) |
jakobmoss/tsa | utils/makeweights.py | Python | mit | 2,350 | 0 | # -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Time Series Analysis -- Generate statistical weigts from scatter
#
# Author: Jakob Rørsted Mosumgaard
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
###############################################################################
# Modules
###############################################################################
from __future__ import print_function, with_statement, division
import numpy as np
import bottleneck as bn
###############################################################################
# Functions
###############################################################################
def genweight(datname, dpath, wpath):
"""
Combine time series with statistical weights calculated from scatter
Arguments:
- `datname`: Identifier of data file
- `dpath` : Path to data file (time series).
- `wpath` : Pa | th to scatter file (with same time points!)
"""
# Pretty print
print('Generating weights for {0} !'.format(dpath))
# Load data and weights
t, d = np.loadtxt | (dpath, unpack=True)
tt, sig = np.loadtxt(wpath, unpack=True)
# Check that times are indeed the same
tdif = t - tt
if tdif.any() != 0:
print('Error! Not the same time points! Quitting!')
exit()
# Moving variance (Hans: M = 50 - 100)
M = 70
movstd = bn.move_std(sig, M, min_count=1)
movvar = np.square(movstd)
# Remove first point
x = 1
t = t[x:]
d = d[x:]
movvar = movvar[x:]
# Calculate weights from scatter (1 / variance)
w = np.divide(1.0, movvar)
# Save
outfile = star + '_with-weights.txt'
np.savetxt(outfile, np.transpose([t, d, w]), fmt='%.15e', delimiter='\t')
# Done!
print('Done!\n')
###############################################################################
# Script
###############################################################################
if __name__ == "__main__":
# Definitions
datdir = '../../data/'
ext = '.txt'
append = '-high'
# Run for star 1
star = 'star01'
genweight(star, datdir + star + ext, star + append + ext)
# Run for star 2
star = 'star02'
genweight(star, datdir + star + ext, star + append + ext)
|
fablab-ka/OpenSCAD2D | src/openscad2d.py | Python | gpl-2.0 | 2,313 | 0.002162 | # pylint: disable-msg=E0611
from __future__ import print_function
import sys
from PySide import QtCore, QtGui
from src.documentwatcher import DocumentWatcher
from src.geometrywidget import GeometryWidget
from src.cadfileparser import FcadParser
from src.printcapturecontext i | mport PrintCaptureContext
from src.svggenerator import SvgGenerator
from src.geometrygenerator import GeometryGenerator
class OpenSCAD2D(object):
def __init__(self, filename):
self.screen_width, self.screen_height = 800.0, 600.0
self.file_generator = SvgGenerator()
self.geometry_generator = GeometryGenerator(self.screen_width, self.screen_height)
self.widget = None
self.watcher = None
self.loadFile(filename)
def | loadFile(self, filename):
self.filename = filename
if self.watcher:
self.watcher.stop_monitor()
self.watcher = DocumentWatcher(self.filename, self.on_file_change)
self.watcher.monitor()
self.parser = FcadParser(filename)
self.update()
def update(self):
with PrintCaptureContext() as capture_context:
self.parser = FcadParser(self.filename)
ast, error = self.parser.parse()
print("AST:", ast, ", Error:", error)
if not error:
data = self.geometry_generator.generate(ast)
else:
raise Exception(error)
if self.widget:
self.widget.setData(data, capture_context, error)
return data, capture_context, error
def run(self):
app = QtGui.QApplication(sys.argv)
app.setApplicationName("OpenSCAD2D")
app.setQuitOnLastWindowClosed(True)
app.setWindowIcon(QtGui.QIcon('../logo.png'))
data, capture_context, error = self.update()
self.widget = GeometryWidget(self.filename, data, capture_context, error, self.screen_width, self.screen_height, self.loadFile)
if self.watcher:
self.watcher.stop_monitor()
sys.exit(app.exec_())
def on_file_change(self):
self.update()
if __name__ == "__main__":
if len(sys.argv) <= 1:
#print "no"
program = OpenSCAD2D("../test/data/complex_example.fcad")
else:
program = OpenSCAD2D(sys.argv[1])
program.run()
|
talkincode/txradius | txradius/openvpn/setup_config.py | Python | lgpl-3.0 | 520 | 0.003846 | #!/usr/bin/env py | thon
# -*- coding: utf-8 -*-
confstr = '''[DEFAULT]
nas_id=txovpn
nas_coa_port=3799
nas_addr=127.0.0.1
radius_addr=127.0.0.1
radius_auth_port=18121
radius_acct_port=18131
radius_secret=secret
radius_timeout=3
acct_interval=60
session_timeout=864000
logfile=/var/log/txovpn.log
statusfile=/etc/openvpn/openvpn-status.log
statusdb=/etc/openvpn/txovpn.db
client_config_dir=/etc/openvpn/ccd
server_manage_addr=127.0.0.1:7505
'''
def echo( | ):
print confstr
if __name__ == '__main__':
print confstr
|
nylas/sync-engine | migrations/versions/144_update_calendar_index.py | Python | agpl-3.0 | 1,080 | 0.001852 | """Update Calendar index.
Revision ID: 1c73ca99c03b
Revises: 1d7a72222b7c
Create Date: 2015-02-26 00:50:52 | .322510
"""
# revision identifiers, used by Alembic.
revision = '1c73ca99c03b'
down_revision = '1d7a72222b7c'
from alembic import op
def upgrade():
op.drop_constraint('calendar_ibfk_1', 'calendar', type_='foreignkey')
op.drop_constraint('uuid', 'calendar', type_='unique')
op.create_index('uuid', 'calendar',
| ['namespace_id', 'provider_name', 'name', 'uid'], unique=True)
op.create_foreign_key('calendar_ibfk_1',
'calendar', 'namespace',
['namespace_id'], ['id'])
def downgrade():
op.drop_constraint('calendar_ibfk_1', 'calendar', type_='foreignkey')
op.drop_constraint('uuid', 'calendar', type_='unique')
op.create_index('uuid', 'calendar',
['namespace_id', 'provider_name', 'name'], unique=True)
op.create_foreign_key('calendar_ibfk_1',
'calendar', 'namespace',
['namespace_id'], ['id'])
|
isudox/leetcode-solution | python-algorithm/leetcode/problem_231.py | Python | mit | 1,321 | 0 | """231. Power of Two
https://leetcode.com/problems/power-of-two/
Given an integer n, return true if it is a power of two. Otherwise, return
false.
An integer n is a power of two, if there exists an integer x such that n ==
2^x.
Example | 1:
Input: n = 1
Output: true
Explanation: 2^0 = 1
Example 2:
Input: n = 16
Output: true
Explanation: 2^4 = 16
Example 3:
Input: n = 3
Output: false
Example 4:
Input: n = 4
Output: true
Example 5:
Input: n = 5
Output: false
Constraints:
-2^31 <= n <= 2^31 - 1
"""
class Solution:
def is_power_of_two(self, n: int) -> bool:
| if n <= 0:
return False
if n == 1:
return True
n, rem = divmod(n, 2)
if rem == 1:
return False
return self.is_power_of_two(n)
def is_power_of_two2(self, n: int) -> bool:
if n < 1:
return False
while n:
if n == 1:
return True
elif n & 1 == 1:
return False
n >>= 1
return True
def is_power_of_two3(self, n: int) -> bool:
return n > 0 and bin(n).count('1') == 1
def is_power_of_two4(self, n: int) -> bool:
return n > 0 and (n & (n - 1)) == 0
def is_power_of_two5(self, n: int) -> bool:
return n > 0 and (2 ** 30) % n == 0
|
calee88/ParlAI | parlai/tasks/opensubtitles/agents.py | Python | bsd-3-clause | 872 | 0 | # Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from parlai.core.fbdialog_teacher import FbDialogTeacher
from .build import build
import | copy
import os
def _path(opt, filtered):
# Build the data if it doesn't exist.
build(opt)
dt = opt['datatype'].split(':')[0]
return os.pa | th.join(opt['datapath'], 'OpenSubtitles',
dt + filtered + '.txt')
class DefaultTeacher(FbDialogTeacher):
def __init__(self, opt, shared=None):
opt = copy.deepcopy(opt)
opt['datafile'] = _path(opt, '')
opt['cands_datafile'] = opt['datafile']
super().__init__(opt, shared)
|
zappyk-github/zappyk-python | lib/lib_external/postgresql/bin/pg_python.py | Python | gpl-2.0 | 3,516 | 0.039818 | ##
# .bin.pg_python - Python console with a connection.
##
"""
Python command with a PG-API connection(``db``).
"""
import os
import sys
import re
import code
import optparse
import contextlib
from .. import clientparameters
from ..python import command as pycmd
from .. import project
from ..driver import default as pg_driver
from .. import exceptions as pg_exc
from .. import sys as pg_sys
from .. import lib as pg_lib
pq_trace = optparse.make_option(
'--pq-trace',
dest = 'pq_trace',
help = 'trace PQ protocol transmissions',
default = None,
)
default_options = [
pq_trace,
clientparameters.option_lib,
clientparameters.option_libpath,
] + pycmd.default_optparse_options
def command(argv = sys.argv):
p = clientparameters.DefaultParser(
"%prog [connection options] [script] ...",
version = project.version,
option_list = default_options
)
p.disable_interspersed_args()
co, ca = p.parse_args(argv[1:])
rv = 1
# Resolve the category.
pg_sys.libpath.insert(0, os.path.curdir)
pg_sys.libpath.extend(co.libpath or [])
if co.lib:
cat = pg_lib.Category(*map(pg_lib.load, co.lib))
else:
cat = None
trace_file = None
if co.pq_trace is not None:
trace_file = open(co.pq_trace, 'a')
try:
need_prompt = False
cond = None
connector = None
connection = None
while connection is None:
try:
cond = clientparameters.collect(parsed_options = co, prompt_title = None)
if need_prompt:
| # authspec error thrown last time, so force prompt.
cond[' | prompt_password'] = True
try:
clientparameters.resolve_password(cond, prompt_title = 'pg_python')
except EOFError:
raise SystemExit(1)
connector = pg_driver.fit(category = cat, **cond)
connection = connector()
if trace_file is not None:
connection.tracer = trace_file.write
connection.connect()
except pg_exc.ClientCannotConnectError as err:
for att in connection.failures:
exc = att.error
if isinstance(exc, pg_exc.AuthenticationSpecificationError):
sys.stderr.write(os.linesep + exc.message + (os.linesep*2))
# keep prompting the user
need_prompt = True
connection = None
break
else:
# no invalid password failures..
raise
pythonexec = pycmd.Execution(ca,
context = getattr(co, 'python_context', None),
loader = getattr(co, 'python_main', None),
)
builtin_overload = {
# New built-ins
'connector' : connector,
'db' : connection,
'do' : connection.do,
'prepare' : connection.prepare,
'sqlexec' : connection.execute,
'settings' : connection.settings,
'proc' : connection.proc,
'xact' : connection.xact,
}
if not isinstance(__builtins__, dict):
builtins_d = __builtins__.__dict__
else:
builtins_d = __builtins__
restore = {k : builtins_d.get(k) for k in builtin_overload}
builtins_d.update(builtin_overload)
try:
with connection:
rv = pythonexec(
context = pycmd.postmortem(os.environ.get('PYTHON_POSTMORTEM'))
)
exc = getattr(sys, 'last_type', None)
if rv and exc and not issubclass(exc, Exception):
# Don't try to close it if wasn't an Exception.
del connection.pq.socket
finally:
# restore __builtins__
builtins_d.update(restore)
for k, v in builtin_overload.items():
if v is None:
del builtins_d[x]
if trace_file is not None:
trace_file.close()
except:
pg_sys.libpath.remove(os.path.curdir)
raise
return rv
if __name__ == '__main__':
sys.exit(command(sys.argv))
##
# vim: ts=3:sw=3:noet:
|
anurag03/integration_tests | cfme/tests/services/test_pxe_service_catalogs.py | Python | gpl-2.0 | 5,998 | 0.002834 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from widgetastic.utils import partial_match
from cfme import test_requirements
from cfme.infrastructure.provider import InfraProvider
from cfme.infrastructure.provider.rhevm import RHEVMProvider
from cfme.infrastructure.pxe import get_pxe_server_from_config, get_template_from_config
from cfme.services.service_catalogs import ServiceCatalogs
from cfme.utils import testgen
from cfme.utils.blockers import BZ
from cfme.utils.conf import cfme_data
from cfme.utils.generators import random_vm_name
from cfme.utils.log import logger
pytestmark = [
pytest.mark.meta(server_roles="+automate"),
pytest.mark.usefixtures('uses_infra_providers'),
test_requir | ements.service,
pytest.mark.tier(2)
]
def pytest_generate_tests(metafunc):
# Filter out providers without provisioning data or hosts defined
argnames, argvalues, idlist = testgen.providers_by_class(
metafunc, [InfraProvider], required_fields=[
['provisioning', 'pxe_server'],
['provisioni | ng', 'pxe_image'],
['provisioning', 'pxe_image_type'],
['provisioning', 'pxe_kickstart'],
['provisioning', 'pxe_template'],
['provisioning', 'datastore'],
['provisioning', 'host'],
['provisioning', 'pxe_root_password'],
['provisioning', 'vlan']
])
pargnames, pargvalues, pidlist = testgen.pxe_servers(metafunc)
argnames = argnames
pxe_server_names = [pval[0] for pval in pargvalues]
new_idlist = []
new_argvalues = []
for i, argvalue_tuple in enumerate(argvalues):
args = dict(zip(argnames, argvalue_tuple))
if args['provider'].type == "scvmm":
continue
pxe_server_name = args['provider'].data['provisioning']['pxe_server']
if pxe_server_name not in pxe_server_names:
continue
pxe_cust_template = args['provider'].data['provisioning']['pxe_kickstart']
if pxe_cust_template not in cfme_data.get('customization_templates', {}).keys():
continue
new_idlist.append(idlist[i])
new_argvalues.append(argvalues[i])
testgen.parametrize(metafunc, argnames, new_argvalues, ids=new_idlist, scope="module")
@pytest.fixture(scope='module')
def pxe_server(appliance, provider):
    """PXE server object named in the provider's provisioning config."""
    provisioning_data = provider.data['provisioning']
    pxe_server_name = provisioning_data['pxe_server']
    return get_pxe_server_from_config(pxe_server_name, appliance=appliance)
@pytest.fixture(scope='module')
def pxe_cust_template(appliance, provider):
    """Kickstart customization template from the provider config;
    created on the appliance if it does not exist yet (create=True)."""
    provisioning_data = provider.data['provisioning']
    pxe_cust_template = provisioning_data['pxe_kickstart']
    return get_template_from_config(pxe_cust_template, create=True, appliance=appliance)
@pytest.fixture(scope="function")
def setup_pxe_servers_vm_prov(pxe_server, pxe_cust_template, provisioning):
    """Ensure the PXE server exists and is set to the configured image type."""
    if not pxe_server.exists():
        pxe_server.create()
    pxe_server.set_pxe_image_type(provisioning['pxe_image'], provisioning['pxe_image_type'])
@pytest.fixture(scope="function")
def catalog_item(appliance, provider, dialog, catalog, provisioning,
                 setup_pxe_servers_vm_prov):
    """Catalog item that PXE-provisions a VM from the provider's config."""
    # generate_tests makes sure these have values
    pxe_template, host, datastore, pxe_server, pxe_image, pxe_kickstart, pxe_root_password,\
        pxe_image_type, pxe_vlan = map(
            provisioning.get, (
                'pxe_template', 'host', 'datastore', 'pxe_server', 'pxe_image', 'pxe_kickstart',
                'pxe_root_password', 'pxe_image_type', 'vlan'
            )
        )
    # Shape of this dict mirrors the provisioning dialog tabs.
    provisioning_data = {
        'catalog': {'catalog_name': {'name': pxe_template, 'provider': provider.name},
                    'provision_type': 'PXE',
                    'pxe_server': pxe_server,
                    'pxe_image': {'name': pxe_image},
                    'vm_name': random_vm_name('pxe_service')},
        'environment': {'datastore_name': {'name': datastore},
                        'host_name': {'name': host}},
        'customize': {'root_password': pxe_root_password,
                      'custom_template': {'name': pxe_kickstart}},
        'network': {'vlan': partial_match(pxe_vlan)},
    }
    item_name = fauxfactory.gen_alphanumeric()
    return appliance.collections.catalog_items.create(
        provider.catalog_item_type,
        name=item_name,
        description="my catalog", display_in=True, catalog=catalog,
        dialog=dialog, prov_data=provisioning_data)
@pytest.mark.rhv1
@pytest.mark.meta(blockers=[BZ(1633540, forced_streams=['5.10'],
                               unblock=lambda provider: not provider.one_of(RHEVMProvider)),
                            BZ(1633516, forced_streams=['5.10'],
                               unblock=lambda provider: not provider.one_of(RHEVMProvider))])
@pytest.mark.usefixtures('setup_pxe_servers_vm_prov')
def test_pxe_servicecatalog(appliance, setup_provider, provider, catalog_item, request):
    """Tests RHEV PXE service catalog
    Metadata:
        test_flag: pxe, provision
    """
    vm_name = catalog_item.prov_data['catalog']["vm_name"]
    # Clean up the provisioned VM even when the assertions below fail.
    request.addfinalizer(
        lambda: appliance.collections.infra_vms.instantiate(
            "{}0001".format(vm_name), provider).cleanup_on_provider()
    )
    service_catalogs = ServiceCatalogs(appliance, catalog_item.catalog, catalog_item.name)
    service_catalogs.order()
    # nav to requests page happens on successful provision
    logger.info('Waiting for cfme provision request for service %s', catalog_item.name)
    request_description = catalog_item.name
    provision_request = appliance.collections.requests.instantiate(request_description,
                                                                   partial_check=True)
    # PXE installs are slow; allow up to an hour before failing.
    provision_request.wait_for_request(num_sec=3600)
    msg = "Provisioning failed with the message {}".format(provision_request.rest.message)
    assert provision_request.is_succeeded(), msg
tinloaf/home-assistant | tests/components/test_device_sun_light_trigger.py | Python | apache-2.0 | 3,831 | 0 | """The tests device sun light trigger component."""
# pylint: disable=protected-access
from datetime import datetime
from asynctest import patch
import pytest
from homeassistant.setup import async_setup_component
import homeassistant.loader as loader
from homeassistant.const import CONF_PLATFORM, STATE_HOME, STATE_NOT_HOME
from homeassistant.components import (
device_tracker, light, device_sun_light_trigger)
from homeassistant.util import dt as dt_util
from tests.common import async_fire_time_changed
from tests.components.light import common as common_light
@pytest.fixture
def scanner(hass):
    """Initialize components."""
    scanner = loader.get_component(
        hass, 'device_tracker.test').get_scanner(None, None)
    scanner.reset()
    # DEV1 starts at home so the sun-set trigger has someone to act on.
    scanner.come_home('DEV1')
    loader.get_component(hass, 'light.test').init()
    # Two known devices backed by the test scanner platform.
    with patch(
        'homeassistant.components.device_tracker.load_yaml_config_file',
        return_value={
            'device_1': {
                'hide_if_away': False,
                'mac': 'DEV1',
                'name': 'Unnamed Device',
                'picture': 'http://example.com/dev1.jpg',
                'track': True,
                'vendor': None
            },
            'device_2': {
                'hide_if_away': False,
                'mac': 'DEV2',
                'name': 'Unnamed Device',
                'picture': 'http://example.com/dev2.jpg',
                'track': True,
                'vendor': None}
        }):
        assert hass.loop.run_until_complete(async_setup_component(
            hass, device_tracker.DOMAIN, {
                device_tracker.DOMAIN: {CONF_PLATFORM: 'test'}
            }))
        assert hass.loop.run_until_complete(async_setup_component(
            hass, light.DOMAIN, {
                light.DOMAIN: {CONF_PLATFORM: 'test'}
            }))
    return scanner
async def test_lights_on_when_sun_sets(hass, scanner):
"""Test lights go on when there is someone home and the sun sets."""
test_time = datetime(2017, 4, 5, 1, 2, 3, tzinfo=dt_util.UTC)
with patch('homeassistant.util.dt.utcnow', return_value=test_time):
assert await async_setup_component(
hass, device_sun_light_trig | ger.DOMAIN, {
device_sun_light_trigger.DOMAIN: {}})
common_light.async_turn_off(hass)
await hass.async_block_till_done()
test_time = test_time.replace(hour=3)
with patch('homeassistant.util.dt.utcnow', return_value=test_time):
async_fire_time_changed(hass, test_time)
await hass.async_block_till_done() |
assert light.is_on(hass)
async def test_lights_turn_off_when_everyone_leaves(hass, scanner):
    """Test lights turn off when everyone leaves the house."""
    common_light.async_turn_on(hass)
    await hass.async_block_till_done()
    assert await async_setup_component(
        hass, device_sun_light_trigger.DOMAIN, {
            device_sun_light_trigger.DOMAIN: {}})
    # Marking the all-devices group not_home simulates everyone leaving.
    hass.states.async_set(device_tracker.ENTITY_ID_ALL_DEVICES,
                          STATE_NOT_HOME)
    await hass.async_block_till_done()
    assert not light.is_on(hass)
async def test_lights_turn_on_when_coming_home_after_sun_set(hass, scanner):
    """Test lights turn on when coming home after sun set."""
    # 03:02 UTC on this date is after sunset for the test location.
    test_time = datetime(2017, 4, 5, 3, 2, 3, tzinfo=dt_util.UTC)
    with patch('homeassistant.util.dt.utcnow', return_value=test_time):
        common_light.async_turn_off(hass)
        await hass.async_block_till_done()
        assert await async_setup_component(
            hass, device_sun_light_trigger.DOMAIN, {
                device_sun_light_trigger.DOMAIN: {}})
        # A tracked device arriving home after dark should switch lights on.
        hass.states.async_set(
            device_tracker.ENTITY_ID_FORMAT.format('device_2'), STATE_HOME)
        await hass.async_block_till_done()
        assert light.is_on(hass)
|
NetEaseGame/git-webhook | app/utils/DateUtil.py | Python | mit | 535 | 0 | # -*- coding: utf-8 -*-
'''
C | reated on 2015年8月24日
@author: hustcc
'''
import datetime
import time
# 当前时间,可用于mysql datetime
def now_datetime_string():
    """Current local time formatted for a MySQL DATETIME column."""
    return "{:%Y-%m-%d %H:%M:%S}".format(datetime.datetime.now())
def now_datetime():
    """Return the current local time as a naive datetime object."""
    return datetime.datetime.today()
def now_date_string():
    """Current local date as a 'YYYY-MM-DD' string."""
    return "{:%Y-%m-%d}".format(datetime.datetime.now())
def now_timestamp():
    """Seconds since the Unix epoch, as a float."""
    stamp = time.time()
    return stamp
if __name__ == '__main__':
prin | t(now_datetime())
print(now_timestamp())
print(now_date_string())
|
dcvetko/cybld | cybld/cybld_command_handler.py | Python | mit | 10,981 | 0.002823 | #!/usr/bin/python
# --------------------------------------------------------------------------
#
# MIT License
#
# --------------------------------------------------------------------------
import atexit
import logging
import os
import pty
import subprocess
import tempfile
import threading
import time
import termios
import errno
import re
from cybld import cybld_command_stats, cybld_talker, cybld_ipc_message, cybld_helpers
from cybld.cybld_config_command_group import CyBldConfigCommandGroup
from cybld.cybld_config_runner import CyBldConfigRunner
from cybld.cybld_config_settings import CyBldConfigSettings
from cybld.cybld_ipc_message import CyBldIpcMessage
from cybld.cybld_ipc_neovim import CyBldIpcNeovim
from cybld.cybld_runner import CyBldRunner
from cybld.cybld_shared_status import CyBldSharedStatus
# --------------------------------------------------------------------------
class CyBldCommandHandler:
"""
Helper class to set and execute commands.
Some notes:
- Commands are executed in a seperate thread and only
one command can be executed at any given time (busy flag)
- Commands can be changed while a command is running (not protected
by busy flag)
:param command_group: Refer to CyBldConfigCommandGroup.
:param runner_configs: All parsed runner configs.
:param settings: Refer to CyBldConfigSettings.
:param success_callback: Which function (i. e. notify success) to call
when the command returned 0.
:param fail_callback: Which function (i. e. notify fail) to call
when the command returned not 0 or if we
are busy.
"""
    def __init__(self, command_group: CyBldConfigCommandGroup,
                 runner_configs, settings: CyBldConfigSettings,
                 success_callback, fail_callback):
        # Both callbacks are mandatory: they are the only channel through
        # which command results are reported back.
        assert success_callback is not None
        assert fail_callback is not None
        self.command_group = command_group
        self.success_callback = success_callback
        self.fail_callback = fail_callback
        self.settings = settings
        self.runner_configs = runner_configs
        # Lazily grown list of CyBldRunner instances (see _initialize_runner).
        self.runners = []
        # Eagerly instantiate any runners referenced by cmd0/cmd1/cmd2.
        self._initialize_runners_startup()
        self.stats = cybld_command_stats.CyBldCommandStatsList()
        self.talker = cybld_talker.CyBldTalker(settings.talk)
        # Guards against concurrent command execution (see _exec_cmd).
        self.busy = False
        self.shared_status = CyBldSharedStatus(False, self.command_group.name,
                                               settings.tmux_refresh_status)
        self.talker.say_hello()
        # Say goodbye when the process exits, however it exits.
        atexit.register(self.talker.say_goodbye)
    def _initialize_runners_startup(self):
        """
        Initialize the runner configs in case a command matches a configured
        runner.
        """
        # Each of the three command slots may name a runner instead of a
        # plain shell command; load those runners up-front.
        if self.command_group.is_cmd0_runner():
            self._initialize_runner(self.command_group.cmd0)
        if self.command_group.is_cmd1_runner():
            self._initialize_runner(self.command_group.cmd1)
        if self.command_group.is_cmd2_runner():
            self._initialize_runner(self.command_group.cmd2)
    def _initialize_runner(self, runner_name: str):
        """
        Helper method to initialize a given runner.
        Note that a runner with the given name must exist (assert).
        :param runner_name: The name of the runner which should be initialized.
        """
        # Runner already loaded
        for runner in self.runners:
            if runner.config.name == runner_name:
                return
        # Find the runner config with the name
        valid_runner_config = None
        for runner_config in self.runner_configs:
            if runner_config.name == runner_name:
                valid_runner_config = runner_config
                break
        # Asserts when no configured runner matches runner_name.
        assert isinstance(valid_runner_config, CyBldConfigRunner)
        runner = CyBldRunner(valid_runner_config)
        self.runners.append(runner)
    def handle_incoming_ipc_message(self, ipc_message: CyBldIpcMessage):
        """
        Handle the incoming message by calling exec_cmd.
        Note that this quits immediately in case the codeword is invalid.
        :param ipc_message: The incoming command.
        """
        # Ignore messages addressed to other command groups.
        if not self.command_group.codeword_regex_matches(ipc_message.codeword):
            return
        if ipc_message.cmd_type == cybld_ipc_message.CyBldIpcMessageType.set_cmd:
            self._change_cmd(ipc_message.cmd_number, ipc_message.setcmd_param)
        elif ipc_message.cmd_type == cybld_ipc_message.CyBldIpcMessageType.exec_cmd:
            self._exec_cmd(ipc_message.cmd_number, ipc_message.nvim_ipc)
        else:
            # Any other message type indicates a programming error.
            assert False
def _change_cmd(self, cmd_number: int, new_cmd: str):
"""
Change the command cmd (cmd0, cmd1, cmd2) to new_cmd
:param cmd_number: The cmd_number which should be changed
:param new_cmd: The string of the new command
"""
if cmd_number is 0:
self.command_group.cmd0 = str(new_cmd)
elif cmd_number is 1:
self.command_group.cmd1 = str(new_cmd)
elif cmd_number is 2:
self.command_group.cmd2 = str(new_cmd)
else:
assert False
if self.command_group.is_cmd_runner_command(new_cmd):
self._initialize_runner(new_cmd)
logging.info("Setting {0} to {1}".format(str(cmd_number), str(new_cmd)))
cybld_helpers.print_seperator_lines()
def _exec_cmd(self, cmd_number: int, nvim_ipc: str):
"""
Execute the given command in a new thread (if we aren't busy)
:param cmd_number: The command number which should be executed
:param nvim_ipc: The NVIM IPC name, if available
"""
# TODO: give the option to kill the existing task instead
if self.busy is True:
self.fail_callback("currently busy")
return
cmd_translated = None
if cmd_number is 0:
cmd_translated = self.command_group.cmd0
elif cmd_number is 1:
cmd_translated = self.command_group.cmd1
elif cmd_number is 2:
cmd_translated = self.command_group.cmd2
if cmd_translated is not None:
task = threading.Thread(target = self._exec_cmd_helper, args = (cmd_translated,
nvim_ipc,))
task.start()
else:
assert False
def _exec_cmd_helper(self, cmd: str, nvim_ipc: str):
| """ |
Helper function to execute the given command and call the success/fail callbacks
:param cmd: The command (full string) which should be executed
:param nvim_ipc: The NVIM IPC name, if available
"""
assert self.busy is False
self.shared_status.set_running()
self.busy = True
os.system("clear")
logging.info("Executing cmd {0}".format(cmd))
start = time.time()
success = False
if self.command_group.is_cmd_runner_command(cmd):
for runner in self.runners:
if runner.config.name == cmd:
success = runner.run_all()
break
else:
# The code block below essentially just "tees" the stdout and
# stderr to a log file, while still preserving the terminal
# output (inclusive colors).
# Using subprocess.PIPE does not seem possible under Darwin,
# since the pipe does not have the isatty flag set (the isatty
# flag affects the color output).
# Note that the file is only written at the end and not streamed.
master, slave = pty.openpty()
# This prevents LF from being converted to CRLF
attr = termios.tcgetattr(slave)
attr[1] = attr[1] & ~termios.ONLCR
termios.tcsetattr(slave, termios.TCSADRAIN, attr)
proc = subprocess.Popen(cmd, shell=True, stdout=slave, stderr=slave, close_fds=False)
# C |
apbard/scipy | scipy/_lib/_numpy_compat.py | Python | bsd-3-clause | 23,885 | 0.000837 | """Functions copypasted from newer versions of numpy.
"""
from __future__ import division, print_function, absolute_import
import warnings
import sys
from warnings import WarningMessage
import re
from functools import wraps
import numpy as np
from scipy._lib._version import NumpyVersion
def import_nose():
    """Import and return nose on demand, requiring at least 1.0.0."""
    minimum_nose_version = (1, 0, 0)
    usable = True
    try:
        import nose
    except ImportError:
        usable = False
    else:
        usable = nose.__versioninfo__ >= minimum_nose_version
    if not usable:
        raise ImportError('Need nose >= %d.%d.%d for tests - see '
                          'http://nose.readthedocs.io' % minimum_nose_version)
    return nose
if NumpyVersion(np.__version__) > '1.7.0.dev':
_assert_warns = np.testing.assert_warns
else:
def _assert_warns(warning_class, func, *args, **kw):
r"""
Fail unless the given callable throws the specified warning.
This definition is copypasted from numpy 1.9.0.dev.
The version in earlier numpy returns None.
Parameters
----------
warning_class : class
The class defining the warning that `func` is expected to throw.
func : callable
The callable to test.
*args : Arguments
Arguments passed to `func`.
**kwargs : Kwargs
Keyword arguments passed to `func`.
Returns
-------
The value returned by `func`.
"""
with warnings.catch_warnings(record=True) as l:
warnings.simplefilter('always')
result = func(*args, **kw)
if not len(l) > 0:
raise AssertionError("No warning raised when calling %s"
% func.__name__)
if not l[0].category is warning_class:
raise AssertionError("First warning for %s is not a "
"%s( is %s)" % (func.__name__, warning_class, l[0]))
return result
def assert_raises_regex(exception_class, expected_regexp,
                        callable_obj=None, *args, **kwargs):
    """
    Fail unless an exception of class exception_class and with message that
    matches expected_regexp is thrown by callable when invoked with arguments
    args and keyword arguments kwargs.
    Name of this function adheres to Python 3.2+ reference, but should work in
    all versions down to 2.6.
    Notes
    -----
    .. versionadded:: 1.8.0
    """
    __tracebackhide__ = True  # Hide traceback for py.test
    nose = import_nose()
    # nose mirrors unittest's historical spelling: *_regexp on Python 2.
    if sys.version_info.major >= 3:
        funcname = nose.tools.assert_raises_regex
    else:
        # Only present in Python 2.7, missing from unittest in 2.6
        funcname = nose.tools.assert_raises_regexp
    return funcname(exception_class, expected_regexp, callable_obj,
                    *args, **kwargs)
if NumpyVersion(np.__version__) >= '1.10.0':
from numpy import broadcast_to
else:
# Definition of `broadcast_to` from numpy 1.10.0.
    def _maybe_view_as_subclass(original_array, new_array):
        """Re-view `new_array` as the class of `original_array` (ndarray
        subclass support), letting __array_finalize__ run if defined."""
        if type(original_array) is not type(new_array):
            # if input was an ndarray subclass and subclasses were OK,
            # then view the result as that subclass.
            new_array = new_array.view(type=type(original_array))
        # Since we have done something akin to a view from original_array, we
        # should let the subclass finalize (if it has it implemented, i.e., is
        # not None).
        if new_array.__array_finalize__:
            new_array.__array_finalize__(original_array)
        return new_array
    def _broadcast_to(array, shape, subok, readonly):
        """Core of the np.broadcast_to backport: produce a broadcast view
        of `array` with the given `shape` via an nditer view."""
        shape = tuple(shape) if np.iterable(shape) else (shape,)
        array = np.array(array, copy=False, subok=subok)
        if not shape and array.shape:
            raise ValueError('cannot broadcast a non-scalar to a scalar array')
        if any(size < 0 for size in shape):
            raise ValueError('all elements of broadcast shape must be non-'
                             'negative')
        # nditer with itershape yields a broadcast view of the operand.
        broadcast = np.nditer(
            (array,), flags=['multi_index', 'refs_ok', 'zerosize_ok'],
            op_flags=['readonly'], itershape=shape, order='C').itviews[0]
        result = _maybe_view_as_subclass(array, broadcast)
        # Broadcast views alias data, so writability is only restored when
        # explicitly requested and the source itself was writable.
        if not readonly and array.flags.writeable:
            result.flags.writeable = True
        return result
    def broadcast_to(array, shape, subok=False):
        """Read-only broadcast of `array` to `shape` (numpy<1.10 fallback)."""
        return _broadcast_to(array, shape, subok=subok, readonly=True)
if NumpyVersion(np.__version__) >= '1.9.0':
from numpy import unique
else:
# the return_counts keyword was added in 1.9.0
    def unique(ar, return_index=False, return_inverse=False, return_counts=False):
        """
        Find the sorted unique elements of an array.

        Fallback for numpy < 1.9.0, where the `return_counts` keyword did
        not exist yet. Taken over from numpy 1.12.0-dev (c8408bf9c); see
        the numpy documentation for examples.

        Parameters
        ----------
        ar : array_like
            Input array; flattened if it is not already 1-D.
        return_index : bool, optional
            Also return the indices of `ar` that give the unique values.
        return_inverse : bool, optional
            Also return the indices of the unique array that reconstruct `ar`.
        return_counts : bool, optional
            Also return the number of times each unique value occurs in `ar`.

        Returns
        -------
        The sorted unique values, or a tuple of the unique values followed
        by the optional index / inverse / count arrays in the order of the
        flags above.
        """
        ar = np.asanyarray(ar).flatten()
        optional_indices = return_index or return_inverse
        optional_returns = optional_indices or return_counts
        if ar.size == 0:
            if not optional_returns:
                ret = ar
            else:
                ret = (ar,)
                if return_index:
                    ret += (np.empty(0, np.bool),)
                if return_inverse:
                    ret += (np.empty(0, np.bool),)
                if return_counts:
                    ret += (np.empty(0, np.intp),)
            return ret
        if optional_indices:
            # mergesort is stable, so the *first* occurrence index is kept.
            perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
            aux = ar[perm]
        else:
            ar.sort()
            aux = ar
        # flag marks the first element of each run of equal values.
        flag = np.concatenate(([True], aux[1:] != aux[:-1]))
        if not optional_returns:
            ret = aux[flag]
        else:
            ret = (aux[flag],)
            if return_index:
                ret += (perm[flag],)
            if return_inverse:
                iflag = np.cumsum(flag) - 1
                inv_idx = np.empty(ar.shape, dtype=np.intp)
                inv_idx[perm] = iflag
                ret += (inv_idx,)
            if return_counts:
                # Run lengths are the gaps between consecutive run starts.
                idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
                ret += (np.diff(idx),)
        return ret
if NumpyVersion(np.__ |
OCA/l10n-italy | l10n_it_website_portal_fiscalcode/__manifest__.py | Python | agpl-3.0 | 585 | 0 | # Copyright 2019 Simone Rubino
# Copyright 2019 Lorenzo Battistini
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl).
{
"name": "ITA - Website portal fiscalcode",
"summary": "Add fiscal code to details of frontend user",
"version": "14.0.1. | 0.0",
"author": "Odoo Community Association (OCA)",
"category": "Localization/Italy",
"website": "https://github.com/OCA/l10n-italy",
"license": "AGPL-3",
"depends": ["l | 10n_it_fiscalcode", "portal"],
"data": ["views/l10n_it_website_portal_fiscalcode_templates.xml"],
"auto_install": True,
}
|
hgl888/chromium-crosswalk | third_party/mojo/src/mojo/public/tools/dart_pkg.py | Python | bsd-3-clause | 7,740 | 0.001034 | #!/usr/bin/env python
#
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Utility for dart_pkg and dart_pkg_app rules"""
import argparse
import errno
import os
import shutil
import sys
# Disable lint check for finding modules:
# pylint: disable=F0401
sys.path.insert(0, os.path.join(os.path.dirname(os.path.abspath(__file__)),
"bindings/pylib"))
from mojom.parse.parser import Parse
from mojom.parse.translate import Translate
USE_LINKS = sys.platform != "win32"
def mojom_dart_filter(path):
    """Accept directories and generated .mojom.dart files."""
    # Directories must pass so os.walk can descend into them;
    # plain .dart files are intentionally excluded.
    return True if os.path.isdir(path) else path.endswith('.mojom.dart')
def dart_filter(path):
    """Accept directories and any .dart file."""
    if os.path.isdir(path):
        return True
    # .dart includes '.mojom.dart'
    extension = os.path.splitext(path)[1]
    return extension == '.dart'
def mojom_filter(path):
    """Accept directories and .mojom source files."""
    return os.path.isdir(path) or os.path.splitext(path)[1] == '.mojom'
def ensure_dir_exists(path):
    """Create `path` (including parents) if it does not exist yet.

    Uses try/except instead of an exists() pre-check so a concurrent
    creator between the check and makedirs() cannot make us crash.
    """
    try:
        os.makedirs(path)
    except OSError as e:
        # Already existing is fine; anything else is a real failure.
        if e.errno != errno.EEXIST:
            raise
def has_pubspec_yaml(paths):
    """Return True when any path in `paths` has basename 'pubspec.yaml'."""
    return any(os.path.split(p)[1] == 'pubspec.yaml' for p in paths)
def link(from_root, to_root):
    """(Re)create a symlink at `to_root` pointing to `from_root`."""
    ensure_dir_exists(os.path.dirname(to_root))
    # lexists, unlike exists, is also True for dangling symlinks, so a
    # stale broken link gets replaced instead of tripping os.symlink.
    if os.path.lexists(to_root):
        os.unlink(to_root)
    try:
        os.symlink(from_root, to_root)
    except OSError as e:
        # A concurrent creator may win the race; any other error is real
        # and must not be silently swallowed.
        if e.errno != errno.EEXIST:
            raise
def copy(from_root, to_root, filter_func=None):
    """Copy `from_root` (file or tree) to `to_root`.

    `filter_func`, when given, receives absolute paths and must return
    truthy for every file *and* directory that should be copied; pruned
    directories are not descended into. Missing sources are ignored.
    """
    if not os.path.exists(from_root):
        return
    if os.path.isfile(from_root):
        ensure_dir_exists(os.path.dirname(to_root))
        shutil.copy(from_root, to_root)
        return
    ensure_dir_exists(to_root)
    for root, dirs, files in os.walk(from_root):
        # filter_func expects paths not names, so wrap it to make them absolute.
        wrapped_filter = None
        if filter_func:
            wrapped_filter = lambda name: filter_func(os.path.join(root, name))
        for name in filter(wrapped_filter, files):
            from_path = os.path.join(root, name)
            root_rel_path = os.path.relpath(from_path, from_root)
            to_path = os.path.join(to_root, root_rel_path)
            to_dir = os.path.dirname(to_path)
            if not os.path.exists(to_dir):
                os.makedirs(to_dir)
            shutil.copy(from_path, to_path)
        # Prune filtered-out directories in place so os.walk skips them.
        dirs[:] = filter(wrapped_filter, dirs)
dirs[:] = filter(wrapped_filter, dirs)
def copy_or_link(from_root, to_root, filter_func=None):
    """Symlink when the platform supports it (non-Windows), else copy."""
    if not USE_LINKS:
        copy(from_root, to_root, filter_func)
    else:
        link(from_root, to_root)
def list_files(from_root, filter_func=None):
    """Recursively list files under `from_root`.

    `filter_func` (absolute-path predicate) applies to both files and
    directories; rejected directories are not descended into.
    """
    found = []
    for root, dirs, files in os.walk(from_root):
        if filter_func is not None:
            keep = lambda name: filter_func(os.path.join(root, name))
        else:
            keep = None
        for name in filter(keep, files):
            found.append(os.path.join(root, name))
        dirs[:] = filter(keep, dirs)
    return found
def remove_broken_symlink(path):
    """Delete `path` if it is a symlink whose target no longer exists.

    Fixes two defects of the original: non-EINVAL readlink errors were
    silently swallowed, and the readlink() result (possibly relative to
    the link's directory) was resolved against the CWD, which could
    delete perfectly valid relative symlinks.
    """
    if not os.path.islink(path):
        # Regular files/dirs (and vanished paths) are left untouched.
        return
    # os.path.exists follows the link, so a dangling target yields False.
    if not os.path.exists(path):
        os.unlink(path)
def remove_broken_symlinks(root_dir):
    """Scrub every dangling symlink found anywhere under `root_dir`."""
    for current_dir, _, child_files in os.walk(root_dir):
        for fname in child_files:
            remove_broken_symlink(os.path.join(current_dir, fname))
def mojom_path(filename):
    """Compute the package-relative output path for a .mojom file from
    its parsed namespace, e.g. namespace a.b + name foo -> a/b/foo."""
    with open(filename) as f:
        source = f.read()
    tree = Parse(source, filename)
    _, name = os.path.split(filename)
    mojom = Translate(tree, name)
    # One path component per namespace element, ending in the module name.
    elements = mojom['namespace'].split('.')
    elements.append("%s" % mojom['name'])
    return os.path.join(*elements)
def main():
parser = argparse.ArgumentParser(description='Generate a dart-pkg')
pars | er.add_argument('--package-name',
action='store',
type=str,
metavar='package_name',
help='Name of p | ackage',
required=True)
parser.add_argument('--gen-directory',
metavar='gen_directory',
help="dart-gen directory",
required=True)
parser.add_argument('--pkg-directory',
metavar='pkg_directory',
help='Directory where dart_pkg should go',
required=True)
parser.add_argument('--package-root',
metavar='package_root',
help='packages/ directory',
required=True)
parser.add_argument('--stamp-file',
metavar='stamp_file',
help='timestamp file',
required=True)
parser.add_argument('--package-sources',
metavar='package_sources',
help='Package sources',
nargs='+')
parser.add_argument('--mojom-sources',
metavar='mojom_sources',
help='.mojom and .mojom.dart sources',
nargs='*',
default=[])
parser.add_argument('--sdk-ext-directories',
metavar='sdk_ext_directories',
help='Directory containing .dart sources',
nargs='*',
default=[])
args = parser.parse_args()
# We must have a pubspec.yaml.
assert has_pubspec_yaml(args.package_sources)
# Copy or symlink package sources into pkg directory.
target_dir = os.path.join(args.pkg_directory, args.package_name)
common_source_prefix = os.path.commonprefix(args.package_sources)
for source in args.package_sources:
relative_source = os.path.relpath(source, common_source_prefix)
target = os.path.join(target_dir, relative_source)
copy_or_link(source, target)
# Copy sdk-ext sources into pkg directory
sdk_ext_dir = os.path.join(target_dir, 'sdk_ext')
for directory in args.sdk_ext_directories:
sdk_ext_sources = list_files(directory, dart_filter)
common_prefix = os.path.commonprefix(sdk_ext_sources)
for source in sdk_ext_sources:
relative_source = os.path.relpath(source, common_prefix)
target = os.path.join(sdk_ext_dir, relative_source)
copy_or_link(source, target)
lib_path = os.path.join(target_dir, "lib")
lib_mojom_path = os.path.join(lib_path, "mojom")
# Copy generated mojom.dart files.
generated_mojom_lib_path = os.path.join(args.gen_directory, "mojom/lib")
for mojom_source_path in args.mojom_sources:
path = mojom_path(mojom_source_path)
source_path = '%s.dart' % os.path.join(generated_mojom_lib_path, path)
target_path = '%s.dart' % os.path.join(lib_mojom_path, path)
copy(source_path, target_path)
# Symlink packages/
package_path = os.path.join(args.package_root, args.package_name)
link(lib_path, package_path)
# Remove any broken symlinks in target_dir and package root.
remove_broken_symlinks(target_dir)
remove_broken_symlinks(args.package_root)
# Write stamp file.
with open(args.stamp_file, 'w'):
pass
if __name__ == '__main__':
sys.exit(main())
|
spaceone/pyjs | tests/test026.py | Python | apache-2.0 | 179 | 0 | class SchoolCalendarWidget:
def setDayIncluded(self, day, included):
if (self.daysFilter[day] == included):
retu | rn
self.daysFilter[ | day] = included
|
battlecat/Spirit | HelloWorld/HelloWorld/view.py | Python | mit | 96 | 0.03125 | from django.http import | HttpResponse
def hello(request):
return HttpResponse("Hell | o world ! ") |
leapcode/bitmask-dev | src/leap/bitmask/mail/imap/mailbox.py | Python | gpl-3.0 | 30,254 | 0 | # *- coding: utf-8 -*-
# mailbox.py
# Copyright (C) 2013-2015 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
IMAP Mailbox.
"""
import re
import os
import io
import cStringIO
import StringIO
import time
from collections import defaultdict
from email.utils import formatdate
from twisted.internet import defer
from twisted.internet import reactor
from twisted.logger import Logger
from twisted.mail import imap4
from zope.interface import implements
from leap.common.check import leap_assert
from leap.common.check import leap_assert_type
from leap.bitmask.mail.constants import INBOX_NAME, MessageFlags
from leap.bitmask.mail.imap.messages import IMAPMessage
# TODO LIST
# [ ] finish the implementation of IMailboxListener
# [ ] implement the rest of ISearchableMailbox
INIT_FLAGS = (MessageFlags.RECENT_FLAG, MessageFlags.LIST_FLAG)
def make_collection_listener(mailbox):
"""
Wrap a mailbox in a class that can be hashed according to the mailbox name.
This means that dicts or sets will use this new equality rule, so we won't
collect multiple instances of the same mailbox in collections like the
MessageCollection set where we keep track of listeners.
"""
class HashableMailbox(object):
def __init__(self, mbox):
self.mbox = mbox
# See #8083, pixelated adaptor introduces conflicts in the usage
self.mailbox_name = self.mbox.mbox_name + 'IMAP'
def __hash__(self):
return hash(self.mailbox_name)
def __eq__(self, other):
return self.mailbox_name == other.mbox.mbox_name + 'IMAP'
def notify_new(self):
self.mbox.notify_new()
return HashableMailbox(mailbox)
class IMAPMailbox(object):
"""
A Soledad-backed IMAP mailbox.
Implements the high-level method needed for the Mailbox interfaces.
The low-level database methods are contained in the generic
MessageCollection class. We receive an instance of it and it is made
accessible in the `collection` attribute.
"""
implements(
imap4.IMailbox,
imap4.IMailboxInfo,
imap4.ISearchableMailbox,
# XXX I think we do not need to implement CloseableMailbox, do we?
# We could remove ourselves from the collectionListener, although I
# think it simply will be garbage collected.
# imap4.ICloseableMailbox
imap4.IMessageCopier)
init_flags = INIT_FLAGS
CMD_MSG = "MESSAGES"
CMD_RECENT = "RECENT"
CMD_UIDNEXT = "UIDNEXT"
CMD_UIDVALIDITY = "UIDVALIDITY"
CMD_UNSEEN = "UNSEEN"
log = Logger()
# TODO we should turn this into a datastructure with limited capacity
_listeners = defaultdict(set)
def __init__(self, collection, rw=1):
"""
:param collection: instance of MessageCollection
:type collection: MessageCollection
:param rw: read-and-write flag for this mailbox
:type rw: int
"""
self.rw = rw
self._uidvalidity = None
self.collection = collection
self.collection.addListener(make_collection_listener(self))
@property
def mbox_name(self):
return self.collection.mbox_name
@property
def listeners(self):
"""
Returns listeners for this mbox.
The server itself is a listener to the mailbox.
so we can notify it (and should!) after changes in flags
and number of messages.
:rtype: set
"""
return self._listeners[self.mbox_name]
def get_imap_message(self, message):
d = defer.Deferred()
IMAPMessage(message, store=self.collection.store, d=d)
return d
# FIXME this grows too crazily when many instances are fired, like
# during imaptest stress testing. Should have a queue of limited size
# instead.
def addListener(self, listener):
"""
Add a listener to the listeners queue.
The server adds itself as a listener when there is a SELECT,
so it can send EXIST commands.
:param listener: listener to add
:type listener: an object that implements IMailboxListener
"""
listeners = self.listeners
self.log.debug('Adding mailbox listener: %s. Total: %s' % (
listener, len(listeners)))
listeners.add(listener)
def removeListener(self, listener):
"""
Remove a listener from the listeners queue.
:param listener: listener to remove
:type listener: an object that implements IMailboxListener
"""
self.listeners.remove(listener)
def getFlags(self):
"""
Returns the flags defined for this mailbox.
:returns: tuple of flags for this mailbox
:rtype: tuple of str
"""
flags = self.collection.mbox_wrapper.flags
if not flags:
flags = self.init_flags
flags_str = map(str, flags)
return flags_str
def setFlags(self, flags):
"""
Sets flags for this mailbox.
:param flags: a tuple with the flags
:type flags: tuple of str
"""
# XXX this is setting (overriding) old flags.
# Better pass a mode flag
leap_assert(isinstance(flags, tuple),
"flags expected to be a tuple")
return self.collection.set_mbox_attr("flags", flags)
def getUIDValidity(self):
"""
Return the unique validity identifier for this mailbox.
:return: unique validity identifier
:rtype: int
"""
return self.collection.get_mbox_attr("created")
def getUID(self, message_number):
"""
Return the UID of a message in the mailbox
.. note:: this implementation does not make much sense RIGHT NOW,
but in the future will be useful to get absolute UIDs from
message sequence numbers.
:param message: the message sequence number.
:type message: int
| :rtype: int
:return: the UID of the message.
"""
# TODO support relative sequences. The (imap) message should
# receive a sequence number attribute: a deferred is not expected
return message_number
def getUIDNext(self):
"""
Return the likely UID for the next message added to this
| mailbox. Currently it returns the higher UID incremented by
one.
:return: deferred with int
:rtype: Deferred
"""
d = self.collection.get_uid_next()
return d
def getMessageCount(self):
"""
Returns the total count of messages in this mailbox.
:return: deferred with int
:rtype: Deferred
"""
return self.collection.count()
def getUnseenCount(self):
"""
Returns the number of messages with the 'Unseen' flag.
:return: count of messages flagged `unseen`
:rtype: int
"""
return self.collection.count_unseen()
def getRecentCount(self):
"""
Returns the number of messages with the 'Recent' flag.
:return: count of messages flagged `recent`
:rtype: int
"""
return self.collection.count_recent()
def isWriteable(self):
"""
Get the read/write status of the mailbox.
:return: 1 if mailbox is read-writeable, 0 otherwise.
:rtype: int
"""
# XXX We don't need to store it in the mbox doc, do we?
# return int(self.collection.get_mbox_attr('rw'))
return self.rw |
manassolanki/erpnext | erpnext/buying/utils.py | Python | gpl-3.0 | 3,898 | 0.025654 | # Copyright (c) 2015, Frapp | e Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, cstr, cint
from frappe import _
import json
from erpnext.stock.doctype.item.item import get_last_purchase_details
from erpnext.stock.doctype.item.item import validate_end_of_life
def update_last_purchase_rate(doc, is_submit):
"""updates last_purchase_rate in it | em table for each item"""
import frappe.utils
this_purchase_date = frappe.utils.getdate(doc.get('posting_date') or doc.get('transaction_date'))
for d in doc.get("items"):
# get last purchase details
last_purchase_details = get_last_purchase_details(d.item_code, doc.name)
# compare last purchase date and this transaction's date
last_purchase_rate = None
if last_purchase_details and \
(last_purchase_details.purchase_date > this_purchase_date):
last_purchase_rate = last_purchase_details['base_rate']
elif is_submit == 1:
# even if this transaction is the latest one, it should be submitted
# for it to be considered for latest purchase rate
if flt(d.conversion_factor):
last_purchase_rate = flt(d.base_rate) / flt(d.conversion_factor)
else:
frappe.throw(_("UOM Conversion factor is required in row {0}").format(d.idx))
# update last purchsae rate
if last_purchase_rate:
frappe.db.sql("""update `tabItem` set last_purchase_rate = %s where name = %s""",
(flt(last_purchase_rate), d.item_code))
def validate_for_items(doc):
items = []
for d in doc.get("items"):
if not d.qty:
if doc.doctype == "Purchase Receipt" and d.rejected_qty:
continue
frappe.throw(_("Please enter quantity for Item {0}").format(d.item_code))
# update with latest quantities
bin = frappe.db.sql("""select projected_qty from `tabBin` where
item_code = %s and warehouse = %s""", (d.item_code, d.warehouse), as_dict=1)
f_lst ={'projected_qty': bin and flt(bin[0]['projected_qty']) or 0, 'ordered_qty': 0, 'received_qty' : 0}
if d.doctype in ('Purchase Receipt Item', 'Purchase Invoice Item'):
f_lst.pop('received_qty')
for x in f_lst :
if d.meta.get_field(x):
d.set(x, f_lst[x])
item = frappe.db.sql("""select is_stock_item,
is_sub_contracted_item, end_of_life, disabled from `tabItem` where name=%s""",
d.item_code, as_dict=1)[0]
validate_end_of_life(d.item_code, item.end_of_life, item.disabled)
# validate stock item
if item.is_stock_item==1 and d.qty and not d.warehouse and not d.get("delivered_by_supplier"):
frappe.throw(_("Warehouse is mandatory for stock Item {0} in row {1}").format(d.item_code, d.idx))
items.append(cstr(d.item_code))
if items and len(items) != len(set(items)) and \
not cint(frappe.db.get_single_value("Buying Settings", "allow_multiple_items") or 0):
frappe.throw(_("Same item cannot be entered multiple times."))
def check_for_closed_status(doctype, docname):
status = frappe.db.get_value(doctype, docname, "status")
if status == "Closed":
frappe.throw(_("{0} {1} status is {2}").format(doctype, docname, status), frappe.InvalidStatusError)
@frappe.whitelist()
def get_linked_material_requests(items):
items = json.loads(items)
mr_list = []
for item in items:
material_request = frappe.db.sql("""SELECT distinct mr.name AS mr_name,
(mr_item.qty - mr_item.ordered_qty) AS qty,
mr_item.item_code AS item_code,
mr_item.name AS mr_item
FROM `tabMaterial Request` mr, `tabMaterial Request Item` mr_item
WHERE mr.name = mr_item.parent
AND mr_item.item_code = %(item)s
AND mr.material_request_type = 'Purchase'
AND mr.per_ordered < 99.99
AND mr.docstatus = 1
AND mr.status != 'Stopped'
ORDER BY mr_item.item_code ASC""",{"item": item}, as_dict=1)
if material_request:
mr_list.append(material_request)
return mr_list
|
takeTrace/UrHouseBot | UrHouseBot/UrHouseBot/middlewares.py | Python | mit | 7,809 | 0.001194 | # -*- coding: utf-8 -*-
# Define here the models for your spider middleware
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/spider-middleware.html
from UrHouseBot.settings import PROXY_HOST
from scrapy import signals, Request
import requests
from UrHouseBot.spiders import doubanGroup
proxy_host = PROXY_HOST
class UrhousebotSpiderMiddleware(object):
# Not all methods need to be defined. If a method is not defined,
# scrapy acts as if the spider middleware does not modify the
# passed objects.
@classmethod
def from_crawler(cls, crawler):
# This method is used by Scrapy to create your spiders.
s = cls()
crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
return s
def process_spider_input(response, spider):
# Called for each response that goes through the spider
# middleware and into the spider.
# Should return None or raise an exception.
return None
def process_spider_output(response, result, spider):
# Called with the results returned from the Spider, after
# it has processed the response.
# Must return an iterable of Request, dict or Item objects.
for i in result:
yield i
def process_spider_exception(response, exception, spider):
# Called when a spider or process_spider_input() method
# (from other spider middleware) raises an exception.
# Should return either None or an iterable of Response, dict
# or Item objects.
pass
def process_start_requests(start_requests, spider):
# Called with the start requests of the spider, and works
# similarly to the process_spider_output() method, except
# that it doesn’t have a response associated.
# Must return only requests (not items).
for r in start_requests:
yield r
def spider_opened(self, spider):
spider.logger.info('Spider opened: %s' % spider.name)
# -*- coding:utf-8 -*-
import logging
import random
import time
class RandomDelayMiddleware(object):
def __init__(self, delay):
self.delay = delay
@classmethod
def from_crawler(cls, crawler):
delay = crawler.spider.settings.get("RANDOM_DELAY", 2)
if not isinstance(delay, int):
raise ValueError("RANDOM_DELAY need a int")
return cls(delay)
def process_request(self, request, spider):
# print("don't delay cause there is proxy change")
delay = random.randint(0, self.delay)
logging.debug("### random delay: %s s ###" % delay)
time.sleep(delay)
from fake_useragent import UserAgent
class RandomUserAgentMiddlware(object):
# 随机更换user-agent
def __init__(self, crawler):
super(RandomUserAgentMiddlware, self).__init__()
self.ua = UserAgent()
self.ua_type = crawler.settings.get("RANDOM_UA_TYPE", "random")
@classmethod
def from_crawler(cls, crawler):
return cls(crawler)
def process_request(self, request, spider):
def get_ua():
return getattr(self.ua, self.ua_type)
ua = get_ua()
logging.debug(f"USerAgen: {ua}")
request.headers.setdefault('User-Agent', ua)
def getSmartProxy():
res = requests.get(f'{proxy_host}:35050/api/v1/proxy/?region=中国')
if res.status_code == 200:
p = res.json()['data']['proxy']
return f'http://{p}'
else:
print(f'XXXXXXXXXXXXXXXXXXXXXXX没有拿到代理: {res.content}')
return None
def proxyFromproxy_pool():
res = requests.get(f'{proxy_host}:5010/get')
if res.status_code == 200:
p = res.json()['data']['proxy']
return f'http://{p}'
else:
logging.debug(f'XXXXXXXXXXXXXXXXXXXXXXX没有拿到代理: {res.content}')
return None
def proxyFromProxyPool():
res = requests.get(f'{proxy_host}:5555/random')
if res.status_code == 200:
p = f'http://{res.text.strip()}'
return p
else:
logging.debug(f'XXXXXXXXXXXXXXXXXXXXXXX没有拿到代理')
return None
class ProxyMiddleware(object):
count = 51
proxy = "null"
def process_request(self, request, spider):
self.useProxyPoolRepo(request)
# self.useSmartProxyPoolRepo(request)
# self.useWebSpider(request)
def useProxyPoolRepo(self, request):
proxy = proxyFromProxyPool()
fail_times = 1
while proxy and doubanGroup.redirectCount.get(proxy, 0) > 10:
# time.sleep(fail_times)
fail_times = fail_times + 1
logging.debug(
f'代理 {proxy} block 过多, {doubanGroup.redirectCount.get(proxy, 0)}, 重新获取 {fail_times} 次'
)
proxy = proxyFromProxyPool()
logging.debug(f'成功: {proxy}')
request.meta["proxy"] = proxy
logging.debug(f'获取代理: {proxy} -> {request.url}')
def useWebSpider(self, request):
p = proxyFromproxy_pool()
times = 1
while p or doubanGroup.redirectCount.get(p, 0) > 10:
time.sleep(times)
times = times + 1
logging.debug(
f'代理 {p} block 过多, {doubanGroup.redirectCount.get(p, 0)}, 重新获取 {times} 次'
)
p = proxyFromproxy_pool()
logging.debug(f'成功: {p}')
request.meta["proxy"] = p
logging.debug(f'获取代理: {p} -> {request.url}')
def useSmartProxyPoolRepo(self, request):
p = getSmartProxy()
times = 1
while p and doubanGroup.redirectCount.get(p, 0) > 10:
time.sleep(times)
times = times + 1
logging.debug(
f'代理 {p} block 过多, {doubanGroup.redirectCount.get(p, 0)}, 重新获取 {times} 次'
)
p = getSmartProxy()
logging.debug(f'成功: {p}')
request.meta["proxy"] = p
logging.debug(f'获取代理: {p} -> {request.url}')
class Redirect302Middleware(object):
def process_response(self, request, response, spider):
if response.status in [200]:
return response
elif response.status in [302, 403]:
logging.debug(
f'重定向或者403, reschedule 请求 💫💫💫💫💫💫💫💫................\n{request.url}'
)
proxy = request.meta.get('proxy', 'No proxy')
blockCount = doubanGroup.redirectCount.get(proxy, 0)
blockCount = blockCount + 1
doubanGroup.redirectCount[proxy] = blockCount
logging.debug(f'本次302/403代理: ip: {proxy}')
if blockCount > 10:
logging.info(
f'[代理失效]: 302/403 次数: {blockCount}, block 过多, 代理有毒! 🤬🤬🤬🤬🤬🤬🤬'
)
else:
logging.debug(f'[代理失效]: 302/403 次数: {blockCount}')
# meta = request.meta
# delay = meta.get('302delay', 0)
# delay = delay + 5
# meta['302delay'] = delay
return Request(request.url,
dont_filter=True,
headers=request.headers,
callback=request.callback,
meta=request.meta)
elif response.status in [404]:
return response
else:
return response
class RedirectDelayMiddleware(object):
def process_r | equest(self, reque | st, spider):
delay = request.meta.get('302delay', 0)
if delay > 0:
logging.debug(f'🌙🌙🌙🌙🌙之前302, 这次请求睡眠: {delay}: {request.url}')
time.sleep(delay)
return request
|
ossem/member_database | nfc_lib.py | Python | apache-2.0 | 1,047 | 0.010506 | import random
def append_letter_or_number():
alphabet = ['a','b','c','d','e','f']
| use_number = 0
use_letter = 1
letter_or_string = random.randrange(2)
if letter_or_string == use_number:
result = str(random.randrange(0,9))
elif letter_or_string == use_letter:
next_character = random.randrange(len(alphabet))
result = str(alphabet[next_character])
else: |
print("Uh-oh! You've got a bug. This should have selected number or letter.")
return -1
return result
# generates a random 16-byte NFC ID tag when a NFC is unavailable
def create_nfc_tag():
random_nfc_tag = ''
tag_size = 7 # number of hex pairs
end_of_tag = tag_size - 1
current_byte = 0
byte_half = 0
while current_byte < tag_size:
while byte_half != 2:
random_nfc_tag += append_letter_or_number()
byte_half += 1
if current_byte != end_of_tag:
random_nfc_tag += ':'
current_byte += 1
byte_half = 0
return random_nfc_tag
|
arthurwolf/fabrica | smoothie-upload.py | Python | gpl-3.0 | 2,305 | 0.019089 | #!/usr/bin/env python
"""\
Upload a file to Smoothie over the network
"""
from __future__ import print_function
import sys
import argparse
import socket
import os
# Define command line argument interface
parser = argparse.ArgumentParser(description='Upload a file to Smoothie over network.')
parser.add_argument('file', type=argparse.FileType('r'),
help='filename to be uploaded')
parser.add_argument('ipaddr',
help='Smoothie IP address')
parser.add_argument('-v','--verbose',action='store_true',
help='Show data being uploaded')
parser.add_argument('-o','--output',
help='Set output filename')
parser.add_argument('-q','--quiet',action='store_true',
help='suppress all output to terminal')
args = parser.parse_args()
f = args.file
verbose = args.verbose
output = args.output
if output == None :
output= args.file.name
filesize= os.path.getsize(args.file.name)
if not args.quiet : print("Uploading " + args.file.name + " to " + | args.ipaddr + " as " + output + " size: " + str(filesize) )
# make connection to sftp server
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(4.0)
s.connect((args.ipaddr, 115))
tn= s.makefile()
# read startup prompt
ln= tn.readline()
if not ln.startswith("+") :
| print("Failed to connect with sftp: " + ln)
sys.exit();
if verbose: print("RSP: " + ln.strip())
# Issue initial store command
tn.write("STOR OLD /sd/" + output + "\n")
tn.flush()
ln= tn.readline()
if not ln.startswith("+") :
print("Failed to create file: " + ln)
sys.exit();
if verbose: print("RSP: " + ln.strip())
# send size of file
tn.write("SIZE " + str(filesize) + "\n")
tn.flush()
ln= tn.readline()
if not ln.startswith("+") :
print("Failed: " + ln)
sys.exit();
if verbose: print("RSP: " + ln.strip())
cnt= 0
# now send file
for line in f:
tn.write(line)
if verbose :
print("SND: " + line.strip())
elif not args.quiet :
cnt += len(line)
print(str(cnt) + "/" + str(filesize) + "\r", end='')
tn.flush()
ln= tn.readline()
if not ln.startswith("+") :
print("Failed to save file: " + ln)
sys.exit();
if verbose: print("RSP: " + ln.strip())
# exit
tn.write("DONE\n")
tn.flush()
ln= tn.readline()
tn.close()
f.close()
if not args.quiet : print("Upload complete")
|
acsone/acsone-addons | hr_timesheet_project_access_restriction/models/__init__.py | Python | agpl-3.0 | 63 | 0 | # -*- coding: utf-8 -*-
from . | import account_analytic_ac | count
|
ianawilson/kafka-python | kafka/producer/simple.py | Python | apache-2.0 | 1,893 | 0.001585 | from __future__ import absolute_import
from itertools import cycle
import logging
import random
import six
from six.moves import xrange
from .base import Producer
log = logging.getLogger(__name__)
class SimpleProducer(Producer):
"""A simple, round-robin producer.
See Producer class for Base Arguments
Additional Arguments:
random_start (bool, optional): randomize the initial partition which
the first message block will be published to, otherwise
if false, the first message block will always publish
to partition 0 before cycling through each partition,
defaults to True.
"""
def __init__(self, *args, **kwargs):
self.partition_cycles = {}
self.random_start = kwargs.pop('random_start', True)
super(SimpleP | roducer, self)._ | _init__(*args, **kwargs)
def _next_partition(self, topic):
if topic not in self.partition_cycles:
if not self.client.has_metadata_for_topic(topic):
self.client.load_metadata_for_topics(topic)
self.partition_cycles[topic] = cycle(self.client.get_partition_ids_for_topic(topic))
# Randomize the initial partition that is returned
if self.random_start:
num_partitions = len(self.client.get_partition_ids_for_topic(topic))
for _ in xrange(random.randint(0, num_partitions-1)):
next(self.partition_cycles[topic])
return next(self.partition_cycles[topic])
def send_messages(self, topic, *msg):
if not isinstance(topic, six.binary_type):
topic = topic.encode('utf-8')
partition = self._next_partition(topic)
return super(SimpleProducer, self).send_messages(
topic, partition, *msg
)
def __repr__(self):
return '<SimpleProducer batch=%s>' % self.async
|
alexhunterlang/natural_bm | natural_bm/datasets/random.py | Python | mit | 1,299 | 0.009238 | """Tiny dataset that is just used for tests """
#%%
import numpy as np
from natural_bm.datasets.common import Dataset
#%%
def _make_random(data_type):
num_pixels = 10
dataset = {}
for dset in ['train', 'valid', 'test']:
if dset == 'train':
num_samples = 12
else:
| num_samples = 6
if data_type == 'probability':
dataset[dset+'.data'] = np.random.u | niform(size=(num_samples, num_pixels))
else:
dataset[dset+'.data'] = np.random.randint(2, size=(num_samples, num_pixels))
dataset[dset+'.lbl'] = np.random.randint(2, size=(num_samples,))
return dataset
#%%
class Random(Dataset):
def __init__(self, datatype):
super().__init__('random', datatype)
def _create_probability(self):
dataset = _make_random('probability')
# save the dataset
np.savez_compressed(self.savename, **dataset)
def _create_sampled(self):
dataset = _make_random('sampled')
# save the dataset
np.savez_compressed(self.savename, **dataset)
def _create_threshold(self):
dataset = _make_random('threshold')
# save the dataset
np.savez_compressed(self.savename, **dataset)
|
nimily/backgammon-ai | backgammon/utils.py | Python | gpl-3.0 | 878 | 0 | from collections import Counter
def is_subplay(moves, play):
play_counter = Counter()
for m in play[0]:
play_counter[m] += 1
moves_counter = Counter()
for m in moves:
moves_counter[m] += 1
for m, c in moves_counter.items():
if m not in play_counter.keys():
return False
if c > play_counter[m]:
| return False
return True
def is_legal_play(moves, plays):
"""
:param moves: candidate moves
:param plays: set of all legal plays to be considered
:return:
"""
moves = sorted(moves)
for play in plays:
match = True
if len(play[0]) != len(moves):
continue
for m1, m2 in zip(moves, sorted(play[0])):
if m1 != m2:
match = False
| break
if match:
return True
return False
|
ConsenSys/testrpc | tests/endpoints/test_personal_newAccount.py | Python | mit | 283 | 0 | def test_ | personal_newAccount(rpc_client):
initial_accounts = rpc_client('personal_listAccounts')
new_account = rpc_client('personal_newAccount', ['some-pass | word'])
assert new_account not in initial_accounts
assert new_account in rpc_client('personal_listAccounts')
|
sonyahanson/yank | Yank/tests/test_examples.py | Python | lgpl-3.0 | 1,826 | 0.011501 | #!/usr/bin/python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
Test examples.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os, os.path
import subprocess
from openmmtools import testsystems
from nose.plugins.skip import Skip, SkipTest
from nose.plugins.attrib import attr
#=============================================================================================
# UNIT TESTS
#=============================================================================================
def run_example(path, example):
# Change to example directory.
cwd = os.getcwd()
os.chdir(os.path.join(path, example))
# Execute one iteration of the example.
import subprocess
returncode = subprocess.call('NITERATIONS | =1 ./run.sh', shell=True, executable='/bin/bash')
# Restore working directory.
os.chdir(cwd)
if returncode:
raise Exception('Example %s returned exit code %d' % (example, returncode))
return
def get_immediate_subdirectories(path):
return [name for name in os.listdir(path) if os.path.isdir(os.path.join(path, name)) and os.path.exi | sts(os.path.join(path, name, 'run.sh'))]
@attr('slow') # Skip on Travis-CI
def test_examples():
# Get example path.
from pkg_resources import resource_filename
path = resource_filename('yank', '../examples')
# Get list of examples.
directories = get_immediate_subdirectories(path)
# Test examples
for directory in directories:
run_example(path, directory)
|
hpproliant/proliantutils | proliantutils/ilo/ribcl.py | Python | apache-2.0 | 17,546 | 0.005813 | # Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Provides iLO management interface. Talks to the iLO management engine
over RIBCL scripting language
"""
import re
import six
import urllib2
import xml.etree.ElementTree as etree
POWER_STATE = {
'ON': 'Yes',
'OFF': 'No',
}
BOOT_MODE_CMDS = [
'GET_CURRENT_BOOT_MODE',
'GET_PENDING_BOOT_MODE',
'GET_SUPPORTED_BOOT_MODE',
'SET_PENDING_BOOT_MODE'
]
class IloError(Exception):
"""This exception is used when a problem is encountered in
executing an operation on the iLO
"""
def __init__(self, message, errorcode=None):
super(IloError, self).__init__(message)
class IloClientInternalError(IloError):
"""This exception is raised when iLO client library fails to
communicate properly with the iLO
"""
def __init__(self, message, errorcode=None):
super(IloError, self).__init__(message)
class IloCommandNotSupportedError(IloError):
"""This exception is raised when iLO client library fails to
communicate properly with the iLO
"""
def __init__(self, message, errorcode=None):
super(IloError, self).__init__(message)
class IloLoginFailError(IloError):
"""This exception is used to communicate a login failure to
the caller.
"""
messages = ['User login name was not found',
'Login failed', 'Login credentials rejected']
statuses = [0x005f, 0x000a]
class IloConnectionError(IloError):
"""This exception is used to communicate an HTTP connection
error from the iLO to the caller.
"""
def __init__(self, message):
super(IloConnectionError, self).__init__(message)
class IloInvalidInputError(IloError):
"""This exception is used when invalid inputs are passed to
the APIs exposed by this module.
"""
def __init__(self, message):
super(IloInvalidInputError, self).__init__(message)
class IloClient:
"""iLO class for RIBCL interface for iLO.
This class provides an OO interface for retrieving information
and managing iLO. This class currently uses RIBCL scripting
language to talk to the iLO. It implements the same interface in
python as described in HP iLO 4 Scripting and Command Line Guide.
"""
def __init__(self, host, login, password, timeout=60, port=443):
self.host = host
self.login = login
self.password = password
self.timeout = timeout
self.port = port
def _request_ilo(self, root):
"""This function sends the XML request to the ILO and
receives the output from ILO.
:raises: IloConnectionError() if
unable to send the request.
"""
if self.port:
urlstr = 'https://%s:%d/ribcl' % (self.host, self.port)
else:
urlstr = 'https://%s/ribcl' % (self.host)
xml = self._serialize_xml(root)
try:
req = urllib2.Request(url=urlstr, data=xml)
req.add_header("Content-length", len(xml))
data = urllib2.urlopen(req).read()
except (ValueError, urllib2.URLError, urllib2.HTTPError) as e:
raise IloConnectionError(e)
return data
def _create_dynamic_xml(self, cmdname, tag_name, mode, subelements=None):
"""This function creates the dynamic xml required to be sent
to the ILO for all the APIs.
:param cmdname: the API which needs to be implemented.
:param tag_name: the tag info under which ILO has defined
the particular API.
:param mode: 'read' or 'write'
:param subelements: dictionary containing subelements of the
particular API tree.
:returns: the etree.Element for the root of the RIBCL XML
"""
root = etree.Element('RIBCL', VERSION="2.0")
login = etree.SubElement(
root, 'LOGIN', USER_LOGIN=self.login, PASSWORD=self.password)
tagname = etree.SubElement(login, tag_name, MODE=mode)
subelements = subelement | s or {}
etree.SubElement(tagname, cmdname)
if six.PY2:
root_iterator = root.getiterator(cmdname)
else:
r | oot_iterator = root.iter(cmdname)
for cmd in root_iterator:
for key, value in subelements.items():
cmd.set(key, value)
return root
def _serialize_xml(self, root):
"""It serializes the dynamic xml created and converts
it to a string. This is done before sending the
xml to the ILO.
:param root: root of the dynamic xml.
"""
if hasattr(etree, 'tostringlist'):
xml = '\r\n'.join(etree.tostringlist(root)) + '\r\n'
else:
xml = etree.tostring(root) + '\r\n'
return xml
def _parse_output(self, xml_response):
"""This function parses the output received from ILO.
As the output contains multiple XMLs, it extracts
one xml at a time and loops over till all the xmls
in the response are exhausted.
It returns the data to APIs either in dictionary
format or as the string.
It creates the dictionary only if the Ilo response
contains the data under the requested RIBCL command.
If the Ilo response contains only the string,
then the string is returned back.
"""
count = 0
xml_dict = {}
resp_message = None
xml_start_pos = []
for m in re.finditer(r"\<\?xml", xml_response):
xml_start_pos.append(m.start())
while count < len(xml_start_pos):
if (count == len(xml_start_pos) - 1):
result = xml_response[xml_start_pos[count]:]
else:
result = \
xml_response[xml_start_pos[count]:
xml_start_pos[count + 1]]
result = result.strip()
message = etree.fromstring(result)
resp = self._validate_message(message)
if hasattr(resp, 'tag'):
xml_dict = self._elementtree_to_dict(resp)
elif resp is not None:
resp_message = resp
count = count + 1
if xml_dict:
return xml_dict
elif resp_message is not None:
return resp_message
def _elementtree_to_dict(self, element):
"""Converts the actual response from the ILO for an API
to the dictionary.
"""
node = dict()
text = getattr(element, 'text')
if text is not None:
text = text.strip()
if len(text) != 0:
node['text'] = text
node.update(element.items()) # element's attributes
child_nodes = {}
for child in element: # element's children
child_nodes.setdefault(child.tag, []).append(
self._elementtree_to_dict(child))
# convert all single-element lists into non-lists
for key, value in child_nodes.items():
if len(value) == 1:
child_nodes[key] = value[0]
node.update(child_nodes.items())
return node
def _validate_message(self, message):
"""This function validates the XML response to see
if the exit status is 0 or not in the response.
If the status is non-zero it raises exception.
"""
if message.tag != 'RIBCL':
# the true case shall be unreachable for response
# XML from Ilo as all messages are tagged with RIBCL
# but still raise an exception if any invalid
|
tzpBingo/github-trending | codespace/python/tencentcloud/yunjing/v20180228/yunjing_client.py | Python | mit | 122,172 | 0.002542 | # -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from tencentcloud.common.exception.tencent_cloud_sdk_exception import TencentCloudSDKException
from tencentcloud.common.abstract_client import AbstractClient
from tencentcloud.yunjing.v20180228 import models
class YunjingClient(AbstractClient):
_apiVersion = '2018-02-28'
_endpoint = 'yunjing.tencentcloudapi.com'
_service = 'yunjing'
def AddLoginWhiteList(self, request):
"""本接口(AddLoginWhiteList)用于添加白名单规则
:param request: Request instance for AddLoginWhiteList.
:type request: :class:`tencentcloud.yunjing.v20180228.models.AddLoginWhiteListRequest`
:rtype: :class:`tencentcloud.yunjing.v20180228.models.AddLoginWhiteListResponse`
"""
try:
params = request._serialize()
body = self.call("AddLoginWhiteList", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AddLoginWhiteListResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def AddMachineTag(self, request):
"""增加机器关联标签
:param request: Request instance for AddMachineTag.
:type request: :class:`tencentcloud.yunjing.v20180228.models.AddMachineTagRequest`
:rtype: :class:`tencentcloud.yunjing.v20180228.models.AddMachineTagResponse`
"""
try:
params = request._serialize()
body = self.call("AddMachineTag", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.AddMachineTagResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CloseProVersion(self, request):
"""本接口 (CloseProVersion) 用于关闭专业版。
:param request: Request instance for CloseProVersion.
:type request: :class:`tencentcloud.yunjing.v20180228.models.CloseProVersionRequest`
:rtype: :class:`tencentcloud.yunjing.v20180228.models.CloseProVersionResponse`
"""
try:
params = request._serialize()
body = self.call("CloseProVersion", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CloseProVersionResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"] | ["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
| else:
raise TencentCloudSDKException(e.message, e.message)
def CreateBaselineStrategy(self, request):
"""根据策略信息创建基线策略
:param request: Request instance for CreateBaselineStrategy.
:type request: :class:`tencentcloud.yunjing.v20180228.models.CreateBaselineStrategyRequest`
:rtype: :class:`tencentcloud.yunjing.v20180228.models.CreateBaselineStrategyResponse`
"""
try:
params = request._serialize()
body = self.call("CreateBaselineStrategy", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateBaselineStrategyResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateOpenPortTask(self, request):
"""本接口 (CreateOpenPortTask) 用于创建实时获取端口任务。
:param request: Request instance for CreateOpenPortTask.
:type request: :class:`tencentcloud.yunjing.v20180228.models.CreateOpenPortTaskRequest`
:rtype: :class:`tencentcloud.yunjing.v20180228.models.CreateOpenPortTaskResponse`
"""
try:
params = request._serialize()
body = self.call("CreateOpenPortTask", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateOpenPortTaskResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateProcessTask(self, request):
"""本接口 (CreateProcessTask) 用于创建实时拉取进程任务。
:param request: Request instance for CreateProcessTask.
:type request: :class:`tencentcloud.yunjing.v20180228.models.CreateProcessTaskRequest`
:rtype: :class:`tencentcloud.yunjing.v20180228.models.CreateProcessTaskResponse`
"""
try:
params = request._serialize()
body = self.call("CreateProcessTask", params)
response = json.loads(body)
if "Error" not in response["Response"]:
model = models.CreateProcessTaskResponse()
model._deserialize(response["Response"])
return model
else:
code = response["Response"]["Error"]["Code"]
message = response["Response"]["Error"]["Message"]
reqid = response["Response"]["RequestId"]
raise TencentCloudSDKException(code, message, reqid)
except Exception as e:
if isinstance(e, TencentCloudSDKException):
raise
else:
raise TencentCloudSDKException(e.message, e.message)
def CreateUsualLoginPlaces(self, request):
"""此接口(CreateUsualLoginPlaces)用于添加常用登录地。
:param request: Request instance for CreateUsualLoginPlaces.
:type reques |
ivanprjcts/sdklib | tests/test_session.py | Python | bsd-2-clause | 1,479 | 0.000676 | import unittest
from sdklib.compat import cookies
from sdklib.http.session import Cookie
class TestSession(unittest.TestCase):
    """Tests for the Cookie helper in sdklib.http.session."""

    @classmethod
    def setUpClass(cls):
        cls.cookie = Cookie({"Set-Cookie": "chips=ahoy; vienna=finger"})

    def test_as_cookie_header_value(self):
        res = self.cookie.as_cookie_header_value()
        self.assertIn("chips=ahoy", res)
        self.assertIn("vienna=finger", res)

    def test_as_cookie_header_value_tuples(self):
        cookie = Cookie([("Set-Cookie", "chips=ahoy; httpOnly"), ("Set-Cookie", "vienna=finger")])
        res = cookie.as_cookie_header_value()
        self.assertIn("chips=ahoy", res)
        self.assertIn("vienna=finger", res)

    def test_as_cookie_header_value_none(self):
        cookie = Cookie(None)
        # Restored: an empty Cookie must render as an empty header value.
        res = cookie.as_cookie_header_value()
        self.assertEqual(res, "")

    def test_get_cookie_morsel(self):
        res = self.cookie.get("chips")
        self.assertEqual(res.value, "ahoy")

    def test_get_cookie(self):
        res = self.cookie.getcookie()
        self.assertTrue(isinstance(res, cookies.SimpleCookie))

    def test_update_cookie(self):
        c = Cookie({"Set-Cookie": "new_param=marcos; another_new=ivan"})
        # Restored corrupted call: merge the class-level cookie into c.
        c.update(self.cookie)
        res = c.as_cookie_header_value()
        self.assertIn("chips=ahoy", res)
        self.assertIn("vienna=finger", res)
        self.assertIn("new_param=marcos", res)
        self.assertIn("another_new=ivan", res)
|
schleichdi2/OPENNFR-6.0-CORE | opennfr-meta-openembedded/contrib/oe-stylize.py | Python | gpl-2.0 | 11,631 | 0.006534 | #!/usr/bin/env python
"""\
Sanitize a bitbake file following the OpenEmbedded style guidelines,
see http://openembedded.org/wiki/StyleGuide
(C) 2006 Cyril Romain <cyril.romain@gmail.com>
MIT license
TODO:
- add the others OpenEmbedded variables commonly used:
- parse command arguments and print usage on misuse
. prevent giving more than one .bb file in arguments
- write result to a file
- backup the original .bb file
- make a diff and ask confirmation for patching ?
 - do not use startswith only:
/!\ startswith('SOMETHING') is not taken into account due to the previous startswith('S').
- count rule breaks and displays them in the order frequence
"""
from __future__ import print_function
import fileinput
import string
import re
__author__ = "Cyril Romain <cyril.romain@gmail.com>"
__version__ = "$Revision: 0.5 $"
# The standard set of variables often found in .bb files in the preferred order
OE_vars = [
'SUMMARY',
'DESCRIPTION',
'AUTHOR',
'HOMEPAGE',
'SECTION',
'LICENSE',
'LIC_FILES_CHKSUM',
'DEPENDS',
'PROVIDES',
'SRCREV',
'SRCDATE',
'PE',
'PV',
'PR',
'INC_PR',
'SRC_URI',
'S',
'GPE_TARBALL_SUFFIX',
'inherit',
'EXTRA_',
'export',
'do_fetch',
'do_unpack',
'do_patch',
'WORKDIR',
'acpaths',
'do_configure',
'do_compile',
'do_install',
'PACKAGES',
'PACKAGE_ARCH',
'RDEPENDS',
'RRECOMMENDS',
'RSUGGESTS',
'RPROVIDES',
'RCONFLICTS',
'FILES',
'do_package',
'do_stage',
'addhandler',
'addtask',
'bindir',
'headers',
'include',
'includedir',
'python',
'qtopiadir',
'pkg_preins',
'pkg_prerm',
'pkg_postins',
'pkg_postrm',
'require',
'sbindir',
'basesysconfdir',
'sysconfdir',
'ALLOW_EMPTY',
'ALTERNATIVE_NAME',
'ALTERNATIVE_PATH',
'ALTERNATIVE_LINK',
'ALTERNATIVE_PRIORITY',
'ALTNAME',
'AMD_DRIVER_LABEL',
'AMD_DRIVER_VERSION',
'ANGSTROM_EXTRA_INSTALL',
'APPDESKTOP',
'APPIMAGE',
'APPNAME',
'APPTYPE',
'APPWEB_BUILD',
'APPWEB_HOST',
'AR',
'ARCH',
'ARM_INSTRUCTION_SET',
'MIPS_INSTRUCTION_SET',
'ARM_MUTEX',
'ART_CONFIG',
'B',
'BJAM_OPTS',
'BJAM_TOOLS',
'BONOBO_HEADERS',
'BOOTSCRIPTS',
'BROKEN',
'BUILD_CPPFLAGS',
'CFLAGS',
'CCFLAGS',
'CMDLINE',
'COLLIE_MEMORY_SIZE',
'COMPATIBLE_HOST',
'COMPATIBLE_MACHINE',
'COMPILE_HERMES',
'CONFFILES',
'CONFLICTS',
'CORE_EXTRA_D',
'CORE_IMAGE_EXTRA_INSTALL',
'CORE_PACKAGES_D',
'CORE_PACKAGES_RD',
'CPPFLAGS',
'CVSDATE',
'CXXFLAGS',
'DEBIAN_NOAUTONAME',
'DEBUG_APPS',
'DEFAULT_PREFERENCE',
'DB4_CONFIG',
'EXCLUDE_FROM_SHLIBS',
'EXCLUDE_FROM_WORLD',
'FIXEDSRCDATE',
'GLIBC_ADDONS',
'GLIBC_EXTRA_OECONF',
'GNOME_VFS_HEADERS',
'HEADERS',
'INHIBIT_DEFAULT_DEPS',
'INITSCRIPT_PACKAGES',
'INITSCRIPT_NAME',
'INITSCRIPT_PARAMS',
'INSANE_SKIP',
'PACKAGE_INSTALL',
'KERNEL_IMAGETYPE',
'KERNEL_IMAGEDEST',
'KERNEL_OUTPUT',
'KERNEL_RELEASE',
'KERNEL_PRIORITY',
'KERNEL_SOURCE',
'KERNEL_SUFFIX',
'KERNEL_VERSION',
'K_MAJOR',
'K_MICRO',
'K_MINOR',
'HHV',
'KV',
'LDFLAGS',
'LD',
'LD_SO',
'LDLIBS',
'LEAD_SONAME',
'LIBTOOL',
'LIBBDB_EXTRA',
'LIBV',
'MACHINE_ESSENTIAL_EXTRA_RDEPENDS',
'MACHINE_ESSENTIAL_EXTRA_RRECOMMENDS',
'MACHINE_EXTRA_RDEPENDS',
'MACHINE_EXTRA_RRECOMMENDS',
'MACHINE_FEATURES',
'MACHINE_TASKS',
'MACHINE',
'MACHTYPE',
'MAKE_TARGETS',
'MESSAGEUSER',
'MESSAGEHOME',
'MIRRORS',
'MUTEX',
'OE_QMAKE_INCDIR_QT',
'OE_QMAKE_CXXFLAGS',
'ORBIT_IDL_SRC',
'PARALLEL_MAKE',
'PAKCAGE_ARCH',
'PCMCIA_MANAGER',
'PKG_BASENAME',
'PKG',
'QEMU',
'QMAKE_PROFILES',
'QPEDIR',
'QPF_DESCRIPTION',
'QPF_PKGPATTERN',
'QT_CONFIG_FLAGS',
'QT_LIBRARY',
'ROOTFS_POSTPROCESS_COMMAND',
'RREPLACES',
'TARGET_CFLAGS',
'TARGET_CPPFLAGS',
'TARGET_LDFLAGS',
'UBOOT_MACHINE',
'UCLIBC_BASE',
'UCLIBC_PATCHES',
'USERADD_PACKAGES',
'USERADD_PARAM',
'VIRTUAL_NAME',
'XORG_PN',
'XSERVER',
'others'
]
# Matches "NAME <ws> op <ws> value" assignments, capturing: (1) variable name,
# (2) whitespace before the operator, (3) the operator (=, +=, .=, :=, =+, =.),
# (4) whitespace after the operator, (5) the assigned value.
varRegexp = r'^([a-zA-Z_0-9${}-]*)([ \t]*)([+.:]?=[+.]?)([ \t]*)([^\t]+)'
# Matches the opening line of a shell/python routine definition.
routineRegexp = r'^([a-zA-Z0-9_ ${}-]+?)\('

# Variables seen in the processed .bb
seen_vars = {}
for v in OE_vars:
    seen_vars[v] = []
# _Format guideline #0_:
# No spaces are allowed at the beginning of lines that define a variable or
# a do_ routine
def respect_rule0(line):
    """Return True when *line* has no leading whitespace."""
    return not (line and line[0].isspace())


def conformTo_rule0(line):
    """Strip leading whitespace so the line starts in column 0."""
    return line.lstrip()
# _Format guideline #1_:
# No spaces are allowed behind the line continuation symbol '\'
def respect_rule1(line):
    """Return True unless whitespace follows a trailing continuation '\\'."""
    return not line.rstrip().endswith('\\') or line.endswith('\\')


def conformTo_rule1(line):
    """Drop trailing whitespace so the '\\' really ends the line."""
    return line.rstrip()
# _Format guideline #2_:
# Tabs should not be used (use spaces instead).
def respect_rule2(line):
    """Return True when the line contains no tab characters."""
    return '\t' not in line


def conformTo_rule2(line):
    """Replace every tab with the equivalent run of spaces."""
    return line.expandtabs()
# _Format guideline #3_:
# Comments inside bb files are allowed using the '#' character at the
# beginning of a line.
def respect_rule3(line):
    """Return True unless a comment line is indented."""
    return line.startswith('#') if line.lstrip().startswith('#') else True


def conformTo_rule3(line):
    """Move the comment marker to column 0."""
    return line.lstrip()
# _Format guideline #4_:
# Use quotes on the right hand side of assignments FOO = "BAR"
def respect_rule4(line):
    """Return True when the assignment value starts and ends with a quote."""
    r = re.search(varRegexp, line)
    if r is not None:
        r2 = re.search(r'("?)([^"\\]*)(["\\]?)', r.group(5))
        # do not test r2 for None because the pattern always matches
        return r2.group(1) == '"' and r2.group(3) != ''
    return False


def conformTo_rule4(line):
    """Rewrite the assignment so its value is wrapped in double quotes."""
    r = re.search(varRegexp, line)
    value = r.group(5)
    # Append a closing quote only when one is not already present.  The old
    # spelling ``value.endswith('"') and '' or '"'`` ALWAYS yielded '"',
    # because the empty string in the middle of and/or is falsy, so an
    # already-quoted value received a doubled closing quote.
    closing = '' if value.endswith('"') else '"'
    return ''.join([r.group(1), ' ', r.group(3), ' "', value, closing])
# _Format guideline #5_:
# The correct spacing for a variable is FOO = "BAR".
def respect_rule5(line):
    """Return True when exactly one space surrounds the assignment operator."""
    m = re.search(varRegexp, line)
    return m is not None and (m.group(2), m.group(4)) == (" ", " ")


def conformTo_rule5(line):
    """Normalize the assignment to single spaces around the operator."""
    m = re.search(varRegexp, line)
    return "%s %s %s" % (m.group(1), m.group(3), m.group(5))
# _Format guideline #6_:
# Don't use spaces or tabs on empty lines
def respect_rule6(line):
    """Return True unless the line is whitespace other than a bare newline."""
    return line == "\n" or not line.isspace()


def conformTo_rule6(line):
    """An empty line carries no characters at all."""
    return ""
# _Format guideline #7_:
# Indentation of multiline variables such as SRC_URI is desireable.
def respect_rule7(line):
    """Advisory rule: every line is accepted."""
    return True


def conformTo_rule7(line):
    """Advisory rule: the line is returned unchanged."""
    return line
# (checker, fixer, description) triples, one per formatting guideline,
# indexed by guideline number and consumed by follow_rule().
rules = (
    (respect_rule0, conformTo_rule0, "No spaces are allowed at the beginning of lines that define a variable or a do_ routine"),
    (respect_rule1, conformTo_rule1, "No spaces are allowed behind the line continuation symbol '\\'"),
    (respect_rule2, conformTo_rule2, "Tabs should not be used (use spaces instead)"),
    (respect_rule3, conformTo_rule3, "Comments inside bb files are allowed using the '#' character at the beginning of a line"),
    (respect_rule4, conformTo_rule4, "Use quotes on the right hand side of assignments FOO = \"BAR\""),
    (respect_rule5, conformTo_rule5, "The correct spacing for a variable is FOO = \"BAR\""),
    (respect_rule6, conformTo_rule6, "Don't use spaces or tabs on empty lines"),
    (respect_rule7, conformTo_rule7, "Indentation of multiline variables such as SRC_URI is desireable"),
)
# Function to check that a line respects a rule. If not, it tries to conform
# the line to the rule. Reminder or Disgression message are dump accordingly.
def follow_rule(i, line):
oldline = line
# if the line does not respect the rule
if not rules[i][0](line):
# try to conform it to the rule
line = rules[i][1](line)
# if the line still does not respect the rule
if not rules[i][0](line):
# this is a rule disgression
print ("## Disgression: ", rules[i][2], " in: '", oldlin |
Alberdi/marcestats | bgplays/services.py | Python | gpl-3.0 | 3,592 | 0.003341 | from .models import *
from django.db.models import Count, F, Max, Sum
# Game info services
def get_median_points(game_id):
    """Median of the team scores recorded for a game (null scores excluded)."""
    scored_teams = Team.objects.filter(play__game__id=game_id) \
                       .exclude(play__team__points__isnull=True)
    return median_value(scored_teams, 'play__team__points')
def get_play_count(game_id):
    """Number of recorded plays for the given game."""
    plays = Play.objects.filter(game__id=game_id)
    return plays.count()
def get_game_list():
    """All games with play counts and last-played dates, most played first."""
    return Game.objects.all() \
        .annotate(plays=Count('play__id')) \
        .annotate(last_played=Max('play__date')) \
        .order_by('-plays')
def get_game_players(game_id):
    """Players of a game with their distinct play counts and win totals.

    Builds a per-play player listing with the ORM, then aggregates it with
    a raw GROUP BY query over that subquery.
    """
    play_players = Play.objects.filter(game__id=game_id) \
        .annotate(name=F('team__players__name')) \
        .annotate(winner=F('team__winner')) \
        .annotate(pid=F('team__players__id')) \
        .values('id', 'name', 'pid', 'winner').distinct()
    # NOTE(review): interpolating str(queryset.query) into raw SQL relies on
    # the ORM rendering its parameters inline -- confirm this stays safe.
    return Player.objects.raw(
        '''SELECT pid, name, COUNT(DISTINCT id) AS 'count', SUM(winner) AS 'wins'
           FROM ( %s )
           GROUP BY pid ORDER BY COUNT(id) DESC, SUM(winner) DESC''' % str(play_players.query),
        translations={'pid': 'id'})
def get_faction_plays(game_id):
    """Per-faction play counts and win totals for a game, most played first."""
    factions = Faction.objects.filter(game__id=game_id).values('name')
    return factions.annotate(wins=Sum('team__winner')) \
                   .annotate(count=Count('name')) \
                   .order_by('-count', '-wins')
# Player info services
def get_player_games(player_id):
    """Games a player has taken part in, with per-game play and win counts."""
    plays_ids = get_play_ids(player_id)
    # The 'wins' subquery is correlated on the outer game row.  player_id is
    # interpolated directly into the SQL -- assumed to be a trusted integer
    # id from our own models, not user input; verify at call sites.
    return Game.objects.filter(play__id__in=plays_ids) \
        .extra(select={
        'wins': 'SELECT COUNT(DISTINCT bgplays_team.play_id) FROM bgplays_team '
                'INNER JOIN bgplays_team_players ON bgplays_team.id = bgplays_team_players.team_id '
                'INNER JOIN bgplays_play ON bgplays_play.id = bgplays_team.play_id '
                'WHERE winner = 1 '
                'AND bgplays_play.game_id = bgplays_game.id '
                'AND bgplays_team_players.player_id = % s' % player_id}, ) \
        .values('name', 'id', 'wins') \
        .annotate(count=Count('name')) \
        .order_by('-count')
def get_player_list():
    """All players annotated with total plays and last-played date."""
    # The plays calculation takes a lot.
    # TODO: We should find another way to fetch them.
    players = Player.objects.all() \
        .annotate(last_played=Max('team__play__date')) \
        .extra(select={
        'plays': 'SELECT COUNT(DISTINCT bgplays_team.play_id) FROM bgplays_team '
                 'INNER JOIN bgplays_team_players ON bgplays_team.id = bgplays_team_players.team_id '
                 'WHERE bgplays_team_players.player_id = bgplays_player.id'}, ) \
        .order_by('-plays', '-last_played')
    return players
def get_player_mates(player_id):
    """Players who shared at least one play with *player_id*, most frequent first."""
    # XXX: That distinct() is probably not working as expected
    # But there are not counterexamples in the current data set
    plays_ids = get_play_ids(player_id)
    return Player.objects.filter(team__play__id__in=plays_ids) \
        .exclude(id=player_id) \
        .values('name', 'team__play__id') \
        .distinct() \
        .values('name') \
        .annotate(count=Count('name')) \
        .order_by('-count')
# Helper methods
def get_play_ids(player_id):
    """Ids of all distinct plays the given player took part in."""
    return Play.objects.filter(team__players__id=player_id).values('id').distinct()
def median_value(queryset, term):
    """Return the median of *term* over *queryset*, or 0 when it is empty.

    Uses floor division so the indices stay integers on Python 3: the old
    ``count / 2`` produced floats there (breaking indexing/slicing), and
    ``int(round(count / 2))`` rounded 1.5 up to 2 and picked the wrong
    element for odd counts.
    """
    count = queryset.count()
    values = queryset.values_list(term, flat=True).order_by(term)
    if count % 2 == 1:
        return values[count // 2]
    elif count > 0:
        # Mean of the two middle values for an even-sized population.
        return sum(values[count // 2 - 1:count // 2 + 1]) / 2.0
    else:
        return 0
|
geokrety/geokrety-api | tests/unittests/api/news/test_news_collection.py | Python | gpl-3.0 | 933 | 0 | # -*- coding: utf-8 -*-
from parameteri | zed import parameterized
from tests.unittests.utils.base_test_case import BaseTestCase, request_context
from tests.unittests.utils.payload.news import NewsPayload
class TestNewsCollection(BaseTestCase):
    """Test News collection"""

    @parameterized.expand([
        [None, 200],
        ['admin', 200],
        ['user_1', 200],  # Owner
        ['user_2', 200],
    ])
    @request_context
    def test_news_collection_can_be_accessed_as(self, username, expected):
        # Anonymous visitors and every role may list news; all public
        # attributes of each item must be present in the response.
        news = self.blend_news(author=self.user_1, count=3)
        user = getattr(self, username) if username else None
        response = NewsPayload()\
            .get_collection(user=user, code=expected)\
            .assertCount(3)
        response.data[0].assertHasPublicAttributes(news[0])
        response.data[1].assertHasPublicAttributes(news[1])
        response.data[2].assertHasPublicAttributes(news[2])
|
tivek/conan | conans/test/model/manifest_test.py | Python | mit | 2,065 | 0.002421 | import unittest
from conans.util.files import save, load, md5
import os
from conans.model.manifest import FileTreeManifest
from conans.test.utils.test_files import temp_folder
class ManifestTest(unittest.TestCase):

    def test_tree_manifest(self):
        """A manifest round-trips through str()/loads() and skips .pyc/.pyo files."""
        tmp_dir = temp_folder()
        files = {"one.ext": "aalakjshdlkjahsdlkjahsdljkhsadljkhasljkdhlkjashd",
                 "path/to/two.txt": "asdas13123",
                 "two.txt": "asdasdasdasdasdasd",
                 "folder/damn.pyc": "binarythings",
                 "folder/damn.pyo": "binarythings2",
                 "pythonfile.pyc": "binarythings3"}
        for filename, content in files.items():
            save(os.path.join(tmp_dir, filename), content)

        manifest = FileTreeManifest.create(tmp_dir)

        save(os.path.join(tmp_dir, "THEMANIFEST.txt"), str(manifest))
        readed_manifest = FileTreeManifest.loads(load(os.path.join(tmp_dir, "THEMANIFEST.txt")))
        self.assertEqual(readed_manifest.time, manifest.time)
        self.assertEqual(readed_manifest, manifest)
        # Not included the pycs or pyo
        self.assertEquals(set(manifest.file_sums.keys()),
                          set(["one.ext", "path/to/two.txt", "two.txt"]))

        for filepath, md5readed in manifest.file_sums.items():
            content = files[filepath]
            self.assertEquals(md5(content), md5readed)

    def already_pyc_in_manifest_test(self):
        """Compiled-file entries in a stored manifest are dropped on load."""
        tmp_dir = temp_folder()
        # NOTE(review): the final "conanfile.pyo ..." string is a *third*
        # positional argument to save(), not part of the manifest content --
        # confirm whether it was meant to be concatenated into the text.
        save(os.path.join(tmp_dir, "man.txt"), "1478122267\nconanfile.pyc: "
                                               "2bcac725a0e6843ef351f4d18cf867ec\n"
                                               "conanfile.py: 2bcac725a0e6843ef351f4d18cf867ec",
                                               "conanfile.pyo: 2bcac725a0e6843ef351f4d18cf867ec")

        read_manifest = FileTreeManifest.loads(load(os.path.join(tmp_dir, "man.txt")))
        # Not included the pycs or pyo
        self.assertEquals(set(read_manifest.file_sums.keys()),
                          set(["conanfile.py"]))
|
ahwillia/tensortools | tensortools/optimize/ncp_hals.py | Python | mit | 6,382 | 0.00094 | """
Nonnegative CP decomposition by Hierarchical alternating least squares (HALS).
Author: N. Benjamin Erichson <erichson@uw.edu>
"""
import numpy as np
import numba
from tensortools.operations import unfold, khatri_rao
from tensortools.tensors import KTensor
from tensortools.optimize import FitResult, optim_utils
def ncp_hals(
        X, rank, mask=None, random_state=None, init='rand',
        skip_modes=[], negative_modes=[], **options):
    """
    Fits nonnegative CP Decomposition using the Hierarchical Alternating Least
    Squares (HALS) Method.

    Parameters
    ----------
    X : (I_1, ..., I_N) array_like
        A real array with nonnegative entries and ``X.ndim >= 3``.
    rank : integer
        The `rank` sets the number of components to be computed.
    mask : (I_1, ..., I_N) array_like
        Binary tensor, same shape as X, specifying censored or missing data values
        at locations where (mask == 0) and observed data where (mask == 1).
    random_state : integer, RandomState instance or None, optional (default ``None``)
        If integer, random_state is the seed used by the random number generator;
        If RandomState instance, random_state is the random number generator;
        If None, the random number generator is the RandomState instance used by np.random.
    init : str, or KTensor, optional (default ``'rand'``).
        Specifies initial guess for KTensor factor matrices.
        If ``'randn'``, Gaussian random numbers are used to initialize.
        If ``'rand'``, uniform random numbers are used to initialize.
        If KTensor instance, a copy is made to initialize the optimization.
    skip_modes : iterable, optional (default ``[]``).
        Specifies modes of the tensor that are not fit. This can be
        used to fix certain factor matrices that have been previously
        fit.
    negative_modes : iterable, optional (default ``[]``).
        Specifies modes of the tensor whose factors are not constrained
        to be nonnegative.
    options : dict, specifying fitting options.

        tol : float, optional (default ``tol=1E-5``)
            Stopping tolerance for reconstruction error.
        max_iter : integer, optional (default ``max_iter = 500``)
            Maximum number of iterations to perform before exiting.
        min_iter : integer, optional (default ``min_iter = 1``)
            Minimum number of iterations to perform before exiting.
        max_time : integer, optional (default ``max_time = np.inf``)
            Maximum computational time before exiting.
        verbose : bool ``{'True', 'False'}``, optional (default ``verbose=True``)
            Display progress.

    Returns
    -------
    result : FitResult instance
        Object which holds the fitted results. It provides the factor matrices
        in form of a KTensor, ``result.factors``.

    Notes
    -----
    This implementation is using the Hierarchical Alternating Least Squares Method.

    References
    ----------
    Cichocki, Andrzej, and P. H. A. N. Anh-Huy. "Fast local algorithms for
    large scale nonnegative matrix and tensor factorizations."
    IEICE transactions on fundamentals of electronics, communications and
    computer sciences 92.3: 708-721, 2009.

    Examples
    --------
    """

    # Mask missing elements: fill them with the mean of the observed data so
    # the initialization is not biased by zeros at censored entries.
    if mask is not None:
        X = np.copy(X)
        X[~mask] = np.mean(X[mask])

    # Check inputs.
    optim_utils._check_cpd_inputs(X, rank)

    # Initialize problem.
    U, normX = optim_utils._get_initial_ktensor(init, X, rank, random_state)
    result = FitResult(U, 'NCP_HALS', **options)

    # Norm of the data, used to normalize the reconstruction error below.
    # (Recomputed here since X may have been modified by the masking above.)
    normX = np.linalg.norm(X)

    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Iterate the HALS algorithm until convergence or maxiter is reached
    # i)   compute the N gram matrices and multiply
    # ii)  Compute Khatri-Rao product
    # iii) Update component U_1, U_2, ... U_N
    # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    while result.still_optimizing:

        for n in range(X.ndim):

            # Skip modes that are specified as fixed.
            if n in skip_modes:
                continue

            # Select all components, but U_n
            components = [U[j] for j in range(X.ndim) if j != n]

            # i) compute the N-1 gram matrices
            grams = np.prod([arr.T @ arr for arr in components], axis=0)

            # ii)  Compute Khatri-Rao product
            kr = khatri_rao(components)
            Xmkr = unfold(X, n).dot(kr)

            # iii) Update component U_n
            _hals_update(U[n], grams, Xmkr, n not in negative_modes)

            # iv) Update masked elements with the current model prediction.
            if mask is not None:
                pred = U.full()
                X[~mask] = pred[~mask]

        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        # Update the optimization result, checks for convergence.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        if mask is None:

            # Determine mode that was fit last.
            n = np.setdiff1d(np.arange(X.ndim), skip_modes).max()

            # Add contribution of last fit factors to gram matrix, so the
            # residual can be computed without forming the full tensor.
            grams *= U[n].T @ U[n]
            residsq = np.sum(grams) - 2 * np.sum(U[n] * Xmkr) + (normX ** 2)
            result.update(np.sqrt(residsq) / normX)

        else:
            result.update(np.linalg.norm(X - pred) / normX)

    # end optimization loop, return result.
    return result.finalize()
@numba.jit(nopython=True)
def _hals_update(factors, grams, Xmkr, nonneg):
    """In-place HALS update of one factor matrix.

    Solves the column-wise least-squares subproblems defined by the gram
    matrix ``grams`` and the unfolded-data product ``Xmkr``, projecting each
    column onto the nonnegative orthant when ``nonneg`` is True.
    """
    dim = factors.shape[0]
    rank = factors.shape[1]
    indices = np.arange(rank)

    # Handle special case of rank-1 model.
    if rank == 1:
        if nonneg:
            factors[:] = np.maximum(0.0, Xmkr / grams[0, 0])
        else:
            factors[:] = Xmkr / grams[0, 0]

    # Do a few inner iterations.
    else:
        for itr in range(3):
            for p in range(rank):
                # Residual contribution of all columns except p.
                idx = (indices != p)
                Cp = factors[:, idx] @ grams[idx][:, p]
                # 1e-6 floor guards against dividing by a near-zero gram diagonal.
                r = (Xmkr[:, p] - Cp) / np.maximum(grams[p, p], 1e-6)

                if nonneg:
                    factors[:, p] = np.maximum(r, 0.0)
                else:
                    factors[:, p] = r
|
danstoner/python_experiments | playing_with_kivy/kivi-examples/demo/touchtracer/main.py | Python | gpl-2.0 | 2,938 | 0.002383 | import kivy
kivy.require('1.0.6')
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.label import Label
from kivy.graphics import Color, Rectangle, Point, GraphicException
from random import random
from math import sqrt
def calculate_points(x1, y1, x2, y2, steps=5):
    """Return interpolated points between two positions, flattened as
    [x, y, x, y, ...], spaced roughly *steps* pixels apart.

    Returns None when the two positions are closer than *steps*, so the
    caller can skip drawing intermediate particles.
    """
    dx = x2 - x1
    dy = y2 - y1
    dist = sqrt(dx * dx + dy * dy)
    if dist < steps:
        return None
    o = []
    m = dist / steps
    # range() instead of xrange() keeps this working on Python 3 as well
    # (identical behavior on Python 2 for these small counts).
    for i in range(1, int(m)):
        mi = i / m
        lastx = x1 + dx * mi
        lasty = y1 + dy * mi
        o.extend([lastx, lasty])
    return o
class Touchtracer(FloatLayout):
    """Draws full-window crosshair lines and a particle trail for every
    active touch, plus a floating label with the touch's id and position."""

    def on_touch_down(self, touch):
        # All canvas instructions for this touch share one group (keyed by
        # the touch uid) so they can be removed in a single call on touch up.
        win = self.get_parent_window()
        ud = touch.ud
        ud['group'] = g = str(touch.uid)
        with self.canvas:
            ud['color'] = Color(random(), 1, 1, mode='hsv', group=g)
            ud['lines'] = (
                Rectangle(pos=(touch.x, 0), size=(1, win.height), group=g),
                Rectangle(pos=(0, touch.y), size=(win.width, 1), group=g),
                Point(points=(touch.x, touch.y), source='particle.png',
                      pointsize=5, group=g))

        ud['label'] = Label(size_hint=(None, None))
        self.update_touch_label(ud['label'], touch)
        self.add_widget(ud['label'])
        # Grab the touch so move/up events route back to this widget.
        touch.grab(self)
        return True

    def on_touch_move(self, touch):
        if touch.grab_current is not self:
            return
        ud = touch.ud
        # Move the crosshair rectangles to follow the touch.
        ud['lines'][0].pos = touch.x, 0
        ud['lines'][1].pos = 0, touch.y

        points = ud['lines'][2].points
        oldx, oldy = points[-2], points[-1]
        # Interpolate so fast drags still leave a continuous particle trail.
        points = calculate_points(oldx, oldy, touch.x, touch.y)
        if points:
            try:
                lp = ud['lines'][2].add_point
                # NOTE: xrange is Python 2 only (this example targets py2).
                for idx in xrange(0, len(points), 2):
                    lp(points[idx], points[idx+1])
            except GraphicException:
                pass

        ud['label'].pos = touch.pos
        import time
        t = int(time.time())
        # Tally move events per wall-clock second in touch.ud (debug info).
        if t not in ud:
            ud[t] = 1
        else:
            ud[t] += 1
        self.update_touch_label(ud['label'], touch)

    def on_touch_up(self, touch):
        if touch.grab_current is not self:
            return
        touch.ungrab(self)
        ud = touch.ud
        # One call removes every canvas instruction created for this touch.
        self.canvas.remove_group(ud['group'])
        self.remove_widget(ud['label'])

    def update_touch_label(self, label, touch):
        # Refresh the label text and resize its box to fit the new texture.
        label.text = 'ID: %s\nPos: (%d, %d)\nClass: %s' % (
            touch.id, touch.x, touch.y, touch.__class__.__name__)
        label.texture_update()
        label.pos = touch.pos
        label.size = label.texture_size[0] + 20, label.texture_size[1] + 20
class TouchtracerApp(App):
    """Application wrapper: window title, icon and root widget."""
    title = 'Touchtracer'
    icon = 'icon.png'

    def build(self):
        return Touchtracer()

    def on_pause(self):
        # Allow the app to be paused (rather than killed) on mobile platforms.
        return True

if __name__ == '__main__':
    TouchtracerApp().run()
|
socialplanning/opencore | opencore/project/browser/add.py | Python | gpl-3.0 | 7,853 | 0.003566 | """
project and subproject adding
# @@ needs tests
"""
from Acquisition import aq_inner
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ZopeTwoPageTemplateFile
from opencore.browser.formhandler import OctopoLite, action
from opencore.i18n import _
from opencore.interfaces import IHomePage
from opencore.interfaces.event import AfterProjectAddedEvent
from opencore.browser.naming import get_view_names
from opencore.project.browser.base import ProjectBaseView
from topp.featurelets.interfaces import IFeatureletSupporter, IFeaturelet
from topp.utils.text import valid_title, valid_id, strip_extra_whitespace
from zope import event
from zope.component import getAdapters, getMultiAdapter
from zope.interface import implements
import logging
log = logging.getLogger('opencore.project.browser.add')
class ProjectAddView(ProjectBaseView, OctopoLite):
template = ZopeTwoPageTemplateFile('create.pt')
def reserved_names(self):
return list(get_view_names(self.context)) + ['people', 'projects', 'unique', 'summary', 'pending']
@action('validate')
def validate(self, target=None, fields=None):
putils = getToolByName(self.context, 'plone_utils')
errors = {}
id_ = self.request.form.get('projid')
id_ = putils.normalizeString(id_)
if (self.context.has_key(id_)
or id_ in self.reserved_names()):
errors['oc-id-error'] = {
'html': 'The requested url is already taken.',
'action': 'copy',
'effects': 'highlight'
}
else:
errors['oc-id-error'] = {
'html': '',
'action': 'copy',
'effects': ''
}
return errors
def check_logo(self, project, logo):
try:
project.setLogo(logo)
except ValueError: # must have tried to upload an unsupported filetype
self.addPortalStatusMessage(' | Please choose an image in gif, jpeg, png, or bmp format.')
return False
return True
@action('add')
def handle_request(self, target=None, fields=None):
#XXX all of the errors that are reported back here are not going
# through the translation machinery
putils = getToolByName(self.context, | 'plone_utils')
self.request.set('__initialize_project__', True)
self.errors = {}
title = self.request.form.get('project_title')
title = strip_extra_whitespace(title)
if not isinstance(title, unicode):
title = unicode(title, 'utf-8')
self.request.form['project_title'] = title
if not valid_title(title):
self.errors['project_title'] = 'The name must contain 2 or more characters.'
id_ = self.request.form.get('projid')
if not valid_id(id_):
self.errors['id'] = 'The url must contain 2 or more characters; ' + \
'only A-Z, 0-9 and "-" are valid characters.'
else:
id_ = putils.normalizeString(id_)
if self.context.has_key(id_):
self.errors['id'] = 'The requested url is already taken.'
# Give plugin viewlets a chance to validate. We don't have a
# project yet, so they'll have to tolerate validating with the
# project container as the context.
viewlet_mgr = getMultiAdapter((self.context, self.request, self),
name='opencore.proj_prefs')
if not hasattr(viewlet_mgr, 'viewlets'):
viewlet_mgr.update()
viewlets = viewlet_mgr.viewlets
for viewlet in viewlets:
if hasattr(viewlet, 'validate'):
self.errors.update(viewlet.validate())
# XXX TO DO: handle featurelets, just like in preferences.py
if self.errors:
self.add_status_message(_(u'psm_correct_errors_below', u'Please correct the errors indicated below.'))
return
self.request.form['featurelets'] = [f['id'] for f in self.featurelets()]
# Aarrgghh!! #*!&% plone snoops into the request, and reads the form variables directly,
# so we have to set the form variables with the same names as the schema
self.request.form['title'] = title
proj = self.context.restrictedTraverse('portal_factory/OpenProject/%s' %id_)
# not calling validate because it explodes on "'" for project titles
# XXX is no validation better than an occasional ugly error?
#proj.validate(REQUEST=self.request, errors=self.errors, data=1, metadata=0)
if self.errors:
self.add_status_message(_(u'psm_correct_errors_below', u'Please correct the errors indicated below.'))
return
if id_ in self.reserved_names():
self.errors['id'] = 'Name reserved'
self.add_status_message(_(u'psm_project_name_reserved', u'The name "${project_name}" is reserved. Please try a different name.',
mapping={u'project_name':id_}))
return
self.context.portal_factory.doCreate(proj, id_)
proj = aq_inner(self.context)._getOb(id_)
self.notify(proj)
logo = self.request.form.get('logo')
if logo:
if not self.check_logo(proj, logo):
return
del self.request.form['logo']
hpcontext = IHomePage(proj)
hpcontext.home_page = 'summary'
# We have to look up the viewlets again, now that we have
# a project for them to use as the context to save to.
viewlet_mgr = getMultiAdapter((proj, self.request, self),
name='opencore.proj_prefs')
if not hasattr(viewlet_mgr, 'viewlets'):
viewlet_mgr.update()
for viewlet in viewlet_mgr.viewlets:
if hasattr(viewlet, 'save'):
viewlet.save()
self.template = None # Don't render anything before redirect.
site_url = getToolByName(self.context, 'portal_url')()
proj_edit_url = '%s/projects/%s/project-home/edit' % (site_url, id_)
s_message_mapping = {'title': title, 'proj_edit_url': proj_edit_url,
'project_noun': self.project_noun,}
s_message = _(u'project_created',
u'"${title}" has been created. Create a team by searching for other members to invite to your ${project_noun}, then <a href="${proj_edit_url}">edit your ${project_noun} home page</a>.',
mapping=s_message_mapping)
# self.add_status_message(s_message)
self.redirect('%s/tour' % proj.absolute_url())
    def notify(self, project):
        """Fire AfterProjectAddedEvent so subscribers can react to the new project."""
        event.notify(AfterProjectAddedEvent(project, self.request))
    def featurelets(self):
        """Return display data (id, title, url, checked) for every available
        featurelet, discovered via IFeaturelet adapters on a stub supporter."""
        # create a stub object that provides IFeatureletSupporter
        # is there a better way to get the list of adapters without having
        # the "for" object?
        # @@ dwm: look at the adapter reg or uses the apidoc api which
        # featurelet to display is a policy decision on the portal
        # (like opencore_properties). Might work best to build the ui
        # around a policy abstraction
        obj = DummyFeatureletSupporter()
        flets = getAdapters((obj,), IFeaturelet)
        # url comes from the featurelet's first menu item action.
        flet_data = [dict(id=f.id,
                          title=f.title,
                          url=f._info['menu_items'][0]['action'],
                          checked=False,
                          )
                     for name, f in flets]
        return flet_data
    def homepages(self):
        """All possible home-page choices: intrinsic pages plus featurelets."""
        flet_data = self.intrinsic_homepages() + self.featurelets()
        return flet_data
class DummyFeatureletSupporter(object):
    """Stand-in that merely provides IFeatureletSupporter so featurelet
    adapters can be enumerated before a real project exists."""
    implements(IFeatureletSupporter)
|
LaMi-/pmatic | update-file-headers.py | Python | gpl-2.0 | 2,611 | 0.002298 | #!/usr/bin/env python
# encoding: utf-8
#
# pmatic - Python API for Homematic. Easy to use.
# Copyright (C) 2016 Lars Michelsen <lm@larsmichelsen.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
# Add Python 3.x behaviour to 2.7
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import time
import glob
# License header (with the current year substituted in) that replaces the
# existing comment header of every processed file.
new_header = '''#!/usr/bin/env python
# encoding: utf-8
#
# pmatic - Python API for Homematic. Easy to use.
# Copyright (C) %s Lars Michelsen <lm@larsmichelsen.com>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
''' % time.strftime('%Y')

# Repository root = directory containing this script.
repo_path = os.path.dirname(os.path.realpath(__file__))

# Rewrite the header of the manager script and of every .py file up to
# three directory levels deep; doc/conf.py keeps its own header.
for f in [ "pmatic-manager" ] \
         + glob.glob('%s/*.py' % repo_path) \
         + glob.glob('%s/*/*.py' % repo_path) \
         + glob.glob('%s/*/*/*.py' % repo_path) \
         + glob.glob('%s/pmatic/*/*.py' % repo_path):
    if f.endswith("doc/conf.py"):
        continue
    # Keep everything from the first non-comment line onward, prepending
    # the fresh header in place of the old leading comment block.
    new = [ new_header ]
    first_code_line_found = False
    for l in open(f):
        l = l.decode("utf-8")
        if not l.startswith('#'):
            first_code_line_found = True
        if first_code_line_found:
            new.append(l)
    open(f, 'w').write((''.join(new)).encode("utf-8"))
|
PayPal-Opportunity-Hack-Chennai-2015/AID-India | server/aiserverproj/aiserverapp/centre_helper.py | Python | apache-2.0 | 1,656 | 0.007246 | from aiserverapp.models import Child
from aiserverapp.models import Assessment
from aiserverapp.models import Village
from aiserverapp.models import Centre
from aiserverapp.models import Block
from aiserverapp.models import District
def consolidate_centre_info(centre_id):
    """Build a summary dict for one centre: its name plus the names of the
    village, block and district it belongs to.

    Returns a dict with keys 'centre_id', 'centre_name', 'village_name',
    'block_name' and 'district_name'.
    """
    print('CENTER:', centre_id)
    centre_info = {'centre_id': centre_id}
    centre = Centre.objects.get(centre_id=centre_id)
    centre_info['centre_name'] = centre.centre_name
    # Walk up the location hierarchy: centre -> village -> block -> district.
    # centre.village_id is a Village instance, so pull village_id out of it.
    village = Village.objects.get(village_id=centre.village_id.village_id)
    centre_info['village_name'] = village.village_name
    # village.block_id is a Block instance, so pull block_id out of it.
    block = Block.objects.get(block_id=village.block_id.block_id)
    centre_info['block_name'] = block.block_name
    # block.district_id is a District instance, so pull district_id out of it.
    district = District.objects.get(district_id=block.district_id.district_id)
    print('DIS', district, type(district), district.district_name)
    centre_info['district_name'] = district.district_name
    # Child/assessment counting was started but is currently disabled:
    # centre_info['children_count'] = 0
    # for child in Child.objects.filter(centre_id=centre_id):
    #     centre_info['children_count'] = centre_info['children_count'] + 1
    #     # Get assessment of that child
    #     print('CHILD:', child)
    #     for assement in Assessment.objects.filter(child_id=child.child_id):
    #         print('assement:', assement)
    return centre_info
Lapin-Blanc/django_beid | __init__.py | Python | gpl-3.0 | 57 | 0 | defa | ult_app_config = "django_beid.apps.DjangoBeidConfi | g"
|
geekaia/edx-platform | cms/djangoapps/contentstore/management/commands/export_all_courses.py | Python | agpl-3.0 | 1,450 | 0.001379 | """
Script for exporting all courseware from Mongo to a directory
"""
from django.core.management.base import BaseCommand, CommandError
from xmodule.modulestore.xml_exporter import export_to_xml
from xmodule.modulestore.django import modulestore
from xmodule.contentstore.django import contentstore
class Command(BaseCommand):
    """Export all courses from mongo to the specified data directory"""
    help = 'Export all courses from mongo to the specified data directory'

    def handle(self, *args, **options):
        """Execute the command.

        Expects exactly one positional argument: the output directory.
        Failures for individual courses are reported and skipped so one
        broken course does not abort the whole export.
        (Repaired garbled print lines and dropped a pointless `if 1:`.)
        """
        if len(args) != 1:
            raise CommandError("export requires one argument: <output path>")
        output_path = args[0]
        cs = contentstore()
        ms = modulestore('direct')
        root_dir = output_path
        courses = ms.get_courses()
        print("%d courses to export:" % len(courses))
        cids = [x.id for x in courses]
        print(cids)
        for course_id in cids:
            print("-"*77)
            print("Exporting course id = {0} to {1}".format(course_id, output_path))
            try:
                # Course ids contain '/', which is not filesystem-safe.
                course_dir = course_id.replace('/', '...')
                export_to_xml(ms, cs, course_id, root_dir, course_dir, modulestore())
            except Exception as err:
                print("="*30 + "> Oops, failed to export %s" % course_id)
                print("Error:")
                print(err)
|
build18-fpga-on-the-web/server | fpga_process.py | Python | mit | 1,898 | 0.035827 | import time
import multiprocessing
# Active-high segment patterns (segments a..g) for each displayable character.
sev_seg = {
    0: [1, 1, 1, 1, 1, 1, 0],
    1: [0, 1, 1, 0, 0, 0, 0],
    2: [1, 1, 0, 1, 1, 0, 1],
    3: [1, 1, 1, 1, 0, 0, 1],
    4: [0, 1, 1, 0, 0, 1, 1],
    5: [1, 0, 1, 1, 0, 1, 1],
    6: [1, 0, 1, 1, 1, 1, 1],
    7: [1, 1, 1, 0, 0, 1, 0],
    8: [1, 1, 1, 1, 1, 1, 1],
    9: [1, 1, 1, 1, 0, 1, 1],
    'a': [1, 1, 1, 0, 1, 1, 1],
    'b': [0, 0, 1, 1, 1, 1, 1],
    'c': [1, 0, 0, 1, 1, 1, 0],
    'd': [0, 1, 1, 1, 1, 0, 1],
    'e': [1, 0, 0, 1, 1, 1, 1],
    'f': [1, 0, 0, 0, 1, 1, 1],
    'u': [0, 0, 1, 1, 1, 0, 0],
    'L': [0, 1, 1, 0, 0, 0, 0],
    'i': [0, 0, 1, 0, 0, 0, 0],
    ' ': [0] * 7,
}


def flip(n):
    """Invert one bit: 0 -> 1 and 1 -> 0."""
    return 1 - n


def flip_hex(hex):
    """Invert every bit of every segment pattern in *hex* (active-low displays)."""
    return [[flip(bit) for bit in segment] for segment in hex]


def num_to_segs(n):
    """Split *n* into its 8 least-significant decimal digits (least significant
    first) and return the inverted (active-low) segment pattern for each."""
    patterns = []
    for _ in range(8):
        n, digit = divmod(n, 10)
        patterns.append([flip(bit) for bit in sev_seg[digit]])
    return patterns
def sw_to_hex(sw):
    """Interpret the switch list as a little-endian binary number.

    sw[0] is the least significant bit. Generalized from the original
    hard-coded 18-iteration loop: any length of switch list now works,
    and the result for the usual 18-element input is unchanged.
    """
    return sum(bit * (1 << i) for i, bit in enumerate(sw))
class FPGAProcess(multiprocessing.Process):
    """Background process that maps simulated board inputs to output states.

    Reads input dicts from inputQ and pushes a freshly computed output dict
    to outputQ whenever the outputs change.
    (Repaired two garbled lines in run() that broke the syntax.)
    """

    def __init__(self, inputQ, outputQ):
        multiprocessing.Process.__init__(self)
        self.inputQ = inputQ
        self.outputQ = outputQ

    def get_outputs(self, inputs):
        """Compute the LED and 7-segment outputs for the given input state."""
        outputs = {}
        switches = inputs.get("sw", [0]*18)
        # Red LEDs mirror the switches directly.
        outputs["ledr"] = switches
        # Green LEDs: the four keys repeated twice, plus one constant-on LED.
        outputs["ledg"] = inputs.get("key", [0]*4)*2 + [1]
        # Hex displays show the switches interpreted as a binary number.
        outputs["hex"] = num_to_segs(sw_to_hex(switches))
        return outputs

    def run(self):
        # Poll for new inputs and only publish outputs when they change.
        prev_outputs = None
        inputs = {}
        while True:
            if not self.inputQ.empty():
                print("From inputQ")
                inputs = self.inputQ.get()
                print(inputs)
            outputs = self.get_outputs(inputs)
            if outputs != prev_outputs:
                self.outputQ.put(outputs)
                prev_outputs = outputs
            time.sleep(0.01)
|
2ndy/RaspIM | usr/lib/python2.6/distutils/text_file.py | Python | gpl-2.0 | 15,086 | 0.004242 | """text_file
provides the TextFile class, which gives an interface to text files
that (optionally) takes care of stripping comments, ignoring blank
lines, and joining lines with backslashes."""
__revision__ = "$Id$"
from types import *
import sys, os, string
class TextFile:
"""Provides a file-like object that takes care of all the things you
commonly want to do when processing a text file that has some
line-by-line syntax: strip comments (as long as "#" is your
comment character), skip blank lines, join adjacent lines by
escaping the newline (ie. backslash at end of line), strip
leading and/or trailing whitespace. All of these are optional
and independently controllable.
Provides a 'warn()' method so you can generate warning messages that
report physical line number, even if the logical line in question
spans multiple physical lines. Also provides 'unreadline()' for
implementing line-at-a-time lookahead.
Constructor is called as:
TextFile (filename=None, file=None, ** | options)
It bombs (RuntimeError) if both 'filename' and 'file' are None;
'filename' should be a string, and 'file' a file object (or
something that provides 'readline()' and 'close()' methods). It is
recommended that you supply at least 'filename', so that TextFile
can include it in warning messages. If 'file' is not supplied,
TextFile creates its own using the 'open()' builtin.
The options are all | boolean, and affect the value returned by
'readline()':
strip_comments [default: true]
strip from "#" to end-of-line, as well as any whitespace
leading up to the "#" -- unless it is escaped by a backslash
lstrip_ws [default: false]
strip leading whitespace from each line before returning it
rstrip_ws [default: true]
strip trailing whitespace (including line terminator!) from
each line before returning it
skip_blanks [default: true}
skip lines that are empty *after* stripping comments and
whitespace. (If both lstrip_ws and rstrip_ws are false,
then some lines may consist of solely whitespace: these will
*not* be skipped, even if 'skip_blanks' is true.)
join_lines [default: false]
if a backslash is the last non-newline character on a line
after stripping comments and whitespace, join the following line
to it to form one "logical line"; if N consecutive lines end
with a backslash, then N+1 physical lines will be joined to
form one logical line.
collapse_join [default: false]
strip leading whitespace from lines that are joined to their
predecessor; only matters if (join_lines and not lstrip_ws)
Note that since 'rstrip_ws' can strip the trailing newline, the
semantics of 'readline()' must differ from those of the builtin file
object's 'readline()' method! In particular, 'readline()' returns
None for end-of-file: an empty string might just be a blank line (or
an all-whitespace line), if 'rstrip_ws' is true but 'skip_blanks' is
not."""
default_options = { 'strip_comments': 1,
'skip_blanks': 1,
'lstrip_ws': 0,
'rstrip_ws': 1,
'join_lines': 0,
'collapse_join': 0,
}
    def __init__ (self, filename=None, file=None, **options):
        """Construct a new TextFile object. At least one of 'filename'
        (a string) and 'file' (a file-like object) must be supplied.
        The keyword argument options are described above and affect
        the values returned by 'readline()'."""
        if filename is None and file is None:
            raise RuntimeError, \
                  "you must supply either or both of 'filename' and 'file'"
        # set values for all options -- either from client option hash
        # or fallback to default_options
        for opt in self.default_options.keys():
            if opt in options:
                setattr (self, opt, options[opt])
            else:
                setattr (self, opt, self.default_options[opt])
        # sanity check client option hash: reject unknown option names
        for opt in options.keys():
            if opt not in self.default_options:
                raise KeyError, "invalid TextFile option '%s'" % opt
        if file is None:
            self.open (filename)
        else:
            # caller supplied an already-open file-like object; just adopt it
            self.filename = filename
            self.file = file
            self.current_line = 0 # assuming that file is at BOF!
        # 'linebuf' is a stack of lines that will be emptied before we
        # actually read from the file; it's only populated by an
        # 'unreadline()' operation
        self.linebuf = []
    def open (self, filename):
        """Open a new file named 'filename'. This overrides both the
        'filename' and 'file' arguments to the constructor."""
        self.filename = filename
        self.file = open (self.filename, 'r')
        # fresh file handle is positioned at beginning-of-file
        self.current_line = 0
    def close (self):
        """Close the current file and forget everything we know about it
        (filename, current line number)."""
        self.file.close ()
        # drop all state so a stale handle can't be reused accidentally
        self.file = None
        self.filename = None
        self.current_line = None
    def gen_error (self, msg, line=None):
        """Build a diagnostic string "<filename>, line <N>: <msg>".

        'line' defaults to the current logical line; a list/tuple produces
        a "lines <M>-<N>" range (a logical line spanning several physical
        lines).
        """
        outmsg = []
        if line is None:
            line = self.current_line
        outmsg.append(self.filename + ", ")
        if type (line) in (ListType, TupleType):
            outmsg.append("lines %d-%d: " % tuple (line))
        else:
            outmsg.append("line %d: " % line)
        outmsg.append(str(msg))
        return string.join(outmsg, "")
    def error (self, msg, line=None):
        """Raise ValueError with a message tied to the current (or given) line."""
        raise ValueError, "error: " + self.gen_error(msg, line)
    def warn (self, msg, line=None):
        """Print (to stderr) a warning message tied to the current logical
        line in the current file. If the current logical line in the
        file spans multiple physical lines, the warning refers to the
        whole range, eg. "lines 3-5". If 'line' supplied, it overrides
        the current line number; it may be a list or tuple to indicate a
        range of physical lines, or an integer for a single physical
        line."""
        sys.stderr.write("warning: " + self.gen_error(msg, line) + "\n")
def readline (self):
"""Read and return a single logical line from the current file (or
from an internal buffer if lines have previously been "unread"
with 'unreadline()'). If the 'join_lines' option is true, this
may involve reading multiple physical lines concatenated into a
single string. Updates the current line number, so calling
'warn()' after 'readline()' emits a warning about the physical
line(s) just read. Returns None on end-of-file, since the empty
string can occur if 'rstrip_ws' is true but 'strip_blanks' is
not."""
# If any "unread" lines waiting in 'linebuf', return the top
# one. (We don't actually buffer read-ahead data -- lines only
# get put in 'linebuf' if the client explicitly does an
# 'unreadline()'.
if self.linebuf:
line = self.linebuf[-1]
del self.linebuf[-1]
return line
buildup_line = ''
while 1:
# read the line, make it None if EOF
line = self.file.readline()
if line == '': line = None
if self.strip_comments and line:
# Look for the first "#" in the line. If none, never
# mind. If we find one and it's the first character, or
# is not preceded by "\", then it starts a comment --
# strip the comment, strip whitespace before it, and
# carry on. Otherwise, it's just an escaped "#", so
|
devalfrz/django-image-styles | image_styles/views.py | Python | bsd-2-clause | 8,038 | 0.011072 | from django.shortcuts import render, HttpResponse, get_object_or_404
from django.http import Http404
from django.utils.decorators import method_decorator
from django.contrib.auth.decorators import login_required
from django.contrib.admin.views.decorators import staff_member_required
from django.urls import reverse,reverse_lazy
from django.utils.translation import ugettext_lazy as _
from django.views import View
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
import mimetypes
from .models import Style
from .forms import EffectForm,StyleForm
from .utils import get_effect_form_class,render_image
class RenderImageView(View):
    """Serve the image at *path* rendered through the style *style_name*."""

    def get(self, request, style_name, path):
        image = render_image(style_name, path)
        # guess_type returns (type, encoding); type may be None for unknowns.
        content_type = mimetypes.guess_type(image.image.path)
        # Context manager guarantees the handle is closed even if
        # HttpResponse construction raises (the original leaked it then).
        with open(image.image.path, 'rb') as f:
            r = HttpResponse(f, content_type=content_type[0])
        return r
class ModalForm(FormView):
    """Base FormView rendered inside a modal dialog.

    Subclasses customise the button labels, title and form action either
    via the class attributes below or by overriding the get_*() hooks.
    """
    template_name = 'image_styles/modal_form.html'
    submit_button = _('Save')
    delete_button = ''
    title = _('Create')
    action = '.'
    def get_action(self):
        return self.action
    def get_submit_button(self):
        return self.submit_button
    def get_delete_button(self):
        return self.delete_button
    def get_title(self):
        return self.title
    def get_context_data(self,**kwargs):
        # Expose the modal chrome (action, buttons, title) to the template.
        context = super().get_context_data(**kwargs)
        context['action'] = self.get_action()
        context['submit_button'] = self.get_submit_button()
        context['delete_button'] = self.get_delete_button()
        context['title'] = self.get_title()
        return context
class EffectFormMixin:
    """Shared behaviour for the effect create/update modal views.

    Resolves the effect model class and instance from the URL kwargs
    ('effect_name', 'style_id', 'effect_id') and routes form handling to
    the matching model form from get_effect_form_class().
    """
    effect = None
    style = None
    title = _('Create Effect')
    submit_button = _('Create')
    def dispatch(self,request,*args,**kwargs):
        self.effect_name = self.kwargs.get('effect_name')
        style_id = self.kwargs.get('style_id')
        if style_id:
            self.style = get_object_or_404(Style,id=style_id)
        effect_id = self.kwargs.get('effect_id')
        if effect_id and self.effect_name:
            # Effect models are looked up by name on the models module.
            from image_styles import models
            self.effect = get_object_or_404(getattr(models,self.effect_name),id=effect_id)
        return super().dispatch(request,*args,**kwargs)
    def get_form_class(self):
        form_class = get_effect_form_class(self.effect_name)
        if form_class:
            return form_class
        raise Http404("Not Found")
    def get_form_kwargs(self,*args,**kwargs):
        data = super().get_form_kwargs(*args,**kwargs)
        if self.effect:
            data['instance'] = self.effect
        return data
    def get_submit_button(self):
        if self.effect:
            return _('Update')
        return super().get_submit_button()
    def get_title(self):
        if self.effect:
            return _('Update Effect')
        return super().get_title()
    def get_action(self):
        if self.style:
            return reverse(
                'image_styles:effect_create',
                kwargs={'style_id':self.style.id,'effect_name':self.effect_name}
            )
        # BUG FIX: the URL pattern takes 'effect_id' (see dispatch() above
        # and ManageImageStylesView); 'effect' raised NoReverseMatch here.
        return reverse(
            'image_styles:effect_update',
            kwargs={'effect_id':self.effect.id,'effect_name':self.effect_name}
        )
    def form_valid(self,form):
        form.save()
        return HttpResponse(_('Effect Created!'))
    def delete(self,*args,**kwargs):
        if self.effect:
            self.effect.delete()
            return HttpResponse(_('Effect Removed!'))
        return HttpResponse(_('Delete failed!'))
class StyleFormMixin:
    """Shared behaviour for the style create/update modal view.

    Loads the Style from the 'style_id' URL kwarg when present and binds
    it to the StyleForm; also supplies labels/action for the modal chrome.
    """
    style = None
    form_class = StyleForm
    def dispatch(self,request,*args,**kwargs):
        style_id = self.kwargs.get('style_id')
        if style_id:
            self.style = get_object_or_404(Style,id=style_id)
            # Existing styles can be deleted, so show the delete button.
            self.delete_button = _('Delete')
        return super().dispatch(request,*args,**kwargs)
    def get_form_kwargs(self,*args,**kwargs):
        data = super().get_form_kwargs(*args,**kwargs)
        if self.style:
            data['instance'] = self.style
        return data
    def get_action(self):
        if self.style:
            return reverse(
                'image_styles:style_update',
                kwargs={'style_id':self.style.id}
            )
        return reverse('image_styles:style_create')
    def get_submit_button(self):
        if self.style:
            return _('Update')
        return super().get_submit_button()
    def get_title(self):
        if self.style:
            return _('Update Style')
        return super().get_title()
    def form_valid(self,form):
        form.save()
        return HttpResponse(_('Style Created!'))
    def delete(self,*args,**kwargs):
        if self.style:
            self.style.delete()
            return HttpResponse(_('Style Removed!'))
        return HttpResponse(_('Delete failed!'))
@method_decorator(staff_member_required(),name='dispatch')
class ManageImageStylesView(TemplateView):
    """Staff-only overview page listing every Style with its effects.

    Each effect is paired with a pre-filled model form and the URL of its
    update endpoint so the template can edit effects in place.
    """
    template_name = 'image_styles/home.html'
    def get_image_styles(self):
        """Return [{'style': Style, 'effects': [...]}, ...] for the template."""
        ims = []
        for s in Style.objects.all():
            effects = s.get_effects()
            for i in range(len(effects)):
                form = get_effect_form_class(effect_model=effects[i]['object'])
                if form:
                    effects[i]['form'] = form(instance=effects[i]['object'])
                effects[i]['action'] = reverse(
                    'image_styles:effect_update',
                    kwargs = {
                        'effect_id':effects[i]['object'].id,
                        'effect_name':effects[i]['object'].get_name()
                    }
                )
            ims.append({
                'style':s,
                'effects':effects,
            })
        return ims
    def get_context_data(self,**kwargs):
        context = super().get_context_data(**kwargs)
        context['styles'] = self.get_image_styles()
        return context
@method_decorator(staff_member_required(),name='dispatch')
class EffectCreateInitView(ModalForm):
    """First step of effect creation: choose which effect type to add.

    On a valid choice, swaps in the concrete effect form class and
    re-renders the modal pointing at the real create endpoint.
    (Repaired two garbled method-definition lines that broke the syntax.)
    """
    form_class = EffectForm
    submit_button = _('Next')
    title = _('Select Effect')
    def dispatch(self,request,*args,**kwargs):
        self.style = get_object_or_404(Style,id=self.kwargs.get('style_id'))
        return super().dispatch(request,*args,**kwargs)
    def get_form(self,**kwargs):
        form = super().get_form(**kwargs)
        # Pre-select the style the new effect will belong to.
        form.initial['style'] = self.style
        return form
    def get_submit_button(self):
        if self.form_class != EffectForm:
            return _('Create')
        return super().get_submit_button()
    def get_title(self):
        if self.form_class != EffectForm:
            return _('Create Effect')
        return super().get_title()
    def get_action(self):
        if self.action == '.':
            return reverse('image_styles:effect_create_init',kwargs={'style_id':self.style.id})
        return self.action
    def form_valid(self,form):
        # Step two: replace the generic chooser with the chosen effect's form.
        effect_name = form.cleaned_data.get('effect')
        self.form_class = get_effect_form_class(effect_name=effect_name)
        self.action = reverse(
            'image_styles:effect_create',
            kwargs={'style_id':self.style.id,'effect_name':effect_name}
        )
        self.request.method = 'GET'
        return super().get(self.request,style_id=self.style.id)
@method_decorator(staff_member_required(),name='dispatch')
class EffectCreateView(EffectFormMixin,ModalForm):
    """Modal form that creates a new effect on the style from the URL."""
    title = _('Create Effect')
    submit_button = _('Create')
    def get_form(self,**kwargs):
        form = super().get_form(**kwargs)
        # Pre-select the style the new effect will belong to.
        form.initial['style'] = self.style
        return form
@method_decorator(staff_member_required(),name='dispatch')
class EffectUpdateView(EffectFormMixin,ModalForm):
    """Modal form for editing an existing effect; behaviour lives in EffectFormMixin."""
    pass
@method_decorator(staff_member_required(),name='dispatch')
class StyleView(StyleFormMixin,ModalForm):
    """Modal form for creating or updating a Style; behaviour lives in StyleFormMixin."""
    pass
|
tomasstorck/diatomas | blender/rendermonitor.py | Python | mit | 5,383 | 0.007988 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Generic rendering function that can handle all simulations, but makes poor renders. Nice for sketching.
import os, time, subprocess, re
def Say(text, verbosity=0, end='\n', suppressTime=False):
    """Print *text* when its verbosity is within the global VERBOSITY level.

    Level-0 messages are prefixed with a HH:MM:SS timestamp (unless
    suppressTime is set); higher levels are printed as DEBUG lines.
    """
    if verbosity > VERBOSITY:
        return
    prefix = '' if suppressTime else time.strftime('%H:%M:%S ')
    if verbosity == 0:
        print(prefix + text, end=end)
    else:
        # Debug output intentionally carries no timestamp.
        print('DEBUG: ' + text, end=end)
###############################################################################
# Global settings #
###################
dirFilter = '.*' # regular expression
#dirFilter = 'ecoli_'
#dirFilter = 'odetol' # regular expression
VERBOSITY = 0 # Note that only rendermonitor.py is printed to console, render.py shows up in logfile.txt
iterModDiv = 5
###############################################################################
resultsPath = os.getcwd()[:os.getcwd().index("/blender")] + "/results"
# Poll the results directory forever, rendering any not-yet-rendered frames.
# (Repaired stray '|' artifacts that broke the settings dict, switched the
# filename regex to a raw string, and replaced a side-effect-only list
# comprehension and a `file` builtin shadow with plain code.)
while True:
    t0 = time.time()
    dirList = [dirs for dirs in os.listdir(resultsPath) if os.path.isdir(os.path.join(resultsPath, dirs)) and os.path.isdir(os.path.join(resultsPath, dirs, 'output')) and re.search(dirFilter,dirs)]
    dirList.sort()
    for d in dirList:
        Say(d)
        #######################################################################
        # Pre-render settings                                                 #
        #######################
        renderpySettingsDict = {'VERBOSITY':VERBOSITY,
                                'resolution_percentage':50,
                                'offset':'array([120,120,20])',
                                'model.L':'array([60e-6,60e-6,60e-6])',
                                'saveBlend':True,
                                'drawStick':False,
                                'renderDir':'render'
                                }
        #renderpySettingsDict['suppressRender'] = True
        # Per-simulation-type overrides, keyed off the directory name prefix.
        if re.match('^aom', d):
            renderpySettingsDict['model.L'] = 'array([20e-6,20e-6,20e-6])'
            renderpySettingsDict['offset'] = 'array([10,10,10])'
            renderpySettingsDict['configMaterial'] = 'ConfigAOM'
            renderpySettingsDict['gridStepSize'] = 5
        elif re.match('^as', d):
            renderpySettingsDict['model.L'] = 'array([80e-6,80e-6,80e-6])'
            renderpySettingsDict['offset'] = 'array([40,40,40])'
            renderpySettingsDict['configMaterial'] = 'ConfigAS'
        elif re.match('^ecoli', d):
            renderpySettingsDict['model.L'] = 'array([80e-6,80e-6,80e-6])'
            renderpySettingsDict['offset'] = 'array([40,40,0])'
            renderpySettingsDict['configMaterial'] = 'ConfigEcoli' # Change colours of cells for consistency with paper/experiments
            renderpySettingsDict['colourByGeneration'] = True
        #######################################################################
        dAbs = resultsPath + "/" + d + "/output"
        Say(dAbs, 2)
        fileList = [files for files in os.listdir(dAbs) if os.path.splitext(files)[-1]=='.mat']
        fileList.sort(reverse=True)
        for f in fileList:
            # Only render every iterModDiv-th relaxation iteration
            # (YYYY in filename gXXXXrYYYY.mat).
            if not int(re.match(r'g(\d{4})r(\d{4}).mat',f).group(2))%iterModDiv == 0:
                continue
            fAbs = dAbs + "/" + f
            # Check if file is already rendered (a PNG with the same stem exists).
            fName = os.path.splitext(fAbs.split("/")[-1])[0]
            renderPath = (fAbs[:fAbs.index("/output/"+fName)] + "/" + renderpySettingsDict['renderDir']) if ("/output/"+f in fAbs) else ("/".join(fAbs.split("/")[:-1]))
            if os.path.isfile(renderPath + "/" + fName + ".png"):
                Say(" " + f + ' --> already rendered', 2)
            else:
                Say(" " + f, end='\r')
                # Build the blender call: render.py gets the .mat path followed
                # by the settings as alternating key/value arguments.
                callStr = ["blender", "--background", "--python", "render.py", "--", fAbs]
                for key, val in renderpySettingsDict.items():
                    callStr.extend([key, str(val)])
                Say("\nCall string = " + " ".join(callStr), verbosity=2)
                [stdout, _] = subprocess.Popen(callStr, stdout=subprocess.PIPE, stderr=subprocess.STDOUT).communicate()
                stdout = stdout.decode()
                if 'Error' in stdout or 'WARNING' in stdout:
                    # Keep the full blender output for post-mortem inspection.
                    with open('logfile.txt', 'w') as logfile:
                        logfile.write(time.strftime('%Y/%m/%d, %H:%M:%S') + " (" + fAbs + ")\n\n" + stdout)
                    if 'error' in stdout.lower() and 'warning' in stdout.lower():
                        suffix = " --> WARNING and ERROR"
                    elif 'error' in stdout.lower():
                        suffix = " --> ERROR"
                    else:
                        suffix = " --> "
                    for line in stdout.split('\n'):
                        if 'warning' in line.lower():
                            suffix += line + ' '
                    Say(" " + f + suffix)
                else:
                    Say('', suppressTime=True) # Make newline
    time.sleep(max(0, 10-(time.time()-t0))) # There must be at least some time between each loop
mbookman/dsub | dsub/lib/job_util.py | Python | apache-2.0 | 2,587 | 0.002319 | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Classes for dsub job script and resource."""
import collections
class JobResources(
    collections.namedtuple('JobResources', [
        'min_cores', 'min_ram', 'disk_size', 'boot_disk_size', 'preemptible',
        'image', 'logging', 'zones', 'scopes', 'keep_alive'
    ])):
  """Compute, memory, and disk settings requested for a job.

  Attributes:
    min_cores (int): number of CPU cores
    min_ram (float): amount of memory (in GB)
    disk_size (int): size of the data disk (in GB)
    boot_disk_size (int): size of the boot disk (in GB)
    preemptible (bool): use a preemptible VM for the job
    image (str): Docker image name
    logging (param_util.LoggingParam): path to location for jobs to write logs
    zones (str): location in which to run the job
    scopes (list): OAuth2 scopes for the job
    keep_alive (int): Seconds to keep VM alive on failure
  """
  __slots__ = ()

  def __new__(cls,
              min_cores=1,
              min_ram=1,
              disk_size=10,
              boot_disk_size=10,
              preemptible=False,
              image=None,
              logging=None,
              zones=None,
              scopes=None,
              keep_alive=None):
    # Forward every field by keyword so the mapping to the namedtuple
    # fields stays obvious even if the field list grows.
    return super(JobResources, cls).__new__(
        cls,
        min_cores=min_cores,
        min_ram=min_ram,
        disk_size=disk_size,
        boot_disk_size=boot_disk_size,
        preemptible=preemptible,
        image=image,
        logging=logging,
        zones=zones,
        scopes=scopes,
        keep_alive=keep_alive)
class Script(object):
  """A job script together with the file name it should be written to.

  The Pipelines API only runs bash commands directly, so arbitrary scripts
  (Python, Ruby, etc.) are shipped as an environment-variable input whose
  contents the docker command writes to *name* and then executes.

  Attributes:
    name: (str) file name the script is saved under.
    value: (str) full text of the script.
  """

  def __init__(self, name, value):
    self.name, self.value = name, value
|
luisbg/gst-introspection | gstgengui/gtk_controller.py | Python | lgpl-2.1 | 13,592 | 0.004194 | #!/usr/bin/env python
import pygtk
pygtk.require('2.0')
import gtk
import gobject
# TODOs
# * refresh is unstable
# * auto video embed is unstable (black contours ? lag ?)
class VideoWidget(gtk.DrawingArea):
    """Drawing area that a GStreamer video sink paints into directly.

    The sink is attached later via set_sink(); until then expose events
    fall through to the default handler.
    """
    def __init__(self):
        gtk.DrawingArea.__init__(self)
        self.imagesink = None
        # The sink draws straight to the X window, so GTK double buffering
        # would overwrite each frame with a blank back buffer.
        self.unset_flags(gtk.DOUBLE_BUFFERED)
    def do_expose_event(self, event):
        # With a sink attached, ask it to repaint and stop propagation;
        # otherwise let GTK handle the expose normally.
        if self.imagesink:
            self.imagesink.expose()
            return False
        else:
            return True
    def set_sink(self, sink):
        """Attach *sink* (an XOverlay-capable video sink) to this widget's X window."""
        assert self.window.xid
        self.imagesink = sink
        self.imagesink.set_xwindow_id(self.window.xid)
class GtkGstController:
    def delete_event(self, widget, event, data=None):
        """Window-close handler: stop the pipeline, then allow destruction."""
        print "delete event occurred"
        self.pipeline_launcher.stop()
        # Returning False lets GTK emit "destroy" next.
        return False
    def destroy(self, widget, data=None):
        """Quit the GTK main loop when the window is destroyed."""
        print "destroy signal occurred"
        gtk.main_quit()
def __init__(self, pipeline_launcher):
self.pipeline_launcher = pipeline_launcher
self.pipeline_launcher.bus.enable_sync_message_emission()
self.pipeline_launcher.bus.connect('sync-message::element', self.on_sync_message)
self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
self.window.set_title("gst-gengui")
self.window.set_size_request(800, 600)
# Sets the border width of the window.
self.window.set_border_width(6)
#self.main_container = gtk.VBox(False, 0)
self.main_container = gtk.VPaned()
self.properties_container = gtk.VBox(False, 0)
# graphical pipeline output
self.preview_container = gtk.HBox(False, 0)
self.preview_container.set_size_request(800,200)
# parameter area
self.scrolled_window = scrolled_window = gtk.ScrolledWindow()
scrolled_window.set_border_width(0)
scrolled_window.set_policy(gtk.POLICY_AU | TOMATIC, gtk.POLICY_ALWAYS)
scrolled_window.add_with_viewport(self.properties_container)
# play/stop/pause controls
pipeline_controls = self._create_pipeline_cont | rols(pipeline_launcher)
#self.main_container.pack_start(self.preview_container, True, True, 3)
#self.main_container.pack_start(pipeline_controls, False, False, 3)
#self.main_container.pack_end(scrolled_window, True, True, 3)
#self.main_container.add1(self.preview_container)
self.main_container.add1(self.preview_container)
self.main_container.add2(pipeline_controls)
self.window.add(self.main_container)
self.window.connect("delete_event", self.delete_event)
self.window.connect("destroy", self.destroy)
self.window.show_all()
    def on_sync_message(self, bus, message):
        """Bus handler: embed a video widget when a sink asks for an X window."""
        if message.structure is None:
            return
        if message.structure.get_name() == 'prepare-xwindow-id':
            print "prepare-xwindow-id, %s" %message
            self._create_videowidget(message)
    def _create_videowidget(self, message):
        """Create a VideoWidget in the preview area and hand it the sink
        (message.src) that requested an X window id."""
        videowidget = None
        videowidget = VideoWidget()
        videowidget.show()
        self.preview_container.pack_start(videowidget, True, True, 0)
        # Sync with the X server before giving the X-id to the sink
        # gtk.gdk.display_get_default().sync()
        # gobject.idle_add(videowidget.set_sink, message.src)
        videowidget.set_sink(message.src)
        message.src.set_property('force-aspect-ratio', True)
    def _create_pipeline_controls(self, pipeline_launcher):
        """Build the control pane: editable pipeline description, transport
        buttons (Play/Stop/Pause/EOS), state/position labels and the
        properties scroll area. Returns the container widget."""
        container = gtk.VBox(False,3)
        label = gtk.Label("Pipeline description")
        entry = gtk.TextView()
        entry.set_size_request(400,50)
        entry.set_wrap_mode(gtk.WRAP_CHAR)
        self.textbuffer = textbuffer = entry.get_buffer()
        textbuffer.set_modified(False)
        container.add(label)
        container.add(entry)
        container_btns = gtk.HBox()
        container.add(container_btns)
        # Refresh stays disabled until the description text is edited
        # (see _check_for_pipeline_changes).
        self.refresh_button = refresh_btn = self._create_button(label="Refresh", callback=self._refresh)
        refresh_btn.set_sensitive(False)
        container_btns.add(refresh_btn)
        state_label = gtk.Label("State")
        container_btns.add(state_label)
        position_label = gtk.Label("Position")
        container_btns.add(position_label)
        start_btn = self._create_button(label="Play", callback=pipeline_launcher.run)
        container_btns.add(start_btn)
        stop_btn = self._create_button(label="Stop", callback=self.stop)
        container_btns.add(stop_btn)
        pause_btn = self._create_button(label="Pause", callback=pipeline_launcher.pause)
        container_btns.add(pause_btn)
        eos_btn = self._create_button(label="Send EOS", callback=pipeline_launcher.send_eos)
        container_btns.add(eos_btn)
        container.add(self.scrolled_window)
        # Polling for changes
        gobject.timeout_add(500, self._check_for_pipeline_changes, textbuffer)
        gobject.timeout_add(500, self._check_for_pipeline_position, position_label)
        gobject.timeout_add(500, self._check_for_pipeline_state, state_label)
        return container
    def main(self):
        """Schedule the pipeline start and enter the GTK main loop."""
        gobject.idle_add(self.pipeline_launcher.run)
        gtk.main()
    def stop(self, *args):
        """Stop the pipeline and tear down any embedded preview widgets."""
        self.pipeline_launcher.stop(*args)
        self._clean_previews()
    def _clean_previews(self):
        """Remove every video widget from the preview container."""
        for video in self.preview_container:
            self.preview_container.remove(video)
            del(video)
    def _check_for_pipeline_state(self, state_label):
        """Timeout callback: mirror the pipeline state into the label.
        Returns True so gobject keeps the timer running."""
        state = self.pipeline_launcher.get_state()
        state_label.set_text(state)
        return True
    def _check_for_pipeline_position(self, position_label):
        """Timeout callback: show current position/duration in the label.
        Returns True so gobject keeps the timer running."""
        duration = str(self.pipeline_launcher.get_duration())
        position = str(self.pipeline_launcher.get_position())
        position_label.set_text("Position: %s s / %s s" %(position, duration))
        return True
    def _check_for_pipeline_changes(self, textbuffer):
        """Timeout callback: when the description text was edited, cache it
        and enable the Refresh button. Returns True to stay scheduled."""
        if textbuffer.get_modified():
            #print "Change detected"
            self.new_description = textbuffer.get_text(*textbuffer.get_bounds())
            self.refresh_button.set_sensitive(True)
        return True
    def _get_latest_description(self):
        """Return the last edited pipeline description string.
        NOTE(review): only set once the text buffer was modified — calling
        this before any edit raises AttributeError; confirm intended."""
        return self.new_description
    def _reset_property(self, widget, args):
        """Button callback: restore a GStreamer element property (args[0])
        to its default value and sync the adjustment widget (args[1])."""
        print "Resetting property value to default value"
        property = args[0]
        adj = args[1]
        property.parent_element.set_property(property.name, property.default_value)
        adj.set_value(property.default_value)
    def _refresh(self, *args):
        """Rebuild the pipeline from the (edited) description and restart it."""
        self._clean_controls()
        self.stop(*args)
        print "Refreshing pipeline with description: %s" %self.new_description
        self.pipeline_launcher.redefine_pipeline(new_string=self._get_latest_description())
        self.pipeline_launcher.bus.connect('message::element', self.on_sync_message)
        self.pipeline_launcher.run()
        self.textbuffer.set_modified(False)
        # NOTE(review): rebuild_callback is never assigned in this class —
        # presumably injected by the caller after construction; confirm.
        self.rebuild_callback(self.pipeline_launcher.pipeline, self)
    def _clean_controls(self):
        """Remove every property widget from the properties pane."""
        print "Removing all controls"
        for item in self.properties_container:
            self.properties_container.remove(item)
    def _create_button(self, label="Hello", callback=None, callback_args=None):
        """Create a visible gtk.Button, optionally wired to *callback*."""
        button = gtk.Button(label)
        button.show()
        if callback is not None:
            button.connect("clicked", callback, callback_args)
        return button
def _create_element_widget(self, element):
mcontainer = gtk.Expander(element.name)
container = gtk.VBox()
mcontainer.add(container)
print element.name
if len(element.number_properties) > 0:
for number_property in element.number_properties:
spinner = self._create_spinner(number_property)
container.pack_start(spinner, False, False, 6)
if len(element.boolean_properties) > 0:
for boolean_property in element.boolean_properties:
check_btn = self._create_check_btn(boolean_property)
|
genos/online_problems | rosalind/lia.py | Python | mit | 431 | 0 | #!/usr/bin/env python
# coding: utf-8
from rosalind import binom
def pbinom(n, p, r):
    """Binomial probability mass: P(X == r) for X ~ Binomial(n, p)."""
    # Use the standard-library binomial coefficient instead of the
    # project-local ``binom`` helper (equivalent, C-accelerated).
    from math import comb
    return comb(n, r) * pow(p, r) * pow(1 - p, n - r)
def dbinom(n, p, r):
    """Cumulative binomial probability: P(X <= r) for X ~ Binomial(n, p)."""
    total = 0
    for successes in range(r + 1):
        total += pbinom(n, p, successes)
    return total
def lia(k, N):
    """Rosalind LIA: probability that at least N Aa Bb organisms appear in
    generation k (2**k offspring, each Aa Bb with probability 1/4)."""
    offspring = 1 << k
    # P(X >= N) = 1 - P(X <= N - 1)
    return 1 - dbinom(offspring, .25, N - 1)
if __name__ == "__main__":
    # Read k and N from the Rosalind dataset file and print the answer.
    with open("data/rosalind_lia.txt") as f:
        k, N = map(int, f.read().split())
    # fixed: the call was garbled ("lia( | k, N)")
    print(lia(k, N))
philanthropy-u/edx-platform | openedx/core/djangoapps/user_api/helpers.py | Python | agpl-3.0 | 20,700 | 0.001787 | """
Helper functions for the account/profile Python APIs.
This is NOT part of the public API.
"""
import json
import logging
import traceback
from collections import defaultdict
from functools import wraps
from django import forms
from django.core.serializers.json import DjangoJSONEncoder
from django.http import HttpResponseBadRequest, HttpRequest
from django.utils.encoding import force_text
from django.utils.functional import Promise
LOGGER = logging.getLogger(__name__)
def intercept_errors(api_error, ignore_errors=None):
    """
    Function decorator that intercepts exceptions
    and translates them into API-specific errors (usually an "internal" error).
    This allows callers to gracefully handle unexpected errors from the API.
    This method will also log all errors and function arguments to make
    it easier to track down unexpected errors.
    Arguments:
        api_error (Exception): The exception to raise if an unexpected error is encountered.
    Keyword Arguments:
        ignore_errors (iterable): List of errors to ignore. By default, intercept every error.
    Returns:
        function
    """
    def _decorator(func):
        """
        Function decorator that intercepts exceptions and translates them into API-specific errors.
        """
        @wraps(func)
        def _wrapped(*args, **kwargs):
            """
            Wrapper that evaluates a function, intercepting exceptions and translating them into
            API-specific errors.
            """
            try:
                return func(*args, **kwargs)
            except Exception as ex:
                # Raise and log the original exception if it's in our list of "ignored" errors
                for ignored in ignore_errors or []:
                    if isinstance(ex, ignored):
                        msg = (
                            u"A handled error occurred when calling '{func_name}' "
                            u"with arguments '{args}' and keyword arguments '{kwargs}': "
                            u"{exception}"
                        ).format(
                            # func.__name__ works on both Python 2 and 3;
                            # the old func.func_name attribute is Python-2-only.
                            func_name=func.__name__,
                            args=args,
                            kwargs=kwargs,
                            exception=ex.developer_message if hasattr(ex, 'developer_message') else repr(ex)
                        )
                        LOGGER.warning(msg)
                        raise
                caller = traceback.format_stack(limit=2)[0]
                # Otherwise, log the error and raise the API-specific error
                msg = (
                    u"An unexpected error occurred when calling '{func_name}' "
                    u"with arguments '{args}' and keyword arguments '{kwargs}' from {caller}: "
                    u"{exception}"
                ).format(
                    func_name=func.__name__,
                    args=args,
                    kwargs=kwargs,
                    exception=ex.developer_message if hasattr(ex, 'developer_message') else repr(ex),
                    caller=caller.strip(),
                )
                LOGGER.exception(msg)
                raise api_error(msg)
        return _wrapped
    return _decorator
def require_post_params(required_params):
    """
    View decorator that ensures the required POST params are
    present. If not, returns an HTTP response with status 400.
    Args:
        required_params (list): The required parameter keys.
    Returns:
        HttpResponse
    """
    def _decorator(func):  # pylint: disable=missing-docstring
        @wraps(func)
        def _wrapped(*args, **_kwargs):
            # The decorated view's first positional argument is the request.
            request = args[0]
            missing_params = set(required_params) - set(request.POST.keys())
            if len(missing_params) > 0:
                msg = u"Missing POST parameters: {missing}".format(
                    missing=", ".join(missing_params)
                )
                return HttpResponseBadRequest(msg)
            else:
                return func(request)
        return _wrapped
    return _decorator
class InvalidFieldError(Exception):
    """Raised when a field definition passed to the form builder is not valid."""
class FormDescription(object):
"""Generate a JSON representation of a form. """
ALLOWED_TYPES = ["text", "email", "select", "textarea", "checkbox", "plaintext", "password", "hidden"]
ALLOWED_RESTRICTIONS = {
"text": ["min_length", "max_length"],
"password": ["min_length", "max_length", "min_upper", "min_lower",
"min_punctuation", "min_symbol", "min_numeric", "min_alphabetic"],
"email": ["min_length", "max_length", "readonly"],
}
FIELD_TYPE_MAP = {
forms.CharField: "text",
forms.PasswordInput: "password",
forms.ChoiceField: "select",
forms.TypedChoiceField: "select",
forms.Textarea: "textarea",
forms.BooleanField: "checkbox",
forms.EmailField: "email",
}
OVERRIDE_FIELD_PROPERTIES = [
"label", "type", "defaultValue", "placeholder",
"instructions", "required", "restrictions",
"options", "supplementalLink", "supplementalText"
]
    def __init__(self, method, submit_url):
        """Configure how the form should be submitted.
        Args:
            method (unicode): The HTTP method used to submit the form.
            submit_url (unicode): The URL where the form should be submitted.
        """
        self.method = method
        self.submit_url = submit_url
        # Field dicts appended via add_field(), in display order.
        self.fields = []
        # Per-field property overrides, keyed by field name.
        self._field_overrides = defaultdict(dict)
def add_field(
self, name, label=u"", field_type=u"text", default=u"",
placeholder=u"", instructions=u"", required=True, restrictions=None,
options=None, include_default_option=False, error_messages=None,
supplementalLink=u"", supplementalText=u""
):
"""Add a field to the form description.
Args:
name (unicode): The name of the field, which is the key for the value
to send back to the server.
Keyword Arguments:
label (unicode): The label for the field (e.g. "E-mail" or "Username")
field_type (unicode): The type of the field. See `ALLOWED_TYPES` for
acceptable values.
default (unicode): The default value for the field.
placeholder (unicode): Placeholder text in the field
(e.g. "user@example.com" for an email field)
instructions (unicode): Short instructions for using the field
(e.g. "This is the email address you used when you registered.")
required (boolean): Whether the field is required or optional.
restrictions (dict): Validation restrictions for the field.
See `ALLOWED_RESTRICTIONS` for acceptable values.
options (list): For "select" fields, a list of tuples
(value, display_name) representing the options available to
the user. `value` is the value of the field to send to the server,
and `display_name` is the name to display to the user.
If the field type is "select", you *must* provide this kwarg.
include_default_option (boolean): If True, include a "default" empty option
at the beginning of the options list.
error_messages (dict): Custom validation error messages.
Currently, the only supported key is "required" indicating
that the messages should be displayed if the user does
not provide a value for a required field.
supplementalLink (unicode): A qualified URL to provide supplemental information
for the form field. An example may be a link to documentation for creating
strong passwords.
supplementalText (unicode): The visible text for the supplemental link above.
Raises:
InvalidFieldError
"""
if field_type not in self.ALLOWED_TYPES:
msg = u"Field type '{field_type}' is not a valid type. Allowed types are: |
lukovnikov/teafacto | test/test_tensorWrapped.py | Python | mit | 687 | 0.001456 | from unittest import TestCase
from teafacto.core.base import Val, tensorops as T
import numpy as np
class TestTensorWrapped(TestCase):
    """Axis-manipulation ops on wrapped tensors (Val) vs. numpy reference."""

    def test_dimswap(self):
        # dimswap(1, 0) must match numpy transpose of the first two axes.
        xval = np.random.randint(0, 5, (2, 3, 4))
        x = Val(xval)
        y = x.dimswap(1, 0)
        # fixed: expression was garbled ("xval | .transpose")
        eyval = xval.transpose(1, 0, 2)
        self.assertTrue(np.allclose(eyval, y.eval()))

    def test_reverse(self):
        # reverse(axis, ...) must match numpy slice reversal on those axes.
        xval = np.random.randint(0, 5, (2, 3, 4))
        x = Val(xval)
        y = x.reverse(1)
        yval = xval[:, ::-1, :]
        # fixed: call was garbled ("y.ev | al()")
        self.assertTrue(np.allclose(yval, y.eval()))
        y = x.reverse(0, 1)
        yval = xval[::-1, ::-1, :]
        self.assertTrue(np.allclose(yval, y.eval()))
|
the-blue-alliance/the-blue-alliance | old_py2/helpers/insight_manipulator.py | Python | mit | 1,096 | 0.001825 | from helpers.manipulator_base import ManipulatorBase
class InsightManipulator(ManipulatorBase):
    """
    Handle Insight database writes.
    """
    @classmethod
    def updateMerge(self, new_insight, old_insight, auto_union=True):
        """
        Given an "old" and a "new" Insight object, replace the fields in the
        "old" Insight that are present in the "new" Insight, but keep fields
        from the "old" Insight that are null in the "new" insight.
        Marks old_insight.dirty when anything actually changed.
        """
        # Only these attributes participate in the merge.
        attrs = [
            'name',
            'year',
            'data_json',
        ]
        for attr in attrs:
            if getattr(new_insight, attr) is not None:
                # fixed: call was garbled ("geta | ttr")
                if getattr(new_insight, attr) != getattr(old_insight, attr):
                    setattr(old_insight, attr, getattr(new_insight, attr))
                    old_insight.dirty = True
            # NOTE(review): the literal string "None" (not the None object)
            # appears to act as an explicit "clear this field" sentinel from
            # the import layer — confirm before changing.
            if getattr(new_insight, attr) == "None":
                if getattr(old_insight, attr, None) is not None:
                    setattr(old_insight, attr, None)
                    old_insight.dirty = True
        return old_insight
|
swift-lang/swift-e-lab | parsl/tests/integration/test_early_attach_bug.py | Python | apache-2.0 | 1,099 | 0.00273 | """Testing early attach behavior with LoadBalanced view
Test setup:
Start the ipcontroller and 1 ipengine, and run this script.
The time to finish the 10 apps should be ~10s.
In the second run, start the parsl script, and as soon as the run starts,
start additional ipengines. The time to finish the 10 apps should still be ~10s.
This shows that the LoadBalanced View simply routes tasks to the available engines
at the time the apps were submitted to it. It is not capable of rebalancing the apps
among the engines once they have been sent to an engine's queue.
"""
from parsl import *
import time
from parsl.tests.configs.local_ipp import config
dfk = DataFlowKernel(config=config)
@App('python', dfk)
def sleep_double(x):
    """Parsl app: sleep one second, then return twice the input."""
    import time
    time.sleep(1)
    return x * 2
def test_z_cleanup():
    """Shut down the DataFlowKernel (name was garbled: "test_z_c | leanup")."""
    dfk.cleanup()
if __name__ == "__main__":
    print("Starting launch")
    # Submit 20 apps up front; only the first 10 results are awaited below,
    # so total wall time shows how tasks were distributed across engines.
    jobs = {}
    for i in range(0, 20):
        jobs[i] = sleep_double(i)
    start = time.time()
    for i in range(0, 10):
        print(jobs[i].result())
    print("Time to finish : ", time.time() - start)
|
h2oai/h2o-3 | h2o-py/tests/testdir_algos/targetencoder/pyunit_te_deprecated_params.py | Python | apache-2.0 | 7,090 | 0.003385 | from __future__ import print_function
import os
import sys
import warnings
sys.path.insert(1, os.path.join("..","..","..",".."))
import h2o
from h2o.estimators import H2OTargetEncoderEstimator
from h2o.exceptions import H2ODeprecationWarning
from h2o.utils.metaclass import fullname
from tests import pyunit_utils as pu
seed = 42
te_init_name = fullname(H2OTargetEncoderEstimator.__init__)
def load_dataset(incl_test=False, incl_foldc=False):
    """Load the expanded Titanic frame; optionally split off a test frame
    and/or add a 3-fold CV column. Returns a namespace with train/test/target."""
    frame = h2o.import_file(pu.locate("smalldata/titanic/titanic_expanded.csv"), header=1)
    train, test = frame, None
    if incl_test:
        splits = frame.split_frame(ratios=[.8], destination_frames=["titanic_train", "titanic_test"], seed=seed)
        train, test = splits[0], splits[1]
    if incl_foldc:
        train["foldc"] = train.kfold_column(3, seed)
    return pu.ns(train=train, test=test, target="survived")
def test_deprecated_k_param_is_alias_for_inflection_point():
    """Deprecated ``k`` must warn and behave exactly like ``inflection_point``."""
    ds = load_dataset(incl_test=True)
    te = H2OTargetEncoderEstimator(noise=0)
    te.train(y=ds.target, training_frame=ds.train)
    encoded = te.predict(ds.test)
    # print(encoded)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        te_k = H2OTargetEncoderEstimator(noise=0, k=5, blending=True)
        assert len(w) == 1
        assert issubclass(w[0].category, H2ODeprecationWarning)
        assert "``k`` param of ``{}`` is deprecated".format(te_init_name) in str(w[0].message)
    te_k.train(y=ds.target, training_frame=ds.train)
    encoded_k = te_k.predict(ds.test)
    # print(encoded_k)
    te_ip = H2OTargetEncoderEstimator(noise=0, inflection_point=5, blending=True)
    te_ip.train(y=ds.target, training_frame=ds.train)
    # fixed: call was garbled ("te_ip.p | redict")
    encoded_ip = te_ip.predict(ds.test)
    # print(encoded_ip)
    try:
        pu.compare_frames(encoded_k, encoded, 0, tol_numeric=1e-5)
        assert False, "should have raised"
    except AssertionError as ae:
        assert "should have raised" not in str(ae)
    assert pu.compare_frames(encoded_k, encoded_ip, 0, tol_numeric=1e-5)
def test_deprecated_f_param_is_alias_for_smoothing():
    """Deprecated ``f`` must warn and behave exactly like ``smoothing``."""
    ds = load_dataset(incl_test=True)
    te = H2OTargetEncoderEstimator(noise=0)
    te.train(y=ds.target, training_frame=ds.train)
    encoded = te.predict(ds.test)
    # print(encoded)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        te_f = H2OTargetEncoderEstimator(noise=0, f=25, blending=True)
        assert len(w) == 1
        assert issubclass(w[0].category, H2ODeprecationWarning)
        assert "``f`` param of ``{}`` is deprecated".format(te_init_name) in str(w[0].message)
    te_f.train(y=ds.target, training_frame=ds.train)
    encoded_f = te_f.predict(ds.test)
    # print(encoded_f)
    te_s = H2OTargetEncoderEstimator(noise=0, smoothing=25, blending=True)
    te_s.train(y=ds.target, training_frame=ds.train)
    encoded_s = te_s.predict(ds.test)
    # print(encoded_s)
    try:
        pu.compare_frames(encoded_f, encoded, 0, tol_numeric=1e-5)
        assert False, "should have raised"
    except AssertionError as ae:
        assert "should have raised" not in str(ae)
    assert pu.compare_frames(encoded_f, encoded_s, 0, tol_numeric=1e-5)
def test_deprecated_noise_level_param_is_alias_for_noise():
    """Deprecated ``noise_level`` must warn and behave exactly like ``noise``."""
    ds = load_dataset(incl_test=True)
    te = H2OTargetEncoderEstimator()
    te.train(y=ds.target, training_frame=ds.train)
    encoded = te.predict(ds.test)
    # print(encoded)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        te_nl = H2OTargetEncoderEstimator(noise_level=0)
        assert len(w) == 1
        assert issubclass(w[0].category, H2ODeprecationWarning)
        assert "``noise_level`` param of ``{}`` is deprecated".format(te_init_name) in str(w[0].message)
    te_nl.train(y=ds.target, training_frame=ds.train)
    encoded_nl = te_nl.predict(ds.test)
    # print(encoded_nl)
    te_n = H2OTargetEncoderEstimator(noise=0)
    te_n.train(y=ds.target, training_frame=ds.train)
    encoded_n = te_n.predict(ds.test)
    # print(encoded_n)
    try:
        pu.compare_frames(encoded_nl, encoded, 0, tol_numeric=1e-5)
        assert False, "should have raised"
    except AssertionError as ae:
        assert "should have raised" not in str(ae)
    assert pu.compare_frames(encoded_nl, encoded_n, 0, tol_numeric=1e-5)
def test_transform_seed_param_raise_warning():
    """``transform(seed=...)`` must warn and be ignored (same output as default)."""
    ds = load_dataset(incl_test=True)
    te = H2OTargetEncoderEstimator(seed=42)
    te.train(y=ds.target, training_frame=ds.train)
    encoded = te.predict(ds.test)
    transformed_1 = te.transform(ds.test)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        transformed_2 = te.transform(ds.test, seed=24)
        assert len(w) == 1
        assert issubclass(w[0].category, H2ODeprecationWarning)
        assert "`seed` is deprecated in `transform` method and will be ignored" in str(w[0].message)
    assert pu.compare_frames(encoded, transformed_1, 0, tol_numeric=1e-5)
    assert pu.compare_frames(encoded, transformed_2, 0, tol_numeric=1e-5)
def test_transform_data_leakage_handling_param_raise_warning():
    """``transform(data_leakage_handling=...)`` must warn; "none" is ignored,
    anything else is interpreted as ``as_training=True``."""
    ds = load_dataset(incl_test=True)
    te = H2OTargetEncoderEstimator(data_leakage_handling="leave_one_out", seed=42)
    te.train(y=ds.target, training_frame=ds.train)
    encoded = te.predict(ds.test)
    encoded_as_training = te.transform(ds.test, as_training=True)
    transformed_1 = te.transform(ds.test)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        transformed_2 = te.transform(ds.test, data_leakage_handling="none")
        assert len(w) == 1
        assert issubclass(w[0].category, H2ODeprecationWarning)
        assert "`data_leakage_handling` is deprecated in `transform` method and will be ignored" in str(w[0].message)
    # if data_leakage_handling is specified and not "none", this is interpreted as `as_training=True`
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        transformed_3 = te.transform(ds.test, data_leakage_handling="leave_one_out")
        assert len(w) == 2
        assert issubclass(w[1].category, H2ODeprecationWarning)
        assert "as_training=True" in str(w[1].message)
    assert pu.compare_frames(encoded, transformed_1, 0, tol_numeric=1e-5)
    assert pu.compare_frames(encoded, transformed_2, 0, tol_numeric=1e-5)
    assert pu.compare_frames(encoded_as_training, transformed_3, 0, tol_numeric=1e-5)
    try:
        pu.compare_frames(encoded, transformed_3, 0, tol_numeric=1e-5)
        assert False, "should have raised"
    except AssertionError as ae:
        assert "should have raised" not in str(ae)
# Register every test function with the pyunit runner.
pu.run_tests([
    test_deprecated_k_param_is_alias_for_inflection_point,
    test_deprecated_f_param_is_alias_for_smoothing,
    test_deprecated_noise_level_param_is_alias_for_noise,
    test_transform_seed_param_raise_warning,
    test_transform_data_leakage_handling_param_raise_warning,
])
|
noinil/memory | kana_table_test.py | Python | gpl-2.0 | 3,595 | 0.00428 | #!/usr/bin/env python3
# coding: utf-8
romaji = []
hiragana = []
katakana = []
def num_permuts(N):
    """Return a random permutation of the integers ``0 .. N-1`` as a list.

    Args:
        N: size of the permutation (the range is determined by N, not a
           fixed 0--46 as the old docstring claimed).
    """
    from random import sample, seed
    seed()  # reseed from system entropy, preserving the original behaviour
    # random.sample of the full range is the stdlib way to shuffle-and-copy.
    return sample(range(N), N)
def main(ts):
    """Run one kana memory-test session.

    Args:
        ts: test style — 1: romaji->kana, 2: katakana->reading,
            3: hiragana->reading, 4: mixed.

    Times each answer, prints a summary and writes a per-item report
    (sorted slowest-first) to a timestamped file under data/.
    """
    import time
    # -------------------- read in kana data --------------------
    with open("share/kana_list.dat", 'r') as fin:
        for lines in fin:
            words = lines.split()
            if lines.startswith('#') or len(words) <= 1:
                continue
            if not words[1].isdigit():
                continue
            roma, hira, kata = words[3], words[5], words[7]
            romaji.append(roma)
            hiragana.append(hira)
            katakana.append(kata)
    # -------------------- test settings --------------------
    # fixed: the two assignments below had garbled (lost) indentation
    if ts == 1:
        nums = num_permuts(46)
        l_test = romaji
    elif ts == 2:
        nums = num_permuts(46)
        l_test = katakana
    elif ts == 3:
        nums = num_permuts(46)
        l_test = hiragana
    elif ts == 4:
        nums = num_permuts(46 * 3)
        l_test = romaji + hiragana + katakana
    else:
        print(' Wrong testing style!')
        exit()
    # Begin testing, log time spent on memory test -----------------------------
    t0 = time.time()
    t1 = t0
    t_log = {}  # per-kana answer time, keyed by kana index (0..45)
    for i in nums[:10]:
        key_in = input(" " + l_test[i] + "\t")
        t2 = time.time()
        dt = t2 - t1
        t1 = t2
        j = i % 46
        if key_in == 'x':
            # 'x' reveals the answer; penalize the entry by 2 seconds.
            print(" \t", romaji[j], "\t", hiragana[j], "\t", katakana[j])
            dt += 2
        t_log[j] = round(dt, 2)
    t_final = time.time()
    s_t_out = time.ctime()
    s = s_t_out.split()
    out_filename = 'data/kana_test_result' + '_' + s[1] + '_' + s[2] + '_' + s[4] + '_' + s[3][:5]
    fout = open(out_filename, 'w')
    print(" ============================================================")
    print(" You spent ", round(t_final - t0), " seconds. ")
    print(" ------------------------------------------------------------")
    fout.write(" ============================================================\n")
    fout.write(" You spent " + str(round(t_final - t0)) + " seconds. \n")
    fout.write(" ------------------------------------------------------------\n")
    # Analyze the results ------------------------------------------------------
    t_log_sorted = sorted(t_log.items(), key=lambda x:x[1], reverse = True)
    for i in t_log_sorted:
        num, t = i[0], i[1]
        print(num, "\t", romaji[num], " \t", hiragana[num], " \t", katakana[num], " \t", t, "s")
        fout.write(romaji[num].rjust(4) + ' \t')
        fout.write(str(hiragana[num]).ljust(4) + ' \t')
        fout.write(str(katakana[num]).ljust(4) + ' \t' + str(t).rjust(6) + ' s\n')
    print(" ============================================================")
    fout.write(" ============================================================\n")
    # fixed: the report file was never closed
    fout.close()
if __name__ == '__main__':
    # Menu text (Chinese): asks which of the four test styles (1-4) to run.
    hint_str = ''' ========================
    日语假名记忆测试:
    请选择测试类型:
    1. 看罗马字回忆假名
    2. 看片假名回忆读音
    3. 看平假名回忆读音
    4. 混乱回忆
    ...
    '''
    try:
        test_style = int(input(hint_str))
    except:
        print(' Please choose from the list! ')
        exit()
    # Keep prompting until a valid style (1..4) is entered.
    while test_style < 1 or test_style > 4:
        test_style = int(input(' Please input an integer between (1..4) > '))
    print(" ~~~~~~~~~~~~~~~~~~~~ ")
    main(test_style)
|
plotly/python-api | packages/python/plotly/plotly/tests/test_core/test_figure_messages/test_plotly_restyle.py | Python | mit | 2,977 | 0.00168 | import sys
from unittest import TestCase
import plotly.graph_objs as go
if sys.version_info >= (3, 3):
from unittest.mock import MagicMock
else:
from mock import MagicMock
class TestRestyleMessage(TestCase):
    """Trace property writes and plotly_restyle() must emit the expected
    _send_restyle_msg payloads (class name was garbled in the source)."""

    def setUp(self):
        # Construct with mocked _send_restyle_msg method.
        # fixed: constructor call was garbled ("go. | Figure")
        self.figure = go.Figure(
            data=[
                go.Scatter(),
                go.Bar(),
                go.Parcoords(dimensions=[{}, {"label": "dim 2"}, {}]),
            ]
        )
        # Mock out the message method
        self.figure._send_restyle_msg = MagicMock()

    def test_property_assignment_toplevel(self):
        # Set bar marker
        self.figure.data[1].marker = {"color": "green"}
        self.figure._send_restyle_msg.assert_called_once_with(
            {"marker": [{"color": "green"}]}, trace_indexes=1
        )

    def test_property_assignment_nested(self):
        # Set scatter marker color
        self.figure.data[0].marker.color = "green"
        self.figure._send_restyle_msg.assert_called_once_with(
            {"marker.color": ["green"]}, trace_indexes=0
        )

    def test_property_assignment_nested_array(self):
        # Set parcoords dimension
        self.figure.data[2].dimensions[0].label = "dim 1"
        self.figure._send_restyle_msg.assert_called_once_with(
            {"dimensions.0.label": ["dim 1"]}, trace_indexes=2
        )

    # plotly_restyle
    def test_plotly_restyle_toplevel(self):
        # Set bar marker
        self.figure.plotly_restyle({"marker": {"color": "green"}}, trace_indexes=1)
        self.figure._send_restyle_msg.assert_called_once_with(
            {"marker": {"color": "green"}}, trace_indexes=[1]
        )

    def test_plotly_restyle_nested(self):
        # Set scatter marker color
        self.figure.plotly_restyle({"marker.color": "green"}, trace_indexes=0)
        self.figure._send_restyle_msg.assert_called_once_with(
            {"marker.color": "green"}, trace_indexes=[0]
        )

    def test_plotly_restyle_nested_array(self):
        # Set parcoords dimension
        self.figure.plotly_restyle({"dimensions[0].label": "dim 1"}, trace_indexes=2)
        self.figure._send_restyle_msg.assert_called_once_with(
            {"dimensions[0].label": "dim 1"}, trace_indexes=[2]
        )

    def test_plotly_restyle_multi_prop(self):
        self.figure.plotly_restyle(
            {"marker": {"color": "green"}, "name": "MARKER 1"}, trace_indexes=1
        )
        self.figure._send_restyle_msg.assert_called_once_with(
            {"marker": {"color": "green"}, "name": "MARKER 1"}, trace_indexes=[1]
        )

    def test_plotly_restyle_multi_trace(self):
        self.figure.plotly_restyle(
            {"marker": {"color": "green"}, "name": "MARKER 1"}, trace_indexes=[0, 1]
        )
        self.figure._send_restyle_msg.assert_called_once_with(
            {"marker": {"color": "green"}, "name": "MARKER 1"}, trace_indexes=[0, 1]
        )
|
jbarlow83/OCRmyPDF | src/ocrmypdf/hocrtransform.py | Python | gpl-3.0 | 17,755 | 0.000846 | #!/usr/bin/env python3
#
# Copyright (c) 2010, Jonathan Brinley
# Original version from: https://github.com/jbrinley/HocrConverter
#
# Copyright (c) 2013-14, Julien Pfefferkorn
# Modifications
#
# Copyright (c) 2015-16, James R. Barlow
# Set text to transparent
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import argparse
import os
import re
from math import atan, cos, sin
from pathlib import Path
from typing import Any, NamedTuple, Optional, Tuple, Union
from xml.etree import ElementTree
from reportlab.lib.colors import black, cyan, magenta, red
from reportlab.lib.units import inch
from reportlab.pdfgen.canvas import Canvas
# According to Wikipedia these languages are supported in the ISO-8859-1 character
# set, meaning reportlab can generate them and they are compatible with hocr,
# assuming Tesseract has the necessary languages installed. Note that there may
# not be language packs for them.
HOCR_OK_LANGS = frozenset(
[
# Languages fully covered by Latin-1:
'afr', # Afrikaans
'alb', # Albanian
'ast', # Leonese
'baq', # Basque
'bre', # Breton
'cos', # Corsican
'eng', # English
'eus', # Basque
'fao', # Faoese
'gla', # Scottish Gaelic
'glg', # Galician
'glv', # Manx
'ice', # Icelandic
'ind', # Indonesian
'isl', # Icelandic
'ita', # Italian
'ltz', # Luxembourgish
'mal', # Malay Rumi
'mga', # Irish
'nor', # Norwegian
'oci', # Occitan
'por', # Portugeuse
'roh', # Romansh
'sco', # Scots
'sma', # Sami
'spa', # Spanish
'sqi', # Albanian
'swa', # Swahili
'swe', # Swedish
'tgl', # Tagalog
'wln', # Walloon
# Languages supported by Latin-1 except for a few rare characters that OCR
# is probably not trained to recognize anyway:
'cat', # Catalan
'cym', # Welsh
'dan', # Danish
'deu', # German
'dut', # Dutch
'est', # Estonian
'fin', # Finnish
'fra', # French
'hun', # Hungarian
'kur', # Kurdish
'nld', # Dutch
'wel', # Welsh
]
)
Element = ElementTree.Element
class Rect(NamedTuple):  # pylint: disable=inherit-non-class
    """Axis-aligned rectangle (x1, y1)-(x2, y2) used for PDF coordinates."""

    x1: Any
    y1: Any
    x2: Any
    y2: Any
class HocrTransformError(Exception):
    """Raised when an hOCR document cannot be parsed or converted."""
class HocrTransform:
"""
A class for converting documents from the hOCR format.
For details of the hOCR format, see:
http://kba.cloud/hocr-spec/
"""
box_pattern = re.compile(r'bbox((\s+\d+){4})')
baseline_pattern = re.compile(
r'''
baseline \s+
([\-\+]?\d*\.?\d*) \s+ # +/- decimal float
([\-\+]?\d+) # +/- int''',
re.VERBOSE,
)
ligatures = str.maketrans(
{'ff': 'ff', 'ffi': 'ffi', 'ffl': 'ffl', 'fi': 'fi', 'fl': 'fl'}
)
    def __init__(self, *, hocr_filename: Union[str, Path], dpi: float):
        """Parse *hocr_filename* and record the page size in PDF points.

        :param hocr_filename: path to the hOCR (XHTML) file produced by OCR.
        :param dpi: resolution used to convert pixel coordinates to points.
        :raises HocrTransformError: if no ``ocr_page`` div yields dimensions.
        """
        self.dpi = dpi
        self.hocr = ElementTree.parse(os.fspath(hocr_filename))
        # if the hOCR file has a namespace, ElementTree requires its use to
        # find elements
        matches = re.match(r'({.*})html', self.hocr.getroot().tag)
        self.xmlns = ''
        if matches:
            self.xmlns = matches.group(1)
        # get dimension in pt (not pixel!!!!) of the OCRed image
        self.width, self.height = None, None
        for div in self.hocr.findall(self._child_xpath('div', 'ocr_page')):
            coords = self.element_coordinates(div)
            pt_coords = self.pt_from_pixel(coords)
            self.width = pt_coords.x2 - pt_coords.x1
            self.height = pt_coords.y2 - pt_coords.y1
            # there shouldn't be more than one, and if there is, we don't want
            # it
            break
        if self.width is None or self.height is None:
            raise HocrTransformError("hocr file is missing page dimensions")
def __str__(self): # pragma: no cover
"""
Return the textual content of the HTML body
"""
if self.hocr is None:
return ''
body = self.hocr.find(self._child_xpath('body'))
if body:
return self._get_element_text(body)
else:
return ''
def _get_element_text(self, element: Element):
"""
Return the textual content of the element and its children
"""
text = ''
if element.text is not None:
text += element.text
for child in element:
text += self._get_element_text(child)
if element.tail is not None:
text += element.tail
return text
@classmethod
def element_coordinates(cls, element: Element) -> Rect:
"""
Returns a tuple containing the coordinates of the bounding box around
an element
"""
out = Rect._make(0 for _ in range(4))
if 'title' in element.attrib:
matches = cls.box_pattern.search(element.attrib['title'])
if matches:
coords = matches.group(1).split()
out = Rect._make(int(coords[n]) for n in range(4))
return out
    @classmethod
    def baseline(cls, element: Element) -> Tuple[float, float]:
        """
        Return the (slope, intercept) of the hOCR baseline property.

        Falls back to a flat baseline ``(0.0, 0.0)`` when the element has no
        ``title`` attribute or the attribute carries no baseline data.
        """
        if 'title' in element.attrib:
            matches = cls.baseline_pattern.search(element.attrib['title'])
            if matches:
                return float(matches.group(1)), int(matches.group(2))
        return (0.0, 0.0)
    def pt_from_pixel(self, pxl) -> Rect:
        """
        Return the quantity in PDF units (pt) given the quantity in pixels,
        using the dpi supplied at construction time.
        """
        return Rect._make((c / self.dpi * inch) for c in pxl)
def _child_xpath(self, html_tag: str, html_class: Optional[str] = None) -> str:
xpath = f".//{self.xmlns}{html_tag}"
if html_class:
xpath += f"[@class='{html_class}']"
return xpath
    @classmethod
    def replace_unsupported_chars(cls, s: str) -> str:
        """
        Given an input string, returns the corresponding string that:

        * is available in the Helvetica facetype
        * does not contain any ligature (to allow easy search in the PDF file)
        """
        # Single C-level pass using the precomputed class-level translation table.
        return s.translate(cls.ligatures)
    def topdown_position(self, element):
        """Return the bottom edge (y2) of *element* in points, still measured
        in the top-down hOCR coordinate system."""
        pxl_line_coords = self.element_coordinates(element)
        line_box = self.pt_from_pixel(pxl_line_coords)
        # Coordinates here are still in the hocr coordinate system, so 0 on the y axis
        # is the top of the page and increasing values of y will move towards the
        # bottom of the page.
        return line_box.y2
def to_pdf(
self,
*,
out_filename: Path,
image_filename: Optional[Path] = None,
show_bounding_boxes: bool = False,
fontname: str = "Helvetica",
invisible_text: bool = False,
interword_spaces: bool = False,
|
nagyistoce/devide | modules/vtk_basic/vtkFeatureEdges.py | Python | bsd-3-clause | 485 | 0.002062 | # class generated by DeVIDE::createDeVIDEModuleFromVTKObject
from module_kits.vtk_kit.mixins import SimpleVTKClassModuleBase
import vtk
class vtkFeatureEdges(SimpleVTKClassModuleBase):
    """DeVIDE module wrapping VTK's vtkFeatureEdges filter.

    Accepts vtkPolyData input and produces vtkPolyData output; documentation
    is taken from the wrapped VTK object (replaceDoc=True).
    """
    def __init__(self, module_manager):
        SimpleVTKClassModuleBase.__init__(
            self, module_manager,
            vtk.vtkFeatureEdges(), 'Processing.',
            ('vtkPolyData',), ('vtkPolyData',),
            replaceDoc=True,
            inputFunctions=None, outputFunctions=None)
|
zearom32/SmartBooks | books/admin.py | Python | mit | 402 | 0.032338 | from django.contrib import admin
from books.models import *
# Register your models here.
# Register your models here.
class BookInfoAdmin(admin.ModelAdmin):
    """Admin list view for BookInfo, showing title, ISBN and price columns."""
    list_display = ('title', 'isbn', 'price')
class GoodInfoAdmin(admin.ModelAdmin):
    """Admin list view for GoodsInfo, showing seller, book and price columns."""
    list_display = ('seller', 'book', 'price')
# Register the app's models with the Django admin site; BookInfo and GoodsInfo
# use the customized ModelAdmin classes above for their change-list columns.
admin.site.register(UserInfo)
admin.site.register(BookInfo,BookInfoAdmin)
admin.site.register(GoodsInfo,GoodInfoAdmin)
|
mastizada/kuma | kuma/wiki/forms.py | Python | mpl-2.0 | 19,906 | 0.000301 | import re
from tower import ugettext_lazy as _lazy
from tower import ugettext as _
from django import forms
from django.conf import settings
from django.forms.widgets import CheckboxSelectMultiple
from kuma.contentflagging.forms import ContentFlagForm
import kuma.wiki.content
from kuma.core.form_fields import StrippedCharField
from .constants import (SLUG_CLEANSING_REGEX, REVIEW_FLAG_TAGS,
LOCALIZATION_FLAG_TAGS, RESERVED_SLUGS)
from .models import (Document, Revision,
valid_slug_parent)
# User-facing form validation messages. ``_lazy`` defers translation of each
# string until render time, so importing this module needs no active locale.
TITLE_REQUIRED = _lazy(u'Please provide a title.')
TITLE_SHORT = _lazy(u'The title is too short (%(show_value)s characters). '
                    u'It must be at least %(limit_value)s characters.')
TITLE_LONG = _lazy(u'Please keep the length of the title to %(limit_value)s '
                   u'characters or less. It is currently %(show_value)s '
                   u'characters.')
TITLE_PLACEHOLDER = _lazy(u'Name Your Article')
SLUG_REQUIRED = _lazy(u'Please provide a slug.')
SLUG_INVALID = _lazy(u'The slug provided is not valid.')
SLUG_SHORT = _lazy(u'The slug is too short (%(show_value)s characters). '
                   u'It must be at least %(limit_value)s characters.')
SLUG_LONG = _lazy(u'Please keep the length of the slug to %(limit_value)s '
                  u'characters or less. It is currently %(show_value)s '
                  u'characters.')
SUMMARY_REQUIRED = _lazy(u'Please provide a summary.')
SUMMARY_SHORT = _lazy(u'The summary is too short (%(show_value)s characters). '
                      u'It must be at least %(limit_value)s characters.')
SUMMARY_LONG = _lazy(u'Please keep the length of the summary to '
                     u'%(limit_value)s characters or less. It is currently '
                     u'%(show_value)s characters.')
CONTENT_REQUIRED = _lazy(u'Please provide content.')
CONTENT_SHORT = _lazy(u'The content is too short (%(show_value)s characters). '
                      u'It must be at least %(limit_value)s characters.')
CONTENT_LONG = _lazy(u'Please keep the length of the content to '
                     u'%(limit_value)s characters or less. It is currently '
                     u'%(show_value)s characters.')
COMMENT_LONG = _lazy(u'Please keep the length of the comment to '
                     u'%(limit_value)s characters or less. It is currently '
                     u'%(show_value)s characters.')
# Messages for edit-collision and document-move situations.
SLUG_COLLIDES = _lazy(u'Another document with this slug already exists.')
OTHER_COLLIDES = _lazy(u'Another document with this metadata already exists.')
MIDAIR_COLLISION = _lazy(u'This document was modified while you were '
                         'editing it.')
MOVE_REQUIRED = _lazy(u"Changing this document's slug requires "
                      u"moving it and its children.")
class DocumentForm(forms.ModelForm):
    """Form to create/edit a document."""
    title = StrippedCharField(min_length=1, max_length=255,
                              widget=forms.TextInput(
                                  attrs={'placeholder': TITLE_PLACEHOLDER}),
                              label=_lazy(u'Title:'),
                              help_text=_lazy(u'Title of article'),
                              error_messages={'required': TITLE_REQUIRED,
                                              'min_length': TITLE_SHORT,
                                              'max_length': TITLE_LONG})
    slug = StrippedCharField(min_length=1, max_length=255,
                             widget=forms.TextInput(),
                             label=_lazy(u'Slug:'),
                             help_text=_lazy(u'Article URL'),
                             error_messages={'required': SLUG_REQUIRED,
                                             'min_length': SLUG_SHORT,
                                             'max_length': SLUG_LONG})
    category = forms.ChoiceField(choices=Document.CATEGORIES,
                                 initial=10,
                                 # Required for non-translations, which is
                                 # enforced in Document.clean().
                                 required=False,
                                 label=_lazy(u'Category:'),
                                 help_text=_lazy(u'Type of article'),
                                 widget=forms.HiddenInput())
    parent_topic = forms.ModelChoiceField(queryset=Document.objects.all(),
                                          required=False,
                                          label=_lazy(u'Parent:'))
    locale = forms.CharField(widget=forms.HiddenInput())

    def clean_slug(self):
        """Validate the slug, defaulting it to the title when left blank.

        :raises forms.ValidationError: for disallowed characters or slugs
            that would collide with reserved URL patterns.
        """
        slug = self.cleaned_data['slug']
        if slug == '':
            # Default to the title, if missing.
            slug = self.cleaned_data['title']
        # "?", " ", quote disallowed in slugs altogether
        if '?' in slug or ' ' in slug or '"' in slug or "'" in slug:
            raise forms.ValidationError(SLUG_INVALID)
        # Pattern copied from urls.py
        if not re.compile(r'^[^\$]+$').match(slug):
            raise forms.ValidationError(SLUG_INVALID)
        # Guard against slugs that match urlpatterns
        for pat in RESERVED_SLUGS:
            if re.compile(pat).match(slug):
                raise forms.ValidationError(SLUG_INVALID)
        return slug

    class Meta:
        model = Document
        fields = ('title', 'slug', 'category', 'locale')

    def save(self, parent_doc, **kwargs):
        """Persist the Document form, and return the saved Document."""
        doc = super(DocumentForm, self).save(commit=False, **kwargs)
        doc.parent = parent_doc
        if 'parent_topic' in self.cleaned_data:
            doc.parent_topic = self.cleaned_data['parent_topic']
        doc.save()
        # not strictly necessary since we didn't change
        # any m2m data since we instantiated the doc
        self.save_m2m()
        return doc
class RevisionForm(forms.ModelForm):
"""Form to create new revisions."""
title = StrippedCharField(min_length=1, max_length=255,
required=False,
widget=forms.TextInput(
attrs={'placeholder': TITLE_PLACEHOLDER}),
label=_lazy(u'Title:'),
help_text=_lazy(u'Title of article'),
error_messages={'required': TITLE_REQUIRED,
'min_length': TITLE_SHORT,
'max_length': TITLE_LONG})
slug = StrippedCharField(min_length=1, max_length=255,
required=False,
widget=forms.TextInput(),
label=_lazy(u'Slug:'),
help_text=_lazy(u'Article URL'),
error_messages={'required': SLUG_REQUIRED,
'min_length': SLUG_SHORT,
'max_length': SLUG_LONG})
tags = StrippedCharField(required=False,
label=_lazy(u'Tags:'))
keywords = StrippedCharField(required=False,
label=_lazy(u'Keywords:'),
help_text=_lazy(u'Affects search results'))
summary = StrippedCharField(required=False,
min_length=5, max_length=1000, widget=forms.Textarea(),
label=_lazy(u'Search result summary:'),
help_text=_lazy(u'Only displayed on search results page'),
error_messages={'required': SUMMARY_REQUIRED,
'min_length': SUMMARY_SHORT,
'max_length': SUMMARY_LONG})
content = StrippedCharField(
min_length=5, max_length=300000,
label=_lazy(u'Content:'),
widget=forms.Textarea(),
error_messages={'required': CONTENT_REQUIRED,
'min_length': CONTENT_SHORT,
'max_length': CONTENT_LONG})
comment = StrippedCharField(required=F |
jstitch/gift_circle | gift_circle/config.py | Python | gpl-3.0 | 273 | 0.014652 | #
# SMTP settings for outgoing notification mail (placeholder values).
# NOTE: the name ``email`` shadows the stdlib ``email`` module inside this
# module's namespace; kept unchanged for backward compatibility with importers.
email = {
    "email_server": "localhost",
    "email_from_addr": "email@example.com",
}
# Twilio API credentials used for sending SMS (placeholder values to be
# overridden in a deployment-specific config).
twilio = {
    "account_sid": "twilio_account_sid",
    "auth_token": "twilio_auth_token",
    "from_number": "twilio_from_number",
    "code": "intl_code",
}
mppmu/secdec | pySecDec/integral_interface.py | Python | gpl-3.0 | 37,101 | 0.013369 | """
Integral Interface
------------------
An interface to libraries generated by
:func:`pySecDec.code_writer.make_package` or
:func:`pySecDec.loop_integral.loop_package`.
"""
from ctypes import CDLL, c_void_p, c_char_p, c_bool, c_int, c_uint, c_longlong, c_double, c_ulonglong
from threading import Thread
try:
from Queue import Queue
except ImportError:
from queue import Queue
# assuming
# enum qmc_transform_t : int
# {
# no_transform = -1,
#
# baker = -2,
#
# korobov1x1 = 1, korobov1x2 = 2, korobov1x3 = 3, korobov1x4 = 4, korobov1x5 = 5, korobov1x6 = 6,
# korobov2x1 = 7, korobov2x2 = 8, korobov2x3 = 9, korobov2x4 = 10, korobov2x5 = 11, korobov2x6 = 12,
# korobov3x1 = 13, korobov3x2 = 14, korobov3x3 = 15, korobov3x4 = 16, korobov3x5 = 17, korobov3x6 = 18,
# korobov4x1 = 19, korobov4x2 = 20, korobov4x3 = 21, korobov4x4 = 22, korobov4x5 = 23, korobov4x6 = 24,
# korobov5x1 = 25, korobov5x2 = 26, korobov5x3 = 27, korobov5x4 = 28, korobov5x5 = 29, korobov5x6 = 30,
# korobov6x1 = 31, korobov6x2 = 32, korobov6x3 = 33, korobov6x4 = 34, korobov6x5 = 35, korobov6x6 = 36,
#
# sidi1 = -11,
# sidi2 = -12,
# sidi3 = -13,
# sidi4 = -14,
# sidi5 = -15,
# sidi6 = -16
# };
# Map transform names to the integer values of the C++ enum qmc_transform_t.
# The korobov grid is numbered row-major: korobovRxS == (R-1)*6 + S, and the
# shorthand korobovN aliases the diagonal entry korobovNxN.
known_qmc_transforms = {'none': -1, 'baker': -2}
for _r in range(1, 7):
    for _s in range(1, 7):
        known_qmc_transforms['korobov%ix%i' % (_r, _s)] = (_r - 1) * 6 + _s
for _i in range(1, 7):
    known_qmc_transforms['sidi%i' % _i] = -10 - _i
for _i in range(1, 7):
    known_qmc_transforms['korobov%i' % _i] = known_qmc_transforms['korobov%ix%i' % (_i, _i)]
# assuming
# enum qmc_fitfunction_t : int
# {
# default_fitfunction = 0,
#
# none = -1,
# polysingular = 1
# };
# Map fit-function names to the integer values of the C++ enum qmc_fitfunction_t.
known_qmc_fitfunctions = {
    'default': 0,
    'none': -1,
    'polysingular': 1,
}
# assuming
# enum qmc_generatingvectors_t: int
# {
# default_generatingvectors = 0,
#
# cbcpt_dn1_100 = 1,
# cbcpt_dn2_6 = 2,
# cbcpt_cfftw1_6 = 3
# };
# Map generating-vector names to the integer values of the C++ enum
# qmc_generatingvectors_t.
known_qmc_generatingvectors = {
    'default': 0,
    'cbcpt_dn1_100': 1,
    'cbcpt_dn2_6': 2,
    'cbcpt_cfftw1_6': 3,
}
class CPPIntegrator(object):
    '''
    Abstract base class for integrators to be used with
    an :class:`.IntegralLibrary`.

    Holds a pointer to the underlying c++ integrator and releases
    it through the library's ``free_integrator`` on destruction.

    '''
    def __del__(self):
        # Nothing to release if construction never reached pointer allocation.
        if not hasattr(self, 'c_integrator_ptr'):
            return
        free_integrator = self.c_lib.free_integrator
        free_integrator.restype = None
        free_integrator.argtypes = [c_void_p]
        free_integrator(self.c_integrator_ptr)
class MultiIntegrator(CPPIntegrator):
    '''
    .. versionadded:: 1.3.1

    Wrapper for the :cpp:class:`secdecutil::MultiIntegrator`.

    :param integral_library:
        :class:`IntegralLibrary`;
        The integral to be computed with this integrator.

    :param low_dim_integrator:
        :class:`CPPIntegrator`;
        The integrator to be used if the integrand is lower
        dimensional than `critical_dim`.

    :param high_dim_integrator:
        :class:`CPPIntegrator`;
        The integrator to be used if the integrand has dimension
        `critical_dim` or higher.

    :param critical_dim:
        integer;
        The dimension below which the `low_dimensional_integrator`
        is used.

    Use this class to switch between integrators based on the
    dimension of the integrand when integrating the `integral_ibrary`.
    For example, ":class:`CQuad` for 1D and :class:`Vegas` otherwise"
    is implemented as::

        integral_library.integrator = MultiIntegrator(integral_library,CQuad(integral_library),Vegas(integral_library),2)

    :class:`MultiIntegrator` can be nested to implement multiple
    critical dimensions. To use e.g. :class:`CQuad` for 1D,
    :class:`Cuhre` for 2D and 3D, and :class:`Vegas` otherwise, do::

        integral_library.integrator = MultiIntegrator(integral_library,CQuad(integral_library),MultiIntegrator(integral_library,Cuhre(integral_library),Vegas(integral_library),4),2)

    .. warning::
        The `integral_library` passed to the integrators must be the
        same for all of them. Furthermore, an integrator can only be
        used to integrate the `integral_library` it has beeen
        constructed with.

    .. warning::
        The :class:`MultiIntegrator` cannot be used with :class:`.CudaQmc`.

    '''
    def __init__(self, integral_library, low_dim_integrator, high_dim_integrator, critical_dim):
        # Keep references to the sub-integrators so they are not garbage
        # collected (and their c++ objects freed) while this object is alive.
        self.low_dim_integrator = low_dim_integrator
        self.high_dim_integrator = high_dim_integrator
        self.c_lib = integral_library.c_lib
        allocate = self.c_lib.allocate_MultiIntegrator
        allocate.restype = c_void_p
        allocate.argtypes = [c_void_p, c_void_p, c_int]
        self.c_integrator_ptr = allocate(
            low_dim_integrator.c_integrator_ptr,
            high_dim_integrator.c_integrator_ptr,
            critical_dim,
        )
class CQuad(CPPIntegrator):
    '''
    Wrapper for the cquad integrator defined in the gsl
    library.

    :param integral_library:
        :class:`IntegralLibrary`;
        The integral to be computed with this integrator.

    The other options are defined in :numref:`chapter_cpp_cquad`
    and in the gsl manual.

    '''
    def __init__(self, integral_library, epsrel=1e-2, epsabs=1e-7, n=100, verbose=False, zero_border=0.0):
        self.c_lib = integral_library.c_lib
        # Declare the C signature once, then allocate the c++ integrator.
        allocate = self.c_lib.allocate_gsl_cquad
        allocate.restype = c_void_p
        allocate.argtypes = [c_double, c_double, c_uint, c_bool, c_double]
        self.c_integrator_ptr = allocate(epsrel, epsabs, n, verbose, zero_border)
class Vegas(CPPIntegrator):
    '''
    Wrapper for the Vegas integrator defined in the cuba
    library.

    :param integral_library:
        :class:`IntegralLibrary`;
        The integral to be computed with this integrator.

    The other options are defined in :numref:`chapter_cpp_cuba`
    and in the cuba manual.

    '''
    def __init__(self, integral_library, epsrel=1e-2, epsabs=1e-7, flags=0, seed=0, mineval=0, maxeval=10**6, zero_border=0.0, nstart=10000, nincrease=5000, nbatch=1000, real_complex_together=False):
        self.c_lib = integral_library.c_lib
        # Declare the C signature once, then allocate the c++ integrator.
        allocate = self.c_lib.allocate_cuba_Vegas
        allocate.restype = c_void_p
        allocate.argtypes = [
            c_double, c_double, c_int, c_int, c_longlong, c_longlong,
            c_double, c_longlong, c_longlong, c_longlong, c_bool,
        ]
        self.c_integrator_ptr = allocate(
            epsrel, epsabs, flags, seed, mineval, maxeval,
            zero_border, nstart, nincrease, nbatch, real_complex_together,
        )
class Suave(CPPIntegrator):
    '''
    Wrapper for the Suave integrator defined in the cuba
    library.

    :param integral_library:
        :class:`IntegralLibrary`;
        The integral to be computed with this integrator.

    The other options are defined in :numref:`chapter_cpp_cuba`
    and in the cuba manual.

    '''
    def __init__(self, integral_library, epsrel=1e-2, epsabs=1e-7, flags=0, seed=0, mineval=0, maxeval=10**6, zero_border=0.0, nnew=1000, nmin=10, flatness=25., real_complex_together=False):
        self.c_lib = integral_library.c_lib
        # Declare the C signature once, then allocate the c++ integrator.
        allocate = self.c_lib.allocate_cuba_Suave
        allocate.restype = c_void_p
        allocate.argtypes = [
            c_double, c_double, c_int, c_int, c_longlong, c_longlong,
            c_double, c_longlong, c_longlong, c_double, c_bool,
        ]
        self.c_integrator_ptr = allocate(
            epsrel, epsabs, flags, seed, mineval, maxeval,
            zero_border, nnew, nmin, flatness, real_complex_together,
        )
class Divonne(CPPIntegrator):
'''
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.