| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 2–1.05M | stringlengths 5–104 | stringlengths 4–251 | stringclasses 1 value | stringclasses 15 values | int32 2–1.05M |
# Eloipool - Python Bitcoin pool server
# Copyright (C) 2011-2012 Luke Dashjr <luke-jr+eloipool@utopios.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from binascii import b2a_hex
import bitcoin.script
from bitcoin.script import countSigOps
from bitcoin.txn import Txn
from bitcoin.varlen import varlenEncode, varlenDecode
from collections import deque
from copy import deepcopy
from queue import Queue
import jsonrpc
import logging
from math import log
from merkletree import MerkleTree
from struct import pack
import threading
from time import sleep, time
import traceback
import config
_makeCoinbase = [0, 0]
def MakeBlockHeader(MRD):
(merkleRoot, merkleTree, coinbase, prevBlock, bits) = MRD[:5]
timestamp = pack('<L', int(time()))
hdr = b'\2\0\0\0' + prevBlock + merkleRoot + timestamp + bits + b'iolE'
return hdr
def assembleBlock(blkhdr, txlist):
payload = blkhdr
payload += varlenEncode(len(txlist))
for tx in txlist:
payload += tx.data
return payload
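# Illustrative sketch (not part of the original module): MakeBlockHeader above emits
# the standard 80-byte header layout: version(4) + prevBlock(32) + merkleRoot(32) +
# timestamp(4) + bits(4) + 4-byte nonce placeholder. The dummy values below are
# hypothetical and only demonstrate the field widths.
#
#     hdr = MakeBlockHeader((b'\0' * 32, None, None, b'\0' * 32, b'\xff\xff\x00\x1d'))
#     assert len(hdr) == 80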
class merkleMaker(threading.Thread):
OldGMP = None
GBTCaps = [
'coinbasevalue',
'coinbase/append',
'coinbase',
'generation',
'time',
'transactions/remove',
'prevblock',
]
GBTReq = {
'capabilities': GBTCaps,
}
GMPReq = {
'capabilities': GBTCaps,
'tx': 'obj',
}
def __init__(self, *a, **k):
super().__init__(*a, **k)
self.daemon = True
self.logger = logging.getLogger('merkleMaker')
self.CoinbasePrefix = b'Mined by Multicoin.co'
self.CoinbaseAux = {}
self.isOverflowed = False
self.lastWarning = {}
self.MinimumTxnUpdateWait = 1
self.overflowed = 0
self.DifficultyChangeMod = 1
def _prepare(self):
self.access = jsonrpc.ServiceProxy(self.UpstreamURI)
self.MinimumTxnUpdateWait = 1
self.IdleSleepTime = 1
self.TxnUpdateRetryWait = 1
self.ready = False
self.readyCV = threading.Condition()
self.currentBlock = (None, None, None)
self.lastBlock = (None, None, None)
self.currentMerkleTree = None
self.merkleRoots = deque(maxlen=self.WorkQueueSizeRegular[1])
self.LowestMerkleRoots = self.WorkQueueSizeRegular[1]
if not hasattr(self, 'WorkQueueSizeClear'):
self.WorkQueueSizeClear = self.WorkQueueSizeLongpoll
self._MaxClearSize = max(self.WorkQueueSizeClear[1], self.WorkQueueSizeLongpoll[1])
self.clearMerkleRoots = Queue(self._MaxClearSize)
self.LowestClearMerkleRoots = self.WorkQueueSizeClear[1]
self.nextMerkleRoots = Queue(self._MaxClearSize)
if not hasattr(self, 'WarningDelay'):
self.WarningDelay = max(15, self.MinimumTxnUpdateWait * 2)
if not hasattr(self, 'WarningDelayTxnLongpoll'):
self.WarningDelayTxnLongpoll = self.WarningDelay
if not hasattr(self, 'WarningDelayMerkleUpdate'):
self.WarningDelayMerkleUpdate = self.WarningDelay
self.lastMerkleUpdate = 0
self.nextMerkleUpdate = 0
def createClearMerkleTree(self, height):
subsidy = self.access.getblocktemplate()['coinbasevalue']
cbtxn = self.makeCoinbaseTxn(subsidy, False)
cbtxn.assemble()
return MerkleTree([cbtxn])
def updateBlock(self, newBlock, height = None, bits = None, _HBH = None):
if newBlock == self.currentBlock[0]:
if height in (None, self.currentBlock[1]) and bits in (None, self.currentBlock[2]):
return
if not self.currentBlock[2] is None:
self.logger.error('Was working on block with wrong specs: %s (height: %d->%d; bits: %s->%s)' % (
b2a_hex(newBlock[::-1]).decode('utf8'),
self.currentBlock[1],
height,
b2a_hex(self.currentBlock[2][::-1]).decode('utf8'),
b2a_hex(bits[::-1]).decode('utf8'),
))
# Old block is invalid
if self.currentBlock[0] != newBlock:
self.lastBlock = self.currentBlock
lastHeight = self.currentBlock[1]
if height is None:
height = self.currentBlock[1] + 1
if bits is None:
if height % self.DifficultyChangeMod == 1 or self.currentBlock[2] is None:
self.logger.warning('New block: %s (height %d; bits: UNKNOWN)' % (b2a_hex(newBlock[::-1]).decode('utf8'), height))
# Pretend to be 1 lower height, so we possibly retain nextMerkleRoots
self.currentBlock = (None, height - 1, None)
self.clearMerkleRoots = Queue(0)
self.merkleRoots.clear()
self.ready = False
return
else:
bits = self.currentBlock[2]
if _HBH is None:
_HBH = (b2a_hex(newBlock[::-1]).decode('utf8'), b2a_hex(bits[::-1]).decode('utf8'))
self.logger.info('New block: %s (height: %d; bits: %s)' % (_HBH[0], height, _HBH[1]))
self.currentBlock = (newBlock, height, bits)
if lastHeight != height:
# TODO: Perhaps reuse clear merkle trees more intelligently
if lastHeight == height - 1:
self.curClearMerkleTree = self.nextMerkleTree
self.clearMerkleRoots = self.nextMerkleRoots
self.logger.debug('Adopting next-height clear merkleroots :)')
else:
if lastHeight:
self.logger.warning('Change from height %d->%d; no longpoll merkleroots available!' % (lastHeight, height))
self.curClearMerkleTree = self.createClearMerkleTree(height)
self.clearMerkleRoots = Queue(self.WorkQueueSizeClear[1])
self.nextMerkleTree = self.createClearMerkleTree(height + 1)
self.nextMerkleRoots = Queue(self._MaxClearSize)
else:
self.logger.debug('Already using clear merkleroots for this height')
self.currentMerkleTree = self.curClearMerkleTree
self.merkleRoots.clear()
if not self.ready:
self.ready = True
with self.readyCV:
self.readyCV.notify_all()
self.needMerkle = 2
self.onBlockChange()
def _trimBlock(self, MP, txnlist, txninfo, floodn, msgf):
fee = txninfo[-1].get('fee', None)
if fee is None:
raise self._floodCritical(now, floodn, doin=msgf('fees unknown'))
if fee:
# FIXME: coinbasevalue is *not* guaranteed to exist here
MP['coinbasevalue'] -= fee
txnlist[-1:] = ()
txninfo[-1:] = ()
return True
# Aggressive "Power Of Two": Remove transactions even with fees to reach our goal
def _APOT(self, txninfopot, MP, POTInfo):
feeTxnsTrimmed = 0
feesTrimmed = 0
for txn in txninfopot:
if txn.get('fee') is None:
self._floodWarning(now, 'APOT-No-Fees', doin='Upstream didn\'t provide fee information required for aggressive POT', logf=self.logger.info)
return
if not txn['fee']:
continue
feesTrimmed += txn['fee']
feeTxnsTrimmed += 1
MP['coinbasevalue'] -= feesTrimmed
POTInfo[2] = [feeTxnsTrimmed, feesTrimmed]
self._floodWarning(now, 'POT-Trimming-Fees', doin='Aggressive POT trimming %d transactions with %d.%08d BTC total fees' % (feeTxnsTrimmed, feesTrimmed//100000000, feesTrimmed % 100000000), logf=self.logger.debug)
return True
def _makeBlockSafe(self, MP, txnlist, txninfo):
blocksize = sum(map(len, txnlist)) + 80
while blocksize > 934464: # 1 "MB" limit - 64 KB breathing room
txnsize = len(txnlist[-1])
self._trimBlock(MP, txnlist, txninfo, 'SizeLimit', lambda x: 'Making blocks over 1 MB size limit (%d bytes; %s)' % (blocksize, x))
blocksize -= txnsize
# NOTE: This check doesn't work at all without BIP22 transaction obj format
blocksigops = sum(a.get('sigops', 0) for a in txninfo)
while blocksigops > 19488: # 20k limit - 0x200 breathing room
txnsigops = txninfo[-1]['sigops']
self._trimBlock(MP, txnlist, txninfo, 'SigOpLimit', lambda x: 'Making blocks over 20k SigOp limit (%d; %s)' % (blocksigops, x))
blocksigops -= txnsigops
# Aim to produce blocks with "Power Of Two" transaction counts
# This helps avoid any chance of someone abusing CVE-2012-2459 with them
POTMode = getattr(self, 'POT', 0)
txncount = len(txnlist) + 1
if POTMode:
feetxncount = txncount
for i in range(txncount - 2, -1, -1):
if 'fee' not in txninfo[i] or txninfo[i]['fee']:
break
feetxncount -= 1
if getattr(self, 'Greedy', None):
# Aim to cut off extra zero-fee transactions on the end
# NOTE: not cutting out ones intermixed, in case of dependencies
idealtxncount = feetxncount
else:
idealtxncount = txncount
pot = 2**int(log(idealtxncount, 2))
POTInfo = MP['POTInfo'] = [[idealtxncount, feetxncount, txncount], [pot, None], None]
if pot < idealtxncount:
if pot * 2 <= txncount:
pot *= 2
elif pot >= feetxncount:
pass
elif POTMode > 1 and self._APOT(txninfo[pot-1:], MP, POTInfo):
# Trimmed even transactions with fees
pass
else:
pot = idealtxncount
self._floodWarning(now, 'Non-POT', doin='Making merkle tree with %d transactions (ideal: %d; max: %d)' % (pot, idealtxncount, txncount))
POTInfo[1][1] = pot
pot -= 1
txnlist[pot:] = ()
txninfo[pot:] = ()
def updateMerkleTree(self):
global now
self.logger.debug('Polling bitcoind for memorypool')
self.nextMerkleUpdate = now + self.TxnUpdateRetryWait
try:
# First, try BIP 22 standard getblocktemplate :)
MP = self.access.getblocktemplate(self.GBTReq)
self.OldGMP = False
except:
try:
# Failing that, give BIP 22 draft (2012-02 through 2012-07) getmemorypool a chance
MP = self.access.getmemorypool(self.GMPReq)
except:
try:
# Finally, fall back to bitcoind 0.5/0.6 getmemorypool
MP = self.access.getmemorypool()
except:
MP = False
if MP is False:
# This way, we get the error from the BIP22 call if the old one fails too
raise
# Pre-BIP22 server (bitcoind <0.7 or Eloipool <20120513)
if not self.OldGMP:
self.OldGMP = True
self.logger.warning('Upstream server is not BIP 22 compatible')
oMP = deepcopy(MP)
prevBlock = bytes.fromhex(MP['previousblockhash'])[::-1]
if 'height' in MP:
height = MP['height']
else:
height = self.access.getinfo()['blocks'] + 1
bits = bytes.fromhex(MP['bits'])[::-1]
if (prevBlock, height, bits) != self.currentBlock:
self.updateBlock(prevBlock, height, bits, _HBH=(MP['previousblockhash'], MP['bits']))
txnlist = MP['transactions']
if len(txnlist) and isinstance(txnlist[0], dict):
txninfo = txnlist
txnlist = tuple(a['data'] for a in txnlist)
txninfo.insert(0, {
})
elif 'transactionfees' in MP:
# Backward compatibility with pre-BIP22 gmp_fees branch
txninfo = [{'fee':a} for a in MP['transactionfees']]
else:
# Backward compatibility with pre-BIP22 hex-only (bitcoind <0.7, Eloipool <future)
txninfo = [{}] * len(txnlist)
# TODO: cache Txn or at least txid from previous merkle roots?
txnlist = [a for a in map(bytes.fromhex, txnlist)]
self._makeBlockSafe(MP, txnlist, txninfo)
cbtxn = self.makeCoinbaseTxn(MP['coinbasevalue'])
cbtxn.setCoinbase(b'\0\0')
cbtxn.assemble()
txnlist.insert(0, cbtxn.data)
txnlist = [a for a in map(Txn, txnlist[1:])]
txnlist.insert(0, cbtxn)
txnlist = list(txnlist)
newMerkleTree = MerkleTree(txnlist)
if newMerkleTree.merkleRoot() != self.currentMerkleTree.merkleRoot():
newMerkleTree.POTInfo = MP.get('POTInfo')
newMerkleTree.oMP = oMP
if (not self.OldGMP) and 'proposal' in MP.get('capabilities', ()):
(prevBlock, height, bits) = self.currentBlock
coinbase = self.makeCoinbase(height=height)
cbtxn.setCoinbase(coinbase)
cbtxn.assemble()
merkleRoot = newMerkleTree.merkleRoot()
MRD = (merkleRoot, newMerkleTree, coinbase, prevBlock, bits)
blkhdr = MakeBlockHeader(MRD)
data = assembleBlock(blkhdr, txnlist)
propose = self.access.getblocktemplate({
"mode": "proposal",
"data": b2a_hex(data).decode('utf8'),
})
if propose is None:
self.logger.debug('Updating merkle tree (upstream accepted proposal)')
self.currentMerkleTree = newMerkleTree
else:
self.RejectedProposal = (newMerkleTree, propose)
try:
propose = propose['reject-reason']
except:
pass
self.logger.error('Upstream rejected proposed block: %s' % (propose,))
else:
self.logger.debug('Updating merkle tree (no proposal support)')
self.currentMerkleTree = newMerkleTree
self.lastMerkleUpdate = now
self.nextMerkleUpdate = now + self.MinimumTxnUpdateWait
if self.needMerkle == 2:
self.needMerkle = 1
self.needMerkleSince = now
def makeCoinbase(self, height):
now = int(time())
if now > _makeCoinbase[0]:
_makeCoinbase[0] = now
_makeCoinbase[1] = 0
else:
_makeCoinbase[1] += 1
rv = self.CoinbasePrefix
rv += pack('>L', now) + pack('>Q', _makeCoinbase[1]).lstrip(b'\0')
# NOTE: Not using varlenEncode, since this is always guaranteed to be < 100
rv = bytes( (len(rv),) ) + rv
for v in self.CoinbaseAux.values():
rv += v
if len(rv) > 95:
t = time()
if self.overflowed < t - 300:
self.logger.warning('Overflowing coinbase data! %d bytes long' % (len(rv),))
self.overflowed = t
self.isOverflowed = True
rv = rv[:95]
else:
self.isOverflowed = False
rv = bitcoin.script.encodeUNum(height) + rv
return rv
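# Illustrative breakdown (comment only, not from the original source): the coinbase
# scriptSig assembled above is
#   encodeUNum(height)                        (BIP 34 height push)
#   + one length byte covering prefix + timestamp + counter (always < 100, so no varlen)
#   + CoinbasePrefix + pack('>L', now) + stripped counter
#   + CoinbaseAux values appended after the length byte
# with everything after the height push capped at 95 bytes so the full scriptSig
# stays within Bitcoin's 100-byte coinbase limit.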
def makeMerkleRoot(self, merkleTree, height):
cbtxn = merkleTree.data[0]
cb = self.makeCoinbase(height=height)
cbtxn.setCoinbase(cb)
cbtxn.assemble()
merkleRoot = merkleTree.merkleRoot()
return (merkleRoot, merkleTree, cb)
_doing_last = None
def _doing(self, what):
if self._doing_last == what:
self._doing_i += 1
return
global now
if self._doing_last:
self.logger.debug("Switching from (%4dx in %5.3f seconds) %s => %s" % (self._doing_i, now - self._doing_s, self._doing_last, what))
self._doing_last = what
self._doing_i = 1
self._doing_s = now
def _floodWarning(self, now, wid, wmsgf = None, doin = True, logf = None):
if doin is True:
doin = self._doing_last
def a(f = wmsgf):
return lambda: "%s (doing %s)" % (f(), doin)
wmsgf = a()
winfo = self.lastWarning.setdefault(wid, [0, None])
(lastTime, lastDoing) = winfo
if now <= lastTime + max(5, self.MinimumTxnUpdateWait):
return
winfo[0] = now
nowDoing = doin
winfo[1] = nowDoing
if logf is None:
logf = self.logger.warning
logf(wmsgf() if wmsgf else doin)
def _makeOne(self, putf, merkleTree, height):
MT = self.currentMerkleTree
height = self.currentBlock[1]
MR = self.makeMerkleRoot(MT, height=height)
# Only add it if the height hasn't changed in the meantime, to avoid a race
if self.currentBlock[1] == height:
putf(MR)
def makeClear(self):
self._doing('clear merkle roots')
self._makeOne(self.clearMerkleRoots.put, self.curClearMerkleTree, height=self.currentBlock[1])
def makeNext(self):
self._doing('longpoll merkle roots')
self._makeOne(self.nextMerkleRoots.put, self.nextMerkleTree, height=self.currentBlock[1] + 1)
def makeRegular(self):
self._doing('regular merkle roots')
self._makeOne(self.merkleRoots.append, self.currentMerkleTree, height=self.currentBlock[1])
def merkleMaker_II(self):
global now
# No bits = no mining :(
if not self.ready:
return self.updateMerkleTree()
# First, ensure we have the minimum clear, next, and regular (in that order)
if self.clearMerkleRoots.qsize() < self.WorkQueueSizeClear[0]:
return self.makeClear()
if self.nextMerkleRoots.qsize() < self.WorkQueueSizeLongpoll[0]:
return self.makeNext()
if len(self.merkleRoots) < self.WorkQueueSizeRegular[0]:
return self.makeRegular()
# If we've met the minimum requirements, consider updating the merkle tree
if self.nextMerkleUpdate <= now:
return self.updateMerkleTree()
# Finally, fill up clear, next, and regular until we've met the maximums
if self.clearMerkleRoots.qsize() < self.WorkQueueSizeClear[1]:
return self.makeClear()
if self.nextMerkleRoots.qsize() < self.WorkQueueSizeLongpoll[1]:
return self.makeNext()
if len(self.merkleRoots) < self.WorkQueueSizeRegular[1] or self.merkleRoots[0][1] != self.currentMerkleTree:
return self.makeRegular()
# Nothing left to do, fire onBlockUpdate event (if appropriate) and sleep
if self.needMerkle == 1:
self.onBlockUpdate()
self.needMerkle = False
self._doing('idle')
# TODO: rather than sleepspin, block until MinimumTxnUpdateWait expires or threading.Condition(?)
sleep(self.IdleSleepTime)
def merkleMaker_I(self):
global now
now = time()
self.merkleMaker_II()
if self.needMerkle == 1 and now > self.needMerkleSince + self.WarningDelayTxnLongpoll:
self._floodWarning(now, 'NeedMerkle', lambda: 'Transaction-longpoll requested %d seconds ago, and still not ready. Is your server fast enough to keep up with your configured WorkQueueSizeRegular maximum?' % (now - self.needMerkleSince,))
if now > self.nextMerkleUpdate + self.WarningDelayMerkleUpdate:
self._floodWarning(now, 'MerkleUpdate', lambda: "Haven't updated the merkle tree in at least %d seconds! Is your server fast enough to keep up with your configured work queue minimums?" % (now - self.lastMerkleUpdate,))
def run(self):
while True:
try:
self.merkleMaker_I()
except:
self.logger.critical(traceback.format_exc())
def start(self, *a, **k):
self._prepare()
super().start(*a, **k)
def getMRD(self):
try:
MRD = self.merkleRoots.pop()
self.LowestMerkleRoots = min(len(self.merkleRoots), self.LowestMerkleRoots)
rollPrevBlk = False
except IndexError:
qsz = self.clearMerkleRoots.qsize()
if qsz < 0x10:
self.logger.warning('clearMerkleRoots running out! only %d left' % (qsz,))
MRD = self.clearMerkleRoots.get()
self.LowestClearMerkleRoots = min(self.clearMerkleRoots.qsize(), self.LowestClearMerkleRoots)
rollPrevBlk = True
(merkleRoot, merkleTree, cb) = MRD
(prevBlock, height, bits) = self.currentBlock
return (merkleRoot, merkleTree, cb, prevBlock, bits, rollPrevBlk)
def getMC(self, wantClear = False):
if not self.ready:
with self.readyCV:
while not self.ready:
self.readyCV.wait()
(prevBlock, height, bits) = self.currentBlock
mt = self.curClearMerkleTree if wantClear else self.currentMerkleTree
cb = self.makeCoinbase(height=height)
rollPrevBlk = (mt == self.curClearMerkleTree)
return (height, mt, cb, prevBlock, bits, rollPrevBlk)
# merkleMaker tests
def _test():
global now
now = 1337039788
MM = merkleMaker()
reallogger = MM.logger
class fakelogger:
LO = False
def critical(self, *a):
if self.LO > 1: return
reallogger.critical(*a)
def warning(self, *a):
if self.LO: return
reallogger.warning(*a)
def debug(self, *a):
pass
MM.logger = fakelogger()
class NMTClass:
pass
# _makeBlockSafe tests
from copy import deepcopy
MP = {
'coinbasevalue':50,
}
txnlist = [b'\0', b'\x01', b'\x02']
txninfo = [{'fee':0, 'sigops':1}, {'fee':5, 'sigops':10000}, {'fee':0, 'sigops':10001}]
def MBS(LO = 0):
m = deepcopy( (MP, txnlist, txninfo) )
MM.logger.LO = LO
try:
MM._makeBlockSafe(*m)
except:
if LO < 2:
raise
else:
assert LO < 2 # An expected error wasn't thrown
if 'POTInfo' in m[0]:
del m[0]['POTInfo']
return m
MM.POT = 0
assert MBS() == (MP, txnlist[:2], txninfo[:2])
txninfo[2]['fee'] = 1
MPx = deepcopy(MP)
MPx['coinbasevalue'] -= 1
assert MBS() == (MPx, txnlist[:2], txninfo[:2])
txninfo[2]['sigops'] = 1
assert MBS(1) == (MP, txnlist, txninfo)
# APOT tests
MM.POT = 2
txnlist.append(b'\x03')
txninfo.append({'fee':1, 'sigops':0})
MPx = deepcopy(MP)
MPx['coinbasevalue'] -= 1
assert MBS() == (MPx, txnlist[:3], txninfo[:3])
_test()
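# Illustrative sketch (not part of the original module): a pool front end would set
# the configuration attributes read above, start the thread, and then pull work.
# How these are wired up in a real deployment is outside this file.
#
#     MM = merkleMaker()
#     MM.UpstreamURI = 'http://user:pass@localhost:8332'
#     MM.WorkQueueSizeRegular = (64, 256)
#     MM.WorkQueueSizeLongpoll = (16, 64)
#     MM.makeCoinbaseTxn = myMakeCoinbaseTxn   # pool-specific callback (hypothetical)
#     MM.onBlockChange = lambda: None
#     MM.onBlockUpdate = lambda: None
#     MM.start()
#     (merkleRoot, merkleTree, cb, prevBlock, bits, rollPrevBlk) = MM.getMRD()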
| uingei/mm | merklemaker.py | Python | agpl-3.0 | 19,767 |
"""Certificates API
This is a Python API for generating certificates asynchronously.
Other Django apps should use the API functions defined in this module
rather than importing Django models directly.
"""
import logging
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db.models import Q
from opaque_keys.edx.django.models import CourseKeyField
from opaque_keys.edx.keys import CourseKey
from branding import api as branding_api
from lms.djangoapps.certificates.models import (
CertificateGenerationConfiguration,
CertificateGenerationCourseSetting,
CertificateInvalidation,
CertificateStatuses,
CertificateTemplate,
CertificateTemplateAsset,
ExampleCertificateSet,
GeneratedCertificate,
certificate_status_for_student
)
from lms.djangoapps.certificates.queue import XQueueCertInterface
from eventtracking import tracker
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from util.organizations_helpers import get_course_organization_id
from xmodule.modulestore.django import modulestore
log = logging.getLogger("edx.certificate")
MODES = GeneratedCertificate.MODES
def is_passing_status(cert_status):
"""
Given the status of a certificate, return a boolean indicating whether
the student passed the course. This just proxies to the classmethod
defined in models.py
"""
return CertificateStatuses.is_passing_status(cert_status)
def format_certificate_for_user(username, cert):
"""
Helper function to serialize a user certificate.
Arguments:
username (unicode): The identifier of the user.
cert (GeneratedCertificate): a user certificate
Returns: dict
"""
return {
"username": username,
"course_key": cert.course_id,
"type": cert.mode,
"status": cert.status,
"grade": cert.grade,
"created": cert.created_date,
"modified": cert.modified_date,
"is_passing": is_passing_status(cert.status),
# NOTE: the download URL is not currently being set for webview certificates.
# In the future, we can update this to construct a URL to the webview certificate
# for courses that have this feature enabled.
"download_url": (
cert.download_url or get_certificate_url(cert.user.id, cert.course_id)
if cert.status == CertificateStatuses.downloadable
else None
),
}
def get_certificates_for_user(username):
"""
Retrieve certificate information for a particular user.
Arguments:
username (unicode): The identifier of the user.
Returns: list
Example Usage:
>>> get_certificates_for_user("bob")
[
{
"username": "bob",
"course_key": CourseLocator('edX', 'DemoX', 'Demo_Course', None, None),
"type": "verified",
"status": "downloadable",
"download_url": "http://www.example.com/cert.pdf",
"grade": "0.98",
"created": 2015-07-31T00:00:00Z,
"modified": 2015-07-31T00:00:00Z
}
]
"""
return [
format_certificate_for_user(username, cert)
for cert in GeneratedCertificate.eligible_certificates.filter(user__username=username).order_by("course_id")
]
def get_certificate_for_user(username, course_key):
"""
Retrieve certificate information for a particular user for a specific course.
Arguments:
username (unicode): The identifier of the user.
course_key (CourseKey): A Course Key.
Returns: dict
"""
try:
cert = GeneratedCertificate.eligible_certificates.get(
user__username=username,
course_id=course_key
)
except GeneratedCertificate.DoesNotExist:
return None
return format_certificate_for_user(username, cert)
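# Illustrative usage (not part of this module): per the module docstring, other
# Django apps should call these API functions rather than query the models, e.g.
#
#     from lms.djangoapps.certificates import api as certs_api
#     cert_info = certs_api.get_certificate_for_user('bob', course_key)
#     if cert_info and cert_info['is_passing']:
#         ...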
def generate_user_certificates(student, course_key, course=None, insecure=False, generation_mode='batch',
forced_grade=None):
"""
It will add the add-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'. It also emits
`edx.certificate.created` event for analytics.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
course (Course): Optionally provide the course object; if not provided
it will be loaded.
insecure - (Boolean)
generation_mode - who requested certificate generation. Its value should be `batch`
for a Django management command and `self` if the student initiated the request.
forced_grade - a grade string used in place of the computed grade; if present,
grading will be skipped.
"""
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
if not course:
course = modulestore().get_course(course_key, depth=0)
generate_pdf = not has_html_certificates_enabled(course)
cert = xqueue.add_cert(
student,
course_key,
course=course,
generate_pdf=generate_pdf,
forced_grade=forced_grade
)
# If cert_status is not among the certificate's valid statuses (for example, unverified),
# add_cert returns None, and accessing attributes on it would raise AttributeError.
if cert is None:
return
if CertificateStatuses.is_passing_status(cert.status):
emit_certificate_event('created', student, course_key, course, {
'user_id': student.id,
'course_id': unicode(course_key),
'certificate_id': cert.verify_uuid,
'enrollment_mode': cert.mode,
'generation_mode': generation_mode
})
return cert.status
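# Illustrative call (hypothetical student/course_key objects): a batch Django
# command might request generation like
#
#     status = generate_user_certificates(student, course_key, generation_mode='batch')
#
# while a student-initiated request would pass generation_mode='self'.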
def regenerate_user_certificates(student, course_key, course=None,
forced_grade=None, template_file=None, insecure=False):
"""
It will add the regen-cert request into the xqueue.
A new record will be created to track the certificate
generation task. If an error occurs while adding the certificate
to the queue, the task will have status 'error'.
Args:
student (User)
course_key (CourseKey)
Keyword Arguments:
course (Course): Optionally provide the course object; if not provided
it will be loaded.
forced_grade - The grade string, such as "Distinction"
template_file - The template file used to render this certificate
insecure - (Boolean)
"""
xqueue = XQueueCertInterface()
if insecure:
xqueue.use_https = False
if not course:
course = modulestore().get_course(course_key, depth=0)
generate_pdf = not has_html_certificates_enabled(course)
return xqueue.regen_cert(
student,
course_key,
course=course,
forced_grade=forced_grade,
template_file=template_file,
generate_pdf=generate_pdf
)
def certificate_downloadable_status(student, course_key):
"""
Check the student's existing certificates against a given course.
If the status is not generating, downloadable, or error, the user can see the generate button.
Args:
student (user object): logged-in user
course_key (CourseKey): ID associated with the course
Returns:
Dict containing student passed status also download url, uuid for cert if available
"""
current_status = certificate_status_for_student(student, course_key)
# If the certificate status is an error, the user should see the status as "generating".
# On the back end, those errors need to be monitored and the task re-submitted.
response_data = {
'is_downloadable': False,
'is_generating': True if current_status['status'] in [CertificateStatuses.generating,
CertificateStatuses.error] else False,
'is_unverified': True if current_status['status'] == CertificateStatuses.unverified else False,
'download_url': None,
'uuid': None,
}
may_view_certificate = CourseOverview.get_from_id(course_key).may_certify()
if current_status['status'] == CertificateStatuses.downloadable and may_view_certificate:
response_data['is_downloadable'] = True
response_data['download_url'] = current_status['download_url'] or get_certificate_url(student.id, course_key)
response_data['uuid'] = current_status['uuid']
return response_data
def set_cert_generation_enabled(course_key, is_enabled):
"""Enable or disable self-generated certificates for a course.
There are two "switches" that control whether self-generated certificates
are enabled for a course:
1) Whether the self-generated certificates feature is enabled.
2) Whether self-generated certificates have been enabled for this particular course.
The second flag should be enabled *only* when someone has successfully
generated example certificates for the course. This helps avoid
configuration errors (for example, not having a template configured
for the course installed on the workers). The UI for the instructor
dashboard enforces this constraint.
Arguments:
course_key (CourseKey): The course identifier.
Keyword Arguments:
is_enabled (boolean): If provided, enable/disable self-generated
certificates for this course.
"""
CertificateGenerationCourseSetting.set_self_generatation_enabled_for_course(course_key, is_enabled)
cert_event_type = 'enabled' if is_enabled else 'disabled'
event_name = '.'.join(['edx', 'certificate', 'generation', cert_event_type])
tracker.emit(event_name, {
'course_id': unicode(course_key),
})
if is_enabled:
log.info(u"Enabled self-generated certificates for course '%s'.", unicode(course_key))
else:
log.info(u"Disabled self-generated certificates for course '%s'.", unicode(course_key))
def is_certificate_invalid(student, course_key):
"""Check that whether the student in the course has been invalidated
for receiving certificates.
Arguments:
student (user object): logged-in user
course_key (CourseKey): The course identifier.
Returns:
Boolean denoting whether the student in the course is invalidated
from receiving certificates
"""
is_invalid = False
certificate = GeneratedCertificate.certificate_for_student(student, course_key)
if certificate is not None:
is_invalid = CertificateInvalidation.has_certificate_invalidation(student, course_key)
return is_invalid
def cert_generation_enabled(course_key):
"""Check whether certificate generation is enabled for a course.
There are two "switches" that control whether self-generated certificates
are enabled for a course:
1) Whether the self-generated certificates feature is enabled.
2) Whether self-generated certificates have been enabled for this particular course.
Certificates are enabled for a course only when both switches
are set to True.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
boolean: Whether self-generated certificates are enabled
for the course.
"""
return (
CertificateGenerationConfiguration.current().enabled and
CertificateGenerationCourseSetting.is_self_generation_enabled_for_course(course_key)
)
def generate_example_certificates(course_key):
"""Generate example certificates for a course.
Example certificates are used to validate that certificates
are configured correctly for the course. Staff members can
view the example certificates before enabling
the self-generated certificates button for students.
Several example certificates may be generated for a course.
For example, if a course offers both verified and honor certificates,
examples of both types of certificate will be generated.
If an error occurs while starting the certificate generation
job, the errors will be recorded in the database and
can be retrieved using `example_certificate_status()`.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
None
"""
xqueue = XQueueCertInterface()
for cert in ExampleCertificateSet.create_example_set(course_key):
xqueue.add_example_cert(cert)
def example_certificates_status(course_key):
"""Check the status of example certificates for a course.
This will check the *latest* example certificate task.
This is generally what we care about in terms of enabling/disabling
self-generated certificates for a course.
Arguments:
course_key (CourseKey): The course identifier.
Returns:
list
Example Usage:
>>> from lms.djangoapps.certificates import api as certs_api
>>> certs_api.example_certificate_status(course_key)
[
{
'description': 'honor',
'status': 'success',
'download_url': 'http://www.example.com/abcd/honor_cert.pdf'
},
{
'description': 'verified',
'status': 'error',
'error_reason': 'No template found!'
}
]
"""
return ExampleCertificateSet.latest_status(course_key)
def _safe_course_key(course_key):
if not isinstance(course_key, CourseKey):
return CourseKey.from_string(course_key)
return course_key
def _course_from_key(course_key):
return CourseOverview.get_from_id(_safe_course_key(course_key))
def _certificate_html_url(user_id, course_id, uuid):
if uuid:
return reverse('certificates:render_cert_by_uuid', kwargs={'certificate_uuid': uuid})
elif user_id and course_id:
kwargs = {"user_id": str(user_id), "course_id": unicode(course_id)}
return reverse('certificates:html_view', kwargs=kwargs)
return ''
def _certificate_download_url(user_id, course_id):
try:
user_certificate = GeneratedCertificate.eligible_certificates.get(
user=user_id,
course_id=_safe_course_key(course_id)
)
return user_certificate.download_url
except GeneratedCertificate.DoesNotExist:
log.critical(
'Unable to lookup certificate\n'
'user id: %d\n'
'course: %s', user_id, unicode(course_id)
)
return ''
def has_html_certificates_enabled(course):
if not settings.FEATURES.get('CERTIFICATES_HTML_VIEW', False):
return False
return course.cert_html_view_enabled
def get_certificate_url(user_id=None, course_id=None, uuid=None):
url = ''
course = _course_from_key(course_id)
if not course:
return url
if has_html_certificates_enabled(course):
url = _certificate_html_url(user_id, course_id, uuid)
else:
url = _certificate_download_url(user_id, course_id)
return url
def get_active_web_certificate(course, is_preview_mode=None):
"""
Retrieves the active web certificate configuration for the specified course
"""
certificates = getattr(course, 'certificates', '{}')
configurations = certificates.get('certificates', [])
for config in configurations:
if config.get('is_active') or is_preview_mode:
return config
return None
def get_certificate_template(course_key, mode, language):
"""
Retrieves the custom certificate template based on course_key, mode, and language.
"""
template = None
# fetch organization of the course
org_id = get_course_organization_id(course_key)
# only consider active templates
active_templates = CertificateTemplate.objects.filter(is_active=True)
if org_id and mode: # get template by org, mode, and key
org_mode_and_key_templates = active_templates.filter(
organization_id=org_id,
mode=mode,
course_key=course_key
)
template = get_language_specific_template_or_default(language, org_mode_and_key_templates)
# since no template matched that course_key, only consider templates with empty course_key
empty_course_key_templates = active_templates.filter(course_key=CourseKeyField.Empty)
if not template and org_id and mode: # get template by org and mode
org_and_mode_templates = empty_course_key_templates.filter(
organization_id=org_id,
mode=mode
)
template = get_language_specific_template_or_default(language, org_and_mode_templates)
if not template and org_id: # get template by only org
org_templates = empty_course_key_templates.filter(
organization_id=org_id,
mode=None
)
template = get_language_specific_template_or_default(language, org_templates)
if not template and mode: # get template by only mode
mode_templates = empty_course_key_templates.filter(
organization_id=None,
mode=mode
)
template = get_language_specific_template_or_default(language, mode_templates)
return template if template else None
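# Illustrative note (not from the original source): the lookup above falls back in
# the order (org + mode + course_key) -> (org + mode) -> (org only) -> (mode only),
# preferring a language-specific template at each step, e.g.
#
#     template = get_certificate_template(course_key, mode='verified', language='es-419')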
def get_language_specific_template_or_default(language, templates):
"""
Returns templates that match the passed-in language.
Returns default templates if no language matches or the language passed is None.
"""
two_letter_language = _get_two_letter_language_code(language)
language_or_default_templates = list(templates.filter(Q(language=two_letter_language) | Q(language=None) | Q(language='')))
language_specific_template = get_language_specific_template(two_letter_language, language_or_default_templates)
if language_specific_template:
return language_specific_template
else:
return get_all_languages_or_default_template(language_or_default_templates)
def get_language_specific_template(language, templates):
for template in templates:
if template.language == language:
return template
return None
def get_all_languages_or_default_template(templates):
for template in templates:
if template.language == '':
return template
return templates[0] if templates else None
def _get_two_letter_language_code(language_code):
"""
Shortens language to only first two characters (e.g. es-419 becomes es)
This is needed because Catalog returns locale language which is not always a 2 letter code.
"""
if language_code is None:
return None
elif language_code == '':
return ''
else:
return language_code[:2]
def emit_certificate_event(event_name, user, course_id, course=None, event_data=None):
"""
Emits certificate event.
"""
event_name = '.'.join(['edx', 'certificate', event_name])
if course is None:
course = modulestore().get_course(course_id, depth=0)
context = {
'org_id': course.org,
'course_id': unicode(course_id)
}
data = {
'user_id': user.id,
'course_id': unicode(course_id),
'certificate_url': get_certificate_url(user.id, course_id)
}
event_data = event_data or {}
event_data.update(data)
with tracker.get_tracker().context(event_name, context):
tracker.emit(event_name, event_data)
def get_asset_url_by_slug(asset_slug):
"""
Returns certificate template asset url for given asset_slug.
"""
asset_url = ''
try:
template_asset = CertificateTemplateAsset.objects.get(asset_slug=asset_slug)
asset_url = template_asset.asset.url
except CertificateTemplateAsset.DoesNotExist:
pass
return asset_url
def get_certificate_header_context(is_secure=True):
"""
Return data to be used in the certificate header;
the data returned should be customized according to the site configuration.
"""
data = dict(
logo_src=branding_api.get_logo_url(is_secure),
logo_url=branding_api.get_base_url(is_secure),
)
return data
def get_certificate_footer_context():
"""
Return data to be used in the certificate footer;
the data returned should be customized according to the site configuration.
"""
data = dict()
# get Terms of Service and Honor Code page url
terms_of_service_and_honor_code = branding_api.get_tos_and_honor_code_url()
if terms_of_service_and_honor_code != branding_api.EMPTY_URL:
data.update({'company_tos_url': terms_of_service_and_honor_code})
# get Privacy Policy page url
privacy_policy = branding_api.get_privacy_url()
if privacy_policy != branding_api.EMPTY_URL:
data.update({'company_privacy_url': privacy_policy})
# get About page url
about = branding_api.get_about_url()
if about != branding_api.EMPTY_URL:
data.update({'company_about_url': about})
return data
| procangroup/edx-platform | lms/djangoapps/certificates/api.py | Python | agpl-3.0 | 21,092 |
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
class BaseComponent:
"""This is a basic interface for defining components.
The only requirement is to implement the name method that
uniquely identifies a component. It should also define other
methods that implement the component functionality.
"""
@classmethod
def name(cls):
raise NotImplementedError()
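# Illustrative sketch (not part of Superdesk): a minimal concrete component only
# needs to supply a unique name; any other methods are whatever functionality the
# component exposes.
class ExampleComponent(BaseComponent):
    @classmethod
    def name(cls):
        return 'example'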
| superdesk/superdesk-core | apps/common/components/base_component.py | Python | agpl-3.0 | 653 |
"""
News resource
=============
It is an alias for archive without filtering out published items.
"""
from superdesk.resource import build_custom_hateoas
from apps.archive.archive import ArchiveResource, ArchiveService
from apps.archive.common import CUSTOM_HATEOAS
class NewsResource(ArchiveResource):
datasource = ArchiveResource.datasource.copy()
datasource.update(
{
"source": "archive",
"elastic_filter": {"bool": {"must_not": {"term": {"version": 0}}}},
}
)
resource_methods = ["GET"]
item_methods = []
class NewsService(ArchiveService):
def enhance_items(self, items):
super().enhance_items(items)
for item in items:
build_custom_hateoas(CUSTOM_HATEOAS, item)
| superdesk/superdesk-core | apps/archive/news.py | Python | agpl-3.0 | 767 |
# Copyright (c) 2014, Hubert Kario
#
# See the LICENSE file for legal information regarding use of this file.
# compatibility with Python 2.6, for that we need unittest2 package,
# which is not available on 3.3 or 3.4
try:
import unittest2 as unittest
except ImportError:
import unittest
from tlslite.utils.ecc import decodeX962Point, encodeX962Point, getCurveByName,\
getPointByteSize
import ecdsa
class TestEncoder(unittest.TestCase):
def test_encode_P_256_point(self):
point = ecdsa.NIST256p.generator * 200
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
# x coordinate
b'\x3a\x53\x5b\xd0\xbe\x46\x6f\xf3\xd8\x56'
b'\xa0\x77\xaa\xd9\x50\x4f\x16\xaa\x5d\x52'
b'\x28\xfc\xd7\xc2\x77\x48\x85\xee\x21\x3f'
b'\x3b\x34'
# y coordinate
b'\x66\xab\xa8\x18\x5b\x33\x41\xe0\xc2\xe3'
b'\xd1\xb3\xae\x69\xe4\x7d\x0f\x01\xd4\xbb'
b'\xd7\x06\xd9\x57\x8b\x0b\x65\xd6\xd3\xde'
b'\x1e\xfe'
))
def test_encode_P_256_point_with_zero_first_byte_on_x(self):
point = ecdsa.NIST256p.generator * 379
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x00\x55\x43\x89\x4a\xf3\xd0\x0e\xd7\xd7'
b'\x40\xab\xdb\xd7\x5c\x96\xb0\x68\x77\xb7'
b'\x87\xdb\x5f\x70\xee\xa7\x8b\x90\xa8\xd7'
b'\xc0\x0a'
b'\xbb\x4c\x85\xa3\xd8\xea\x29\xef\xaa\xfa'
b'\x24\x40\x69\x12\xdd\x84\xd5\xb1\x4d\xc3'
b'\x2b\xf6\x56\xef\x6c\x6b\xd5\x8a\x5d\x94'
b'\x3f\x92'
))
def test_encode_P_256_point_with_zero_first_byte_on_y(self):
point = ecdsa.NIST256p.generator * 43
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x98\x6a\xe2\x50\x6f\x1f\xf1\x04\xd0\x42'
b'\x30\x86\x1d\x8f\x4b\x49\x8f\x4b\xc4\xc6'
b'\xd0\x09\xb3\x0f\x75\x44\xdc\x12\x9b\x82'
b'\xd2\x8d'
b'\x00\x3c\xcc\xc0\xa6\x46\x0e\x0a\xe3\x28'
b'\xa4\xd9\x7d\x3c\x7b\x61\xd8\x6f\xc6\x28'
b'\x9c\x18\x9f\x25\x25\x11\x0c\x44\x1b\xb0'
b'\x7e\x97'
))
def test_encode_P_256_point_with_two_zero_first_bytes_on_x(self):
point = ecdsa.NIST256p.generator * 40393
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x00\x00\x3f\x5f\x17\x8a\xa0\x70\x6c\x42'
b'\x31\xeb\x6e\x54\x95\xaa\x16\x42\xc5\xb8'
b'\xa9\x94\x12\x7c\x89\x46\x5f\x22\x99\x4a'
b'\x42\xf9'
b'\xc2\x48\xb3\x37\x59\x9f\x0c\x2f\x29\x77'
b'\x2e\x25\x6f\x1d\x55\x49\xc8\x9b\xa9\xe5'
b'\x73\x13\x82\xcd\x1e\x3c\xc0\x9d\x10\xd0'
b'\x0b\x55'))
def test_encode_P_521_point(self):
point = ecdsa.NIST521p.generator * 200
self.assertEqual(encodeX962Point(point),
bytearray(b'\x04'
b'\x00\x3e\x2a\x2f\x9f\xd5\x9f\xc3\x8d\xfb'
b'\xde\x77\x26\xa0\xbf\xc6\x48\x2a\x6b\x2a'
b'\x86\xf6\x29\xb8\x34\xa0\x6c\x3d\x66\xcd'
b'\x79\x8d\x9f\x86\x2e\x89\x31\xf7\x10\xc7'
b'\xce\x89\x15\x9f\x35\x8b\x4a\x5c\x5b\xb3'
b'\xd2\xcc\x9e\x1b\x6e\x94\x36\x23\x6d\x7d'
b'\x6a\x5e\x00\xbc\x2b\xbe'
b'\x01\x56\x7a\x41\xcb\x48\x8d\xca\xd8\xe6'
b'\x3a\x3f\x95\xb0\x8a\xf6\x99\x2a\x69\x6a'
b'\x37\xdf\xc6\xa1\x93\xff\xbc\x3f\x91\xa2'
b'\x96\xf3\x3c\x66\x15\x57\x3c\x1c\x06\x7f'
b'\x0a\x06\x4d\x18\xbd\x0c\x81\x4e\xf7\x2a'
b'\x8f\x76\xf8\x7f\x9b\x7d\xff\xb2\xf4\x26'
b'\x36\x43\x43\x86\x11\x89'))
class TestDecoder(unittest.TestCase):
def test_decode_P_256_point(self):
point = ecdsa.NIST256p.generator * 379
data = bytearray(b'\x04'
b'\x00\x55\x43\x89\x4a\xf3\xd0\x0e\xd7\xd7'
b'\x40\xab\xdb\xd7\x5c\x96\xb0\x68\x77\xb7'
b'\x87\xdb\x5f\x70\xee\xa7\x8b\x90\xa8\xd7'
b'\xc0\x0a'
b'\xbb\x4c\x85\xa3\xd8\xea\x29\xef\xaa\xfa'
b'\x24\x40\x69\x12\xdd\x84\xd5\xb1\x4d\xc3'
b'\x2b\xf6\x56\xef\x6c\x6b\xd5\x8a\x5d\x94'
b'\x3f\x92'
)
decoded_point = decodeX962Point(data, ecdsa.NIST256p)
self.assertEqual(point, decoded_point)
def test_decode_P_521_point(self):
data = bytearray(b'\x04'
b'\x01\x7d\x8a\x5d\x11\x03\x4a\xaf\x01\x26'
b'\x5f\x2d\xd6\x2d\x76\xeb\xd8\xbe\x4e\xfb'
b'\x3b\x4b\xd2\x05\x5a\xed\x4c\x6d\x20\xc7'
b'\xf3\xd7\x08\xab\x21\x9e\x34\xfd\x14\x56'
b'\x3d\x47\xd0\x02\x65\x15\xc2\xdd\x2d\x60'
b'\x66\xf9\x15\x64\x55\x7a\xae\x56\xa6\x7a'
b'\x28\x51\x65\x26\x5c\xcc'
b'\x01\xd4\x19\x56\xfa\x14\x6a\xdb\x83\x1c'
b'\xb6\x1a\xc4\x4b\x40\xb1\xcb\xcc\x9e\x4f'
b'\x57\x2c\xb2\x72\x70\xb9\xef\x38\x15\xae'
b'\x87\x1f\x85\x40\x94\xda\x69\xed\x97\xeb'
b'\xdc\x72\x25\x25\x61\x76\xb2\xde\xed\xa2'
b'\xb0\x5c\xca\xc4\x83\x8f\xfb\x54\xae\xe0'
b'\x07\x45\x0b\xbf\x7c\xfc')
point = decodeX962Point(data, ecdsa.NIST521p)
self.assertIsNotNone(point)
self.assertEqual(encodeX962Point(point), data)
def test_decode_with_missing_data(self):
data = bytearray(b'\x04'
b'\x00\x55\x43\x89\x4a\xf3\xd0\x0e\xd7\xd7'
b'\x40\xab\xdb\xd7\x5c\x96\xb0\x68\x77\xb7'
b'\x87\xdb\x5f\x70\xee\xa7\x8b\x90\xa8\xd7'
b'\xc0\x0a'
b'\xbb\x4c\x85\xa3\xd8\xea\x29\xef\xaa\xfa'
b'\x24\x40\x69\x12\xdd\x84\xd5\xb1\x4d\xc3'
b'\x2b\xf6\x56\xef\x6c\x6b\xd5\x8a\x5d\x94'
#b'\x3f\x92'
)
# XXX will change later as decoder in tlslite-ng needs to be updated
with self.assertRaises(SyntaxError):
decodeX962Point(data, ecdsa.NIST256p)
class TestCurveLookup(unittest.TestCase):
def test_with_correct_name(self):
curve = getCurveByName('secp256r1')
self.assertIs(curve, ecdsa.NIST256p)
def test_with_invalid_name(self):
with self.assertRaises(ValueError):
getCurveByName('NIST256p')
class TestGetPointByteSize(unittest.TestCase):
def test_with_curve(self):
self.assertEqual(getPointByteSize(ecdsa.NIST256p), 32)
def test_with_point(self):
self.assertEqual(getPointByteSize(ecdsa.NIST384p.generator * 10), 48)
def test_with_invalid_argument(self):
with self.assertRaises(ValueError):
getPointByteSize("P-256")
| ioef/tlslite-ng | unit_tests/test_tlslite_utils_ecc.py | Python | lgpl-2.1 | 8,381 |
##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import sys
from spack import *
class Slepc(Package):
"""Scalable Library for Eigenvalue Problem Computations."""
homepage = "http://www.grycap.upv.es/slepc"
url = "http://slepc.upv.es/download/distrib/slepc-3.6.2.tar.gz"
git = "https://bitbucket.org/slepc/slepc.git"
version('develop', branch='master')
version('3.9.1', 'e174ea7c127d9161eef976b0288f0c56d443a58d6ab2dc8af1e8bd66f156ce17')
version('3.9.0', '1f3930db56b4065aaf214ea758ddff1a70bf19d45544cbdfd19d2787db4bfe0b')
version('3.8.2', '1e7d20d20eb26da307d36017461fe4a55f40e947e232739179dbe6412e22ed13')
version('3.8.0', 'c58ccc4e852d1da01112466c48efa41f0839649f3a265925788237d76cd3d963')
version('3.7.4', '2fb782844e3bc265a8d181c3c3e2632a4ca073111c874c654f1365d33ca2eb8a')
version('3.7.3', '3ef9bcc645a10c1779d56b3500472ceb66df692e389d635087d30e7c46424df9')
version('3.7.1', '670216f263e3074b21e0623c01bc0f562fdc0bffcd7bd42dd5d8edbe73a532c2')
version('3.6.3', '384939d009546db37bc05ed81260c8b5ba451093bf891391d32eb7109ccff876')
version('3.6.2', '2ab4311bed26ccf7771818665991b2ea3a9b15f97e29fd13911ab1293e8e65df')
variant('arpack', default=True, description='Enables Arpack wrappers')
variant('blopex', default=False, description='Enables BLOPEX wrappers')
# NOTE: make sure PETSc and SLEPc use the same python.
depends_on('python@2.6:2.8', type='build')
# Cannot mix release and development versions of SLEPc and PETSc:
depends_on('petsc@develop', when='@develop')
depends_on('petsc@3.9:3.9.99', when='@3.9:3.9.99')
depends_on('petsc@3.8:3.8.99', when='@3.8:3.8.99')
depends_on('petsc@3.7:3.7.7', when='@3.7.1:3.7.4')
depends_on('petsc@3.6.3:3.6.4', when='@3.6.2:3.6.3')
depends_on('arpack-ng~mpi', when='+arpack^petsc~mpi~int64')
depends_on('arpack-ng+mpi', when='+arpack^petsc+mpi~int64')
patch('install_name_371.patch', when='@3.7.1')
# Arpack cannot be used with 64-bit integers.
conflicts('+arpack', when='^petsc+int64')
resource(name='blopex',
url='http://slepc.upv.es/download/external/blopex-1.1.2.tar.gz',
sha256='0081ee4c4242e635a8113b32f655910ada057c59043f29af4b613508a762f3ac',
destination=join_path('installed-arch-' + sys.platform + '-c-opt',
'externalpackages'),
when='+blopex')
def install(self, spec, prefix):
# set SLEPC_DIR for installation
# Note that one should set the current (temporary) directory instead of
# its symlink in spack/stage/!
os.environ['SLEPC_DIR'] = os.getcwd()
options = []
if '+arpack' in spec:
options.extend([
'--with-arpack-dir=%s' % spec['arpack-ng'].prefix.lib,
])
if 'arpack-ng~mpi' in spec:
options.extend([
'--with-arpack-flags=-larpack'
])
else:
options.extend([
'--with-arpack-flags=-lparpack,-larpack'
])
# It isn't possible to install BLOPEX separately and link to it;
# BLOPEX has to be downloaded with SLEPc at configure time
if '+blopex' in spec:
options.append('--download-blopex')
configure('--prefix=%s' % prefix, *options)
make('MAKE_NP=%s' % make_jobs, parallel=False)
if self.run_tests:
make('test', parallel=False)
make('install', parallel=False)
def setup_dependent_environment(self, spack_env, run_env, dependent_spec):
# set up SLEPC_DIR for everyone using SLEPc package
spack_env.set('SLEPC_DIR', self.prefix)
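# Illustrative spec usage (assumed typical invocations, not taken from this file):
#
#     spack install slepc+arpack ^petsc+mpi
#     spack install slepc@3.9.1~arpack+blopex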
| mfherbst/spack | var/spack/repos/builtin/packages/slepc/package.py | Python | lgpl-2.1 | 4,929 |
"""distutils.dist
Provides the Distribution class, which represents the module distribution
being built/installed/distributed.
"""
import sys, os, re
try:
import warnings
except ImportError:
warnings = None
from distutils.errors import *
from distutils.fancy_getopt import FancyGetopt, translate_longopt
from distutils.util import check_environ, strtobool, rfc822_escape
from distutils import log
from distutils.debug import DEBUG
# Regex to define acceptable Distutils command names. This is not *quite*
# the same as a Python NAME -- I don't allow leading underscores. The fact
# that they're very similar is no coincidence; the default naming scheme is
# to look for a Python module named after the command.
command_re = re.compile (r'^[a-zA-Z]([a-zA-Z0-9_]*)$')
class Distribution:
"""The core of the Distutils. Most of the work hiding behind 'setup'
is really done within a Distribution instance, which farms the work out
to the Distutils commands specified on the command line.
Setup scripts will almost never instantiate Distribution directly,
unless the 'setup()' function is totally inadequate to their needs.
However, it is conceivable that a setup script might wish to subclass
Distribution for some specialized purpose, and then pass the subclass
to 'setup()' as the 'distclass' keyword argument. If so, it is
necessary to respect the expectations that 'setup' has of Distribution.
See the code for 'setup()', in core.py, for details.
"""
# 'global_options' describes the command-line options that may be
# supplied to the setup script prior to any actual commands.
# Eg. "./setup.py -n" or "./setup.py --quiet" both take advantage of
# these global options. This list should be kept to a bare minimum,
# since every global option is also valid as a command option -- and we
# don't want to pollute the commands with too many options that they
# have minimal control over.
# The fourth entry for verbose means that it can be repeated.
global_options = [('verbose', 'v', "run verbosely (default)", 1),
('quiet', 'q', "run quietly (turns verbosity off)"),
('dry-run', 'n', "don't actually do anything"),
('help', 'h', "show detailed help message"),
]
# 'common_usage' is a short (2-3 line) string describing the common
# usage of the setup script.
common_usage = """\
Common commands: (see '--help-commands' for more)
setup.py build will build the package underneath 'build/'
setup.py install will install the package
"""
# options that are not propagated to the commands
display_options = [
('help-commands', None,
"list all available commands"),
('name', None,
"print package name"),
('version', 'V',
"print package version"),
('fullname', None,
"print <package name>-<version>"),
('author', None,
"print the author's name"),
('author-email', None,
"print the author's email address"),
('maintainer', None,
"print the maintainer's name"),
('maintainer-email', None,
"print the maintainer's email address"),
('contact', None,
"print the maintainer's name if known, else the author's"),
('contact-email', None,
"print the maintainer's email address if known, else the author's"),
('url', None,
"print the URL for this package"),
('license', None,
"print the license of the package"),
('licence', None,
"alias for --license"),
('description', None,
"print the package description"),
('long-description', None,
"print the long package description"),
('platforms', None,
"print the list of platforms"),
('classifiers', None,
"print the list of classifiers"),
('keywords', None,
"print the list of keywords"),
('provides', None,
"print the list of packages/modules provided"),
('requires', None,
"print the list of packages/modules required"),
('obsoletes', None,
"print the list of packages/modules made obsolete")
]
display_option_names = [translate_longopt(x[0]) for x in display_options]
# negative options are options that exclude other options
negative_opt = {'quiet': 'verbose'}
# -- Creation/initialization methods -------------------------------
def __init__ (self, attrs=None):
"""Construct a new Distribution instance: initialize all the
attributes of a Distribution, and then use 'attrs' (a dictionary
mapping attribute names to values) to assign some of those
attributes their "real" values. (Any attributes not mentioned in
'attrs' will be assigned to some null value: 0, None, an empty list
or dictionary, etc.) Most importantly, initialize the
'command_obj' attribute to the empty dictionary; this will be
filled in with real command objects by 'parse_command_line()'.
"""
# Default values for our command-line options
self.verbose = 1
self.dry_run = 0
self.help = 0
for attr in self.display_option_names:
setattr(self, attr, 0)
# Store the distribution meta-data (name, version, author, and so
# forth) in a separate object -- we're getting to have enough
# information here (and enough command-line options) that it's
# worth it. Also delegate 'get_XXX()' methods to the 'metadata'
# object in a sneaky and underhanded (but efficient!) way.
self.metadata = DistributionMetadata()
for basename in self.metadata._METHOD_BASENAMES:
method_name = "get_" + basename
setattr(self, method_name, getattr(self.metadata, method_name))
# 'cmdclass' maps command names to class objects, so we
# can 1) quickly figure out which class to instantiate when
# we need to create a new command object, and 2) have a way
# for the setup script to override command classes
self.cmdclass = {}
# 'command_packages' is a list of packages in which commands
# are searched for. The factory for command 'foo' is expected
# to be named 'foo' in the module 'foo' in one of the packages
# named here. This list is searched from the left; an error
# is raised if no named package provides the command being
# searched for. (Always access using get_command_packages().)
self.command_packages = None
# 'script_name' and 'script_args' are usually set to sys.argv[0]
# and sys.argv[1:], but they can be overridden when the caller is
# not necessarily a setup script run from the command-line.
self.script_name = None
self.script_args = None
# 'command_options' is where we store command options between
# parsing them (from config files, the command-line, etc.) and when
# they are actually needed -- ie. when the command in question is
# instantiated. It is a dictionary of dictionaries of 2-tuples:
# command_options = { command_name : { option : (source, value) } }
self.command_options = {}
# 'dist_files' is the list of (command, pyversion, file) that
# have been created by any dist commands run so far. This is
# filled regardless of whether the run is dry or not. pyversion
# gives sysconfig.get_python_version() if the dist file is
# specific to a Python version, 'any' if it is good for all
# Python versions on the target platform, and '' for a source
# file. pyversion should not be used to specify minimum or
# maximum required Python versions; use the metainfo for that
# instead.
self.dist_files = []
# These options are really the business of various commands, rather
# than of the Distribution itself. We provide aliases for them in
# Distribution as a convenience to the developer.
self.packages = None
self.package_data = {}
self.package_dir = None
self.py_modules = None
self.libraries = None
self.headers = None
self.ext_modules = None
self.ext_package = None
self.include_dirs = None
self.extra_path = None
self.scripts = None
self.data_files = None
self.password = ''
# And now initialize bookkeeping stuff that can't be supplied by
# the caller at all. 'command_obj' maps command names to
# Command instances -- that's how we enforce that every command
# class is a singleton.
self.command_obj = {}
# 'have_run' maps command names to boolean values; it keeps track
# of whether we have actually run a particular command, to make it
# cheap to "run" a command whenever we think we might need to -- if
# it's already been done, no need for expensive filesystem
# operations, we just check the 'have_run' dictionary and carry on.
# It's only safe to query 'have_run' for a command class that has
# been instantiated -- a false value will be inserted when the
# command object is created, and replaced with a true value when
# the command is successfully run. Thus it's probably best to use
# '.get()' rather than a straight lookup.
self.have_run = {}
# Now we'll use the attrs dictionary (ultimately, keyword args from
# the setup script) to possibly override any or all of these
# distribution options.
if attrs:
# Pull out the set of command options and work on them
# specifically. Note that this order guarantees that aliased
# command options will override any supplied redundantly
# through the general options dictionary.
options = attrs.get('options')
if options is not None:
del attrs['options']
for (command, cmd_options) in options.items():
opt_dict = self.get_option_dict(command)
for (opt, val) in cmd_options.items():
opt_dict[opt] = ("setup script", val)
if 'licence' in attrs:
attrs['license'] = attrs['licence']
del attrs['licence']
msg = "'licence' distribution option is deprecated; use 'license'"
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
# Now work on the rest of the attributes. Any attribute that's
# not already defined is invalid!
for (key, val) in attrs.items():
if hasattr(self.metadata, "set_" + key):
getattr(self.metadata, "set_" + key)(val)
elif hasattr(self.metadata, key):
setattr(self.metadata, key, val)
elif hasattr(self, key):
setattr(self, key, val)
else:
msg = "Unknown distribution option: %s" % repr(key)
if warnings is not None:
warnings.warn(msg)
else:
sys.stderr.write(msg + "\n")
self.finalize_options()
def get_option_dict(self, command):
"""Get the option dictionary for a given command. If that
command's option dictionary hasn't been created yet, then create it
and return the new dictionary; otherwise, return the existing
option dictionary.
"""
dict = self.command_options.get(command)
if dict is None:
dict = self.command_options[command] = {}
return dict
def dump_option_dicts(self, header=None, commands=None, indent=""):
from pprint import pformat
if commands is None: # dump all command option dicts
commands = sorted(self.command_options.keys())
if header is not None:
self.announce(indent + header)
indent = indent + " "
if not commands:
self.announce(indent + "no commands known yet")
return
for cmd_name in commands:
opt_dict = self.command_options.get(cmd_name)
if opt_dict is None:
self.announce(indent +
"no option dict for '%s' command" % cmd_name)
else:
self.announce(indent +
"option dict for '%s' command:" % cmd_name)
out = pformat(opt_dict)
for line in out.split('\n'):
self.announce(indent + " " + line)
# -- Config file finding/parsing methods ---------------------------
def find_config_files(self):
"""Find as many configuration files as should be processed for this
platform, and return a list of filenames in the order in which they
should be parsed. The filenames returned are guaranteed to exist
(modulo nasty race conditions).
There are three possible config files: distutils.cfg in the
Distutils installation directory (ie. where the top-level
Distutils __init__.py file lives), a file in the user's home
directory named .pydistutils.cfg on Unix and pydistutils.cfg
on Windows/Mac, and setup.cfg in the current directory.
"""
files = []
check_environ()
# Where to look for the system-wide Distutils config file
sys_dir = os.path.dirname(sys.modules['distutils'].__file__)
# Look for the system config file
sys_file = os.path.join(sys_dir, "distutils.cfg")
if os.path.isfile(sys_file):
files.append(sys_file)
# What to call the per-user config file
if os.name == 'posix':
user_filename = ".pydistutils.cfg"
else:
user_filename = "pydistutils.cfg"
# And look for the user config file
user_file = os.path.join(os.path.expanduser('~'), user_filename)
if os.path.isfile(user_file):
files.append(user_file)
# All platforms support local setup.cfg
local_file = "setup.cfg"
if os.path.isfile(local_file):
files.append(local_file)
return files
def parse_config_files(self, filenames=None):
from configparser import ConfigParser
if filenames is None:
filenames = self.find_config_files()
if DEBUG:
self.announce("Distribution.parse_config_files():")
parser = ConfigParser()
for filename in filenames:
if DEBUG:
self.announce(" reading %s" % filename)
parser.read(filename)
for section in parser.sections():
options = parser.options(section)
opt_dict = self.get_option_dict(section)
for opt in options:
if opt != '__name__':
val = parser.get(section,opt)
opt = opt.replace('-', '_')
opt_dict[opt] = (filename, val)
# Make the ConfigParser forget everything (so we retain
# the original filenames that options come from)
parser.__init__()
# If there was a "global" section in the config file, use it
# to set Distribution options.
if 'global' in self.command_options:
for (opt, (src, val)) in self.command_options['global'].items():
alias = self.negative_opt.get(opt)
try:
if alias:
setattr(self, alias, not strtobool(val))
elif opt in ('verbose', 'dry_run'): # ugh!
setattr(self, opt, strtobool(val))
else:
setattr(self, opt, val)
except ValueError as msg:
raise DistutilsOptionError(msg)
# -- Command-line parsing methods ----------------------------------
def parse_command_line(self):
"""Parse the setup script's command line, taken from the
'script_args' instance attribute (which defaults to 'sys.argv[1:]'
-- see 'setup()' in core.py). This list is first processed for
"global options" -- options that set attributes of the Distribution
instance. Then, it is alternately scanned for Distutils commands
and options for that command. Each new command terminates the
options for the previous command. The allowed options for a
command are determined by the 'user_options' attribute of the
command class -- thus, we have to be able to load command classes
in order to parse the command line. Any error in that 'options'
attribute raises DistutilsGetoptError; any error on the
command-line raises DistutilsArgError. If no Distutils commands
were found on the command line, raises DistutilsArgError. Return
true if command-line was successfully parsed and we should carry
on with executing commands; false if no errors but we shouldn't
execute commands (currently, this only happens if user asks for
help).
"""
#
# We now have enough information to show the Macintosh dialog
# that allows the user to interactively specify the "command line".
#
toplevel_options = self._get_toplevel_options()
# We have to parse the command line a bit at a time -- global
# options, then the first command, then its options, and so on --
# because each command will be handled by a different class, and
# the options that are valid for a particular class aren't known
# until we have loaded the command class, which doesn't happen
# until we know what the command is.
self.commands = []
parser = FancyGetopt(toplevel_options + self.display_options)
parser.set_negative_aliases(self.negative_opt)
parser.set_aliases({'licence': 'license'})
args = parser.getopt(args=self.script_args, object=self)
option_order = parser.get_option_order()
log.set_verbosity(self.verbose)
# for display options we return immediately
if self.handle_display_options(option_order):
return
while args:
args = self._parse_command_opts(parser, args)
if args is None: # user asked for help (and got it)
return
# Handle the cases of --help as a "global" option, ie.
# "setup.py --help" and "setup.py --help command ...". For the
# former, we show global options (--verbose, --dry-run, etc.)
# and display-only options (--name, --version, etc.); for the
# latter, we omit the display-only options and show help for
# each command listed on the command line.
if self.help:
self._show_help(parser,
display_options=len(self.commands) == 0,
commands=self.commands)
return
# Oops, no commands found -- an end-user error
if not self.commands:
raise DistutilsArgError("no commands supplied")
# All is well: return true
return True
def _get_toplevel_options(self):
"""Return the non-display options recognized at the top level.
This includes options that are recognized *only* at the top
level as well as options recognized for commands.
"""
return self.global_options + [
("command-packages=", None,
"list of packages that provide distutils commands"),
]
def _parse_command_opts(self, parser, args):
"""Parse the command-line options for a single command.
'parser' must be a FancyGetopt instance; 'args' must be the list
of arguments, starting with the current command (whose options
we are about to parse). Returns a new version of 'args' with
the next command at the front of the list; will be the empty
list if there are no more commands on the command line. Returns
None if the user asked for help on this command.
"""
# late import because of mutual dependence between these modules
from distutils.cmd import Command
# Pull the current command from the head of the command line
command = args[0]
if not command_re.match(command):
raise SystemExit("invalid command name '%s'" % command)
self.commands.append(command)
# Dig up the command class that implements this command, so we
# 1) know that it's a valid command, and 2) know which options
# it takes.
try:
cmd_class = self.get_command_class(command)
except DistutilsModuleError as msg:
raise DistutilsArgError(msg)
# Require that the command class be derived from Command -- want
# to be sure that the basic "command" interface is implemented.
if not issubclass(cmd_class, Command):
raise DistutilsClassError(
"command class %s must subclass Command" % cmd_class)
# Also make sure that the command object provides a list of its
# known options.
if not (hasattr(cmd_class, 'user_options') and
isinstance(cmd_class.user_options, list)):
raise DistutilsClassError(("command class %s must provide " +
"'user_options' attribute (a list of tuples)") % \
cmd_class)
# If the command class has a list of negative alias options,
# merge it in with the global negative aliases.
negative_opt = self.negative_opt
if hasattr(cmd_class, 'negative_opt'):
negative_opt = negative_opt.copy()
negative_opt.update(cmd_class.negative_opt)
# Check for help_options in command class. They have a different
# format (tuple of four) so we need to preprocess them here.
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_options = fix_help_options(cmd_class.help_options)
else:
help_options = []
# All commands support the global options too, just by adding
# in 'global_options'.
parser.set_option_table(self.global_options +
cmd_class.user_options +
help_options)
parser.set_negative_aliases(negative_opt)
(args, opts) = parser.getopt(args[1:])
if hasattr(opts, 'help') and opts.help:
self._show_help(parser, display_options=0, commands=[cmd_class])
return
if (hasattr(cmd_class, 'help_options') and
isinstance(cmd_class.help_options, list)):
help_option_found=0
for (help_option, short, desc, func) in cmd_class.help_options:
if hasattr(opts, parser.get_attr_name(help_option)):
help_option_found=1
if hasattr(func, '__call__'):
func()
else:
raise DistutilsClassError(
"invalid help function %r for help option '%s': "
"must be a callable object (function, etc.)"
% (func, help_option))
if help_option_found:
return
# Put the options from the command-line into their official
# holding pen, the 'command_options' dictionary.
opt_dict = self.get_option_dict(command)
for (name, value) in vars(opts).items():
opt_dict[name] = ("command line", value)
return args
def finalize_options(self):
"""Set final values for all the options on the Distribution
instance, analogous to the .finalize_options() method of Command
objects.
"""
for attr in ('keywords', 'platforms'):
value = getattr(self.metadata, attr)
if value is None:
continue
if isinstance(value, str):
value = [elm.strip() for elm in value.split(',')]
setattr(self.metadata, attr, value)
def _show_help(self, parser, global_options=1, display_options=1,
commands=[]):
"""Show help for the setup script command-line in the form of
several lists of command-line options. 'parser' should be a
FancyGetopt instance; do not expect it to be returned in the
same state, as its option table will be reset to make it
generate the correct help text.
If 'global_options' is true, lists the global options:
--verbose, --dry-run, etc. If 'display_options' is true, lists
the "display-only" options: --name, --version, etc. Finally,
lists per-command help for every command name or command class
in 'commands'.
"""
# late import because of mutual dependence between these modules
from distutils.core import gen_usage
from distutils.cmd import Command
if global_options:
if display_options:
options = self._get_toplevel_options()
else:
options = self.global_options
parser.set_option_table(options)
parser.print_help(self.common_usage + "\nGlobal options:")
print('')
if display_options:
parser.set_option_table(self.display_options)
parser.print_help(
"Information display options (just display " +
"information, ignore any commands)")
print('')
for command in self.commands:
if isinstance(command, type) and issubclass(command, Command):
klass = command
else:
klass = self.get_command_class(command)
if (hasattr(klass, 'help_options') and
isinstance(klass.help_options, list)):
parser.set_option_table(klass.user_options +
fix_help_options(klass.help_options))
else:
parser.set_option_table(klass.user_options)
parser.print_help("Options for '%s' command:" % klass.__name__)
print('')
print(gen_usage(self.script_name))
def handle_display_options(self, option_order):
"""If there were any non-global "display-only" options
(--help-commands or the metadata display options) on the command
line, display the requested info and return true; else return
false.
"""
from distutils.core import gen_usage
# User just wants a list of commands -- we'll print it out and stop
# processing now (ie. if they ran "setup --help-commands foo bar",
# we ignore "foo bar").
if self.help_commands:
self.print_commands()
print('')
print(gen_usage(self.script_name))
return 1
# If user supplied any of the "display metadata" options, then
# display that metadata in the order in which the user supplied the
# metadata options.
any_display_options = 0
is_display_option = {}
for option in self.display_options:
is_display_option[option[0]] = 1
for (opt, val) in option_order:
if val and is_display_option.get(opt):
opt = translate_longopt(opt)
value = getattr(self.metadata, "get_"+opt)()
if opt in ['keywords', 'platforms']:
print(','.join(value))
elif opt in ('classifiers', 'provides', 'requires',
'obsoletes'):
print('\n'.join(value))
else:
print(value)
any_display_options = 1
return any_display_options
def print_command_list(self, commands, header, max_length):
"""Print a subset of the list of all commands -- used by
'print_commands()'.
"""
print(header + ":")
for cmd in commands:
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
print(" %-*s %s" % (max_length, cmd, description))
def print_commands(self):
"""Print out a help message listing all available commands with a
description of each. The list is divided into "standard commands"
(listed in distutils.command.__all__) and "extra commands"
(mentioned in self.cmdclass, but not a standard command). The
descriptions come from the command class attribute
'description'.
"""
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
max_length = 0
for cmd in (std_commands + extra_commands):
if len(cmd) > max_length:
max_length = len(cmd)
self.print_command_list(std_commands,
"Standard commands",
max_length)
if extra_commands:
print()
self.print_command_list(extra_commands,
"Extra commands",
max_length)
def get_command_list(self):
"""Get a list of (command, description) tuples.
The list is divided into "standard commands" (listed in
distutils.command.__all__) and "extra commands" (mentioned in
self.cmdclass, but not a standard command). The descriptions come
from the command class attribute 'description'.
"""
# Currently this is only used on Mac OS, for the Mac-only GUI
# Distutils interface (by Jack Jansen)
import distutils.command
std_commands = distutils.command.__all__
is_std = {}
for cmd in std_commands:
is_std[cmd] = 1
extra_commands = []
for cmd in self.cmdclass.keys():
if not is_std.get(cmd):
extra_commands.append(cmd)
rv = []
for cmd in (std_commands + extra_commands):
klass = self.cmdclass.get(cmd)
if not klass:
klass = self.get_command_class(cmd)
try:
description = klass.description
except AttributeError:
description = "(no description available)"
rv.append((cmd, description))
return rv
# -- Command class/object methods ----------------------------------
def get_command_packages(self):
"""Return a list of packages from which commands are loaded."""
pkgs = self.command_packages
if not isinstance(pkgs, list):
if pkgs is None:
pkgs = ''
pkgs = [pkg.strip() for pkg in pkgs.split(',') if pkg != '']
if "distutils.command" not in pkgs:
pkgs.insert(0, "distutils.command")
self.command_packages = pkgs
return pkgs
def get_command_class(self, command):
"""Return the class that implements the Distutils command named by
'command'. First we check the 'cmdclass' dictionary; if the
command is mentioned there, we fetch the class object from the
dictionary and return it. Otherwise we load the command module
("distutils.command." + command) and fetch the command class from
the module. The loaded class is also stored in 'cmdclass'
to speed future calls to 'get_command_class()'.
Raises DistutilsModuleError if the expected module could not be
found, or if that module does not define the expected class.
"""
klass = self.cmdclass.get(command)
if klass:
return klass
for pkgname in self.get_command_packages():
module_name = "%s.%s" % (pkgname, command)
klass_name = command
try:
__import__ (module_name)
module = sys.modules[module_name]
except ImportError:
continue
try:
klass = getattr(module, klass_name)
except AttributeError:
raise DistutilsModuleError(
"invalid command '%s' (no class '%s' in module '%s')"
% (command, klass_name, module_name))
self.cmdclass[command] = klass
return klass
raise DistutilsModuleError("invalid command '%s'" % command)
def get_command_obj(self, command, create=1):
"""Return the command object for 'command'. Normally this object
is cached on a previous call to 'get_command_obj()'; if no command
object for 'command' is in the cache, then we either create and
return it (if 'create' is true) or return None.
"""
cmd_obj = self.command_obj.get(command)
if not cmd_obj and create:
if DEBUG:
self.announce("Distribution.get_command_obj(): " \
"creating '%s' command object" % command)
klass = self.get_command_class(command)
cmd_obj = self.command_obj[command] = klass(self)
self.have_run[command] = 0
# Set any options that were supplied in config files
# or on the command line. (NB. support for error
# reporting is lame here: any errors aren't reported
# until 'finalize_options()' is called, which means
# we won't report the source of the error.)
options = self.command_options.get(command)
if options:
self._set_command_options(cmd_obj, options)
return cmd_obj
def _set_command_options(self, command_obj, option_dict=None):
"""Set the options for 'command_obj' from 'option_dict'. Basically
this means copying elements of a dictionary ('option_dict') to
attributes of an instance ('command').
'command_obj' must be a Command instance. If 'option_dict' is not
supplied, uses the standard option dictionary for this command
(from 'self.command_options').
"""
command_name = command_obj.get_command_name()
if option_dict is None:
option_dict = self.get_option_dict(command_name)
if DEBUG:
self.announce(" setting options for '%s' command:" % command_name)
for (option, (source, value)) in option_dict.items():
if DEBUG:
self.announce(" %s = %s (from %s)" % (option, value,
source))
try:
bool_opts = [translate_longopt(o)
for o in command_obj.boolean_options]
except AttributeError:
bool_opts = []
try:
neg_opt = command_obj.negative_opt
except AttributeError:
neg_opt = {}
try:
is_string = isinstance(value, str)
if option in neg_opt and is_string:
setattr(command_obj, neg_opt[option], not strtobool(value))
elif option in bool_opts and is_string:
setattr(command_obj, option, strtobool(value))
elif hasattr(command_obj, option):
setattr(command_obj, option, value)
else:
raise DistutilsOptionError(
"error in %s: command '%s' has no such option '%s'"
% (source, command_name, option))
except ValueError as msg:
raise DistutilsOptionError(msg)
def reinitialize_command(self, command, reinit_subcommands=0):
"""Reinitializes a command to the state it was in when first
returned by 'get_command_obj()': ie., initialized but not yet
finalized. This provides the opportunity to sneak option
values in programmatically, overriding or supplementing
user-supplied values from the config files and command line.
You'll have to re-finalize the command object (by calling
'finalize_options()' or 'ensure_finalized()') before using it for
real.
'command' should be a command name (string) or command object. If
'reinit_subcommands' is true, also reinitializes the command's
sub-commands, as declared by the 'sub_commands' class attribute (if
it has one). See the "install" command for an example. Only
reinitializes the sub-commands that actually matter, ie. those
whose test predicates return true.
Returns the reinitialized command object.
"""
from distutils.cmd import Command
if not isinstance(command, Command):
command_name = command
command = self.get_command_obj(command_name)
else:
command_name = command.get_command_name()
if not command.finalized:
return command
command.initialize_options()
command.finalized = 0
self.have_run[command_name] = 0
self._set_command_options(command)
if reinit_subcommands:
for sub in command.get_sub_commands():
self.reinitialize_command(sub, reinit_subcommands)
return command
# -- Methods that operate on the Distribution ----------------------
def announce(self, msg, level=log.INFO):
log.log(level, msg)
def run_commands(self):
"""Run each command that was seen on the setup script command line.
Uses the list of commands found and cache of command objects
created by 'get_command_obj()'.
"""
for cmd in self.commands:
self.run_command(cmd)
# -- Methods that operate on its Commands --------------------------
def run_command(self, command):
"""Do whatever it takes to run a command (including nothing at all,
if the command has already been run). Specifically: if we have
already created and run the command named by 'command', return
silently without doing anything. If the command named by 'command'
doesn't even have a command object yet, create one. Then invoke
'run()' on that command object (or an existing one).
"""
# Already been here, done that? then return silently.
if self.have_run.get(command):
return
log.info("running %s", command)
cmd_obj = self.get_command_obj(command)
cmd_obj.ensure_finalized()
cmd_obj.run()
self.have_run[command] = 1
# -- Distribution query methods ------------------------------------
def has_pure_modules(self):
return len(self.packages or self.py_modules or []) > 0
def has_ext_modules(self):
return self.ext_modules and len(self.ext_modules) > 0
def has_c_libraries(self):
return self.libraries and len(self.libraries) > 0
def has_modules(self):
return self.has_pure_modules() or self.has_ext_modules()
def has_headers(self):
return self.headers and len(self.headers) > 0
def has_scripts(self):
return self.scripts and len(self.scripts) > 0
def has_data_files(self):
return self.data_files and len(self.data_files) > 0
def is_pure(self):
return (self.has_pure_modules() and
not self.has_ext_modules() and
not self.has_c_libraries())
# -- Metadata query methods ----------------------------------------
# If you're looking for 'get_name()', 'get_version()', and so forth,
# they are defined in a sneaky way: the constructor binds self.get_XXX
# to self.metadata.get_XXX. The actual code is in the
# DistributionMetadata class, below.
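
# Editor's note: the helper below is an illustrative sketch added for this
# write-up, not part of the original distutils module.  It demonstrates the
# delegation described in the comment above: the metadata accessors bound on
# a Distribution instance simply forward to its DistributionMetadata.  The
# project name and version are hypothetical, and nothing here runs at import
# time.
def _example_metadata_delegation():
    dist = Distribution({'name': 'spam', 'version': '1.0'})
    # get_name(), get_version(), ... were bound to self.metadata.get_* in
    # Distribution.__init__, so both spellings return the same values.
    assert dist.get_name() == dist.metadata.get_name() == 'spam'
    assert dist.get_fullname() == 'spam-1.0'
    return dist
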
class DistributionMetadata:
"""Dummy class to hold the distribution meta-data: name, version,
author, and so forth.
"""
_METHOD_BASENAMES = ("name", "version", "author", "author_email",
"maintainer", "maintainer_email", "url",
"license", "description", "long_description",
"keywords", "platforms", "fullname", "contact",
"contact_email", "license", "classifiers",
"download_url",
# PEP 314
"provides", "requires", "obsoletes",
)
def __init__ (self):
self.name = None
self.version = None
self.author = None
self.author_email = None
self.maintainer = None
self.maintainer_email = None
self.url = None
self.license = None
self.description = None
self.long_description = None
self.keywords = None
self.platforms = None
self.classifiers = None
self.download_url = None
# PEP 314
self.provides = None
self.requires = None
self.obsoletes = None
def write_pkg_info(self, base_dir):
"""Write the PKG-INFO file into the release tree.
"""
pkg_info = open(os.path.join(base_dir, 'PKG-INFO'), 'w')
try:
self.write_pkg_file(pkg_info)
finally:
pkg_info.close()
def write_pkg_file(self, file):
"""Write the PKG-INFO format data to a file object.
"""
version = '1.0'
if self.provides or self.requires or self.obsoletes:
version = '1.1'
file.write('Metadata-Version: %s\n' % version)
file.write('Name: %s\n' % self.get_name() )
file.write('Version: %s\n' % self.get_version() )
file.write('Summary: %s\n' % self.get_description() )
file.write('Home-page: %s\n' % self.get_url() )
file.write('Author: %s\n' % self.get_contact() )
file.write('Author-email: %s\n' % self.get_contact_email() )
file.write('License: %s\n' % self.get_license() )
if self.download_url:
file.write('Download-URL: %s\n' % self.download_url)
long_desc = rfc822_escape(self.get_long_description())
file.write('Description: %s\n' % long_desc)
keywords = ','.join(self.get_keywords())
if keywords:
file.write('Keywords: %s\n' % keywords )
self._write_list(file, 'Platform', self.get_platforms())
self._write_list(file, 'Classifier', self.get_classifiers())
# PEP 314
self._write_list(file, 'Requires', self.get_requires())
self._write_list(file, 'Provides', self.get_provides())
self._write_list(file, 'Obsoletes', self.get_obsoletes())
def _write_list(self, file, name, values):
for value in values:
file.write('%s: %s\n' % (name, value))
# -- Metadata query methods ----------------------------------------
def get_name(self):
return self.name or "UNKNOWN"
def get_version(self):
return self.version or "0.0.0"
def get_fullname(self):
return "%s-%s" % (self.get_name(), self.get_version())
def get_author(self):
return self.author or "UNKNOWN"
def get_author_email(self):
return self.author_email or "UNKNOWN"
def get_maintainer(self):
return self.maintainer or "UNKNOWN"
def get_maintainer_email(self):
return self.maintainer_email or "UNKNOWN"
def get_contact(self):
return self.maintainer or self.author or "UNKNOWN"
def get_contact_email(self):
return self.maintainer_email or self.author_email or "UNKNOWN"
def get_url(self):
return self.url or "UNKNOWN"
def get_license(self):
return self.license or "UNKNOWN"
get_licence = get_license
def get_description(self):
return self.description or "UNKNOWN"
def get_long_description(self):
return self.long_description or "UNKNOWN"
def get_keywords(self):
return self.keywords or []
def get_platforms(self):
return self.platforms or ["UNKNOWN"]
def get_classifiers(self):
return self.classifiers or []
def get_download_url(self):
return self.download_url or "UNKNOWN"
# PEP 314
def get_requires(self):
return self.requires or []
def set_requires(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.requires = value
def get_provides(self):
return self.provides or []
def set_provides(self, value):
value = [v.strip() for v in value]
for v in value:
import distutils.versionpredicate
distutils.versionpredicate.split_provision(v)
self.provides = value
def get_obsoletes(self):
return self.obsoletes or []
def set_obsoletes(self, value):
import distutils.versionpredicate
for v in value:
distutils.versionpredicate.VersionPredicate(v)
self.obsoletes = value
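
# Editor's note: the helper below is an illustrative sketch added for this
# write-up, not part of the original distutils module.  It shows the PKG-INFO
# text produced by write_pkg_file() above for a minimal, hypothetical metadata
# set, and that declaring 'requires' bumps Metadata-Version from 1.0 to 1.1
# (PEP 314).  Nothing here runs at import time.
def _example_write_pkg_file():
    import io

    meta = DistributionMetadata()
    meta.name = 'spam'
    meta.version = '1.0'
    meta.requires = ['eggs']
    buf = io.StringIO()
    meta.write_pkg_file(buf)
    lines = buf.getvalue().splitlines()
    assert lines[0] == 'Metadata-Version: 1.1'
    assert lines[1] == 'Name: spam'
    return buf.getvalue()
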
def fix_help_options(options):
"""Convert a 4-tuple 'help_options' list as found in various command
classes to the 3-tuple form required by FancyGetopt.
"""
new_options = []
for help_tuple in options:
new_options.append(help_tuple[0:3])
return new_options
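
# Editor's note: the helper below is an illustrative sketch added for this
# write-up, not part of the original distutils module.  It demonstrates the
# 4-tuple -> 3-tuple conversion performed by fix_help_options(); the option
# name, description and callback are hypothetical.
def _example_fix_help_options():
    def _show_formats():
        print('formats: gztar, zip')

    four_tuples = [('help-formats', None, 'list available formats',
                    _show_formats)]
    assert fix_help_options(four_tuples) == [
        ('help-formats', None, 'list available formats')]
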
| theheros/kbengine | kbe/res/scripts/common/Lib/distutils/dist.py | Python | lgpl-3.0 | 47,477 |
#!/usr/bin/python
''' Auto-generated ui widget plugin '''
from projexui.qt.QtDesigner import QPyDesignerCustomWidgetPlugin
from projexui.qt.QtGui import QIcon
import projex.resources
from projexui.widgets.xscintillaedit import XScintillaEdit as Base
setattr(Base, '__designer_mode__', True)
DEFAULT_XML = '''<ui language="c++" displayname="XScintillaEdit">
<widget class="XScintillaEdit" name="XScintillaEdit"/>
<customwidgets>
<customwidget>
<class>XScintillaEdit</class>
<header>projexui.widgets.xscintillaedit</header>
<addpagemethod>%(addpagemethod)s</addpagemethod>
<propertyspecifications>
%(propertyspecs)s
</propertyspecifications>
</customwidget>
</customwidgets>
</ui>'''
class XScintillaEditPlugin(QPyDesignerCustomWidgetPlugin):
def __init__(self, parent=None):
super(XScintillaEditPlugin, self).__init__(parent)
self.initialized = False
def initialize(self, core):
if self.initialized:
return
self.initialized = True
def isInitialized(self):
return self.initialized
def createWidget(self, parent):
return Base(parent)
def name(self):
return getattr(Base, '__designer_name__', Base.__name__)
def group(self):
return getattr(Base, '__designer_group__', 'ProjexUI')
def icon(self):
default = projex.resources.find('img/logo_16.png')
return QIcon(getattr(Base, '__designer_icon__', default))
def toolTip( self ):
docs = getattr(Base, '__doc__', '')
if docs is None:
docs = ''
return getattr(Base, '__designer_tooltip__', docs)
def whatsThis( self ):
return ''
def isContainer( self ):
return getattr(Base, '__designer_container__', False)
def includeFile( self ):
return 'projexui.widgets.xscintillaedit'
def domXml( self ):
opts = {}
specs = []
for prop, info in getattr(Base, '__designer_propspecs__', {}).items():
xml = '<%spropertyspecification name="%s" type="%s"/>'
xml %= (info[0], prop, info[1])
specs.append(xml)
opts['addpagemethod'] = getattr(Base, '__designer_addpage__', '')
opts['propertyspecs'] = ''.join(specs)
default = DEFAULT_XML % opts
return getattr(Base, '__designer_xml__', default)
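
# Editor's note: the helper below is an illustrative sketch added for this
# write-up, not part of the auto-generated plugin.  It shows the XML that
# domXml() assembles from DEFAULT_XML when the wrapped widget declares a
# property specification and an add-page method; the names 'addPage' and
# 'filename' are invented for the example.
def _example_domxml_substitution():
    specs = '<stringpropertyspecification name="filename" type="filename"/>'
    return DEFAULT_XML % {'addpagemethod': 'addPage', 'propertyspecs': specs}
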
| bitesofcode/projexui | projexui/designer/build/xscintillaeditplugin.py | Python | lgpl-3.0 | 2,537 |
"""
Tri-Polar Grid Projected Plotting
=================================
This example demonstrates cell plots of data on the semi-structured ORCA2 model
grid.
First, the data is projected into the PlateCarree coordinate reference system.
Second, four pcolormesh plots are created from this projected dataset,
using different projections for the output image.
"""
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import iris
import iris.analysis.cartography
import iris.plot as iplt
import iris.quickplot as qplt
def main():
# Load data
filepath = iris.sample_data_path("orca2_votemper.nc")
cube = iris.load_cube(filepath)
# Choose plot projections
projections = {}
projections["Mollweide"] = ccrs.Mollweide()
projections["PlateCarree"] = ccrs.PlateCarree()
projections["NorthPolarStereo"] = ccrs.NorthPolarStereo()
projections["Orthographic"] = ccrs.Orthographic(
central_longitude=-90, central_latitude=45
)
pcarree = projections["PlateCarree"]
# Transform cube to target projection
new_cube, extent = iris.analysis.cartography.project(
cube, pcarree, nx=400, ny=200
)
# Plot data in each projection
for name in sorted(projections):
fig = plt.figure()
fig.suptitle("ORCA2 Data Projected to {}".format(name))
# Set up axes and title
ax = plt.subplot(projection=projections[name])
# Set limits
ax.set_global()
# plot with Iris quickplot pcolormesh
qplt.pcolormesh(new_cube)
# Draw coastlines
ax.coastlines()
iplt.show()
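
# Editor's note: the helper below is an illustrative sketch added for this
# write-up, not part of the original example.  It reuses the same
# iris.analysis.cartography.project() call as main(), but lets the caller pick
# the target projection and grid resolution; Robinson and the nx/ny defaults
# are arbitrary choices.
def project_cube(cube, target_proj=None, nx=200, ny=100):
    if target_proj is None:
        target_proj = ccrs.Robinson()
    # Returns (projected_cube, extent), just as in main() above.
    return iris.analysis.cartography.project(cube, target_proj, nx=nx, ny=ny)
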
if __name__ == "__main__":
main()
| pp-mo/iris | docs/iris/example_code/General/orca_projection.py | Python | lgpl-3.0 | 1,647 |
import sys
from services.spawn import MobileTemplate
from services.spawn import WeaponTemplate
from resources.datatables import WeaponType
from resources.datatables import Difficulty
from resources.datatables import Options
from resources.datatables import FactionStatus
from java.util import Vector
def addTemplate(core):
mobileTemplate = MobileTemplate()
mobileTemplate.setCreatureName('crackdown_imperial_warrant_officer_ii_hard')
mobileTemplate.setLevel(33)
mobileTemplate.setDifficulty(Difficulty.ELITE)
mobileTemplate.setMinSpawnDistance(4)
mobileTemplate.setMaxSpawnDistance(8)
mobileTemplate.setDeathblow(True)
mobileTemplate.setScale(1)
mobileTemplate.setSocialGroup("imperial")
mobileTemplate.setAssistRange(6)
mobileTemplate.setStalker(False)
mobileTemplate.setFaction("imperial")
mobileTemplate.setFactionStatus(FactionStatus.Combatant)
templates = Vector()
templates.add('object/mobile/shared_dressed_npe_imperial_officer.iff')
mobileTemplate.setTemplates(templates)
weaponTemplates = Vector()
weapontemplate = WeaponTemplate('object/weapon/ranged/carbine/shared_carbine_e11.iff', WeaponType.CARBINE, 1.0, 15, 'energy')
weaponTemplates.add(weapontemplate)
mobileTemplate.setWeaponTemplateVector(weaponTemplates)
attacks = Vector()
mobileTemplate.setDefaultAttack('rangedShot')
mobileTemplate.setAttacks(attacks)
core.spawnService.addMobileTemplate('imp_warrant_offi_1st_class_ii_33', mobileTemplate)
return
| ProjectSWGCore/NGECore2 | scripts/mobiles/generic/faction/imperial/imp_warant_officer_ii_1st_class_33.py | Python | lgpl-3.0 | 1,458 |
# appnexus cryptography edited google pep8 smarkets
import ast
# This comment should not prevent the I201 below, it is not a newline.
import X # I201
import flake8_import_order # I201
| public/flake8-import-order | tests/test_cases/missing_newline.py | Python | lgpl-3.0 | 184 |
# Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## A Mixin for applications that start InteractiveShell instances.
#
# Provides configurables for loading extensions and executing files as part of
# configuring a Shell environment.
#
# The following methods should be called by the :meth:`initialize` method of the
# subclass:
#
# - :meth:`init_path`
# - :meth:`init_shell` (to be implemented by the subclass)
# - :meth:`init_gui_pylab`
# - :meth:`init_extensions`
# - :meth:`init_code`
## Execute the given command string.
#c.InteractiveShellApp.code_to_run = ''
## Run the file referenced by the PYTHONSTARTUP environment variable at IPython
# startup.
#c.InteractiveShellApp.exec_PYTHONSTARTUP = True
## List of files to run at IPython startup.
#c.InteractiveShellApp.exec_files = []
## lines of code to run at IPython startup.
#c.InteractiveShellApp.exec_lines = []
## A list of dotted module names of IPython extensions to load.
#c.InteractiveShellApp.extensions = []
## dotted module name of an IPython extension to load.
#c.InteractiveShellApp.extra_extension = ''
## A file to be run
#c.InteractiveShellApp.file_to_run = ''
## Enable GUI event loop integration with any of ('glut', 'gtk', 'gtk2', 'gtk3',
# 'osx', 'pyglet', 'qt', 'qt4', 'qt5', 'tk', 'wx', 'gtk2', 'qt4').
#c.InteractiveShellApp.gui = None
## Should variables loaded at startup (by startup files, exec_lines, etc.) be
# hidden from tools like %who?
#c.InteractiveShellApp.hide_initial_ns = True
## Configure matplotlib for interactive use with the default matplotlib backend.
#c.InteractiveShellApp.matplotlib = None
## Run the module as a script.
#c.InteractiveShellApp.module_to_run = ''
## Pre-load matplotlib and numpy for interactive use, selecting a particular
# matplotlib backend and loop integration.
#c.InteractiveShellApp.pylab = None
## If true, IPython will populate the user namespace with numpy, pylab, etc. and
# an ``import *`` is done from numpy and pylab, when using pylab mode.
#
# When False, pylab mode should not import any names into the user namespace.
#c.InteractiveShellApp.pylab_import_all = True
## Reraise exceptions encountered loading IPython extensions?
#c.InteractiveShellApp.reraise_ipython_extension_failures = False
#------------------------------------------------------------------------------
# Application(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## This is an application.
## The date format used by logging formatters for %(asctime)s
#c.Application.log_datefmt = '%Y-%m-%d %H:%M:%S'
## The Logging format template
#c.Application.log_format = '[%(name)s]%(highlevel)s %(message)s'
## Set the log level by value or name.
#c.Application.log_level = 30
#------------------------------------------------------------------------------
# BaseIPythonApplication(Application) configuration
#------------------------------------------------------------------------------
## IPython: an enhanced interactive Python shell.
## Whether to create profile dir if it doesn't exist
#c.BaseIPythonApplication.auto_create = False
## Whether to install the default config files into the profile dir. If a new
# profile is being created, and IPython contains config files for that profile,
# then they will be staged into the new directory. Otherwise, default config
# files will be automatically generated.
#c.BaseIPythonApplication.copy_config_files = False
## Path to an extra config file to load.
#
# If specified, load this config file in addition to any other IPython config.
#c.BaseIPythonApplication.extra_config_file = ''
## The name of the IPython directory. This directory is used for logging
# configuration (through profiles), history storage, etc. The default is usually
# $HOME/.ipython. This option can also be specified through the environment
# variable IPYTHONDIR.
#c.BaseIPythonApplication.ipython_dir = ''
## Whether to overwrite existing config files when copying
#c.BaseIPythonApplication.overwrite = False
## The IPython profile to use.
#c.BaseIPythonApplication.profile = 'default'
## Create a massive crash report when IPython encounters what may be an internal
# error. The default is to append a short message to the usual traceback
#c.BaseIPythonApplication.verbose_crash = False
#------------------------------------------------------------------------------
# TerminalIPythonApp(BaseIPythonApplication,InteractiveShellApp) configuration
#------------------------------------------------------------------------------
## Whether to display a banner upon starting IPython.
#c.TerminalIPythonApp.display_banner = True
## If a command or file is given via the command-line, e.g. 'ipython foo.py',
# start an interactive shell after executing the file or command.
#c.TerminalIPythonApp.force_interact = False
## Class to use to instantiate the TerminalInteractiveShell object. Useful for
# custom Frontends
#c.TerminalIPythonApp.interactive_shell_class = 'IPython.terminal.interactiveshell.TerminalInteractiveShell'
## Start IPython quickly by skipping the loading of config files.
#c.TerminalIPythonApp.quick = False
#------------------------------------------------------------------------------
# InteractiveShell(SingletonConfigurable) configuration
#------------------------------------------------------------------------------
## An enhanced, interactive shell for Python.
## 'all', 'last', 'last_expr' or 'none', 'last_expr_or_assign' specifying which
# nodes should be run interactively (displaying output from expressions).
#c.InteractiveShell.ast_node_interactivity = 'last_expr'
## A list of ast.NodeTransformer subclass instances, which will be applied to
# user input before code is run.
#c.InteractiveShell.ast_transformers = []
## Make IPython automatically call any callable object even if you didn't type
# explicit parentheses. For example, 'str 43' becomes 'str(43)' automatically.
# The value can be '0' to disable the feature, '1' for 'smart' autocall, where
# it is not applied if there are no more arguments on the line, and '2' for
# 'full' autocall, where all callable objects are automatically called (even if
# no arguments are present).
#c.InteractiveShell.autocall = 0
## Autoindent IPython code entered interactively.
#c.InteractiveShell.autoindent = True
## Enable magic commands to be called without the leading %.
#c.InteractiveShell.automagic = True
## The part of the banner to be printed before the profile
#c.InteractiveShell.banner1 = "Python 3.5.2 (default, Nov 23 2017, 16:37:01) \nType 'copyright', 'credits' or 'license' for more information\nIPython 6.2.1 -- An enhanced Interactive Python. Type '?' for help.\n"
## The part of the banner to be printed after the profile
#c.InteractiveShell.banner2 = ''
## Set the size of the output cache. The default is 1000, you can change it
# permanently in your config file. Setting it to 0 completely disables the
# caching system, and the minimum value accepted is 3 (if you provide a value
# less than 3, it is reset to 0 and a warning is issued). This limit is defined
# because otherwise you'll spend more time re-flushing a too small cache than
# working
#c.InteractiveShell.cache_size = 1000
## Use colors for displaying information about objects. Because this information
# is passed through a pager (like 'less'), and some pagers get confused with
# color codes, this capability can be turned off.
#c.InteractiveShell.color_info = True
## Set the color scheme (NoColor, Neutral, Linux, or LightBG).
#c.InteractiveShell.colors = 'Neutral'
##
#c.InteractiveShell.debug = False
## Don't call post-execute functions that have failed in the past.
#c.InteractiveShell.disable_failing_post_execute = False
## If True, anything that would be passed to the pager will be displayed as
# regular output instead.
#c.InteractiveShell.display_page = False
## (Provisional API) enables html representation in mime bundles sent to pagers.
#c.InteractiveShell.enable_html_pager = False
## Total length of command history
#c.InteractiveShell.history_length = 10000
## The number of saved history entries to be loaded into the history buffer at
# startup.
#c.InteractiveShell.history_load_length = 1000
##
#c.InteractiveShell.ipython_dir = ''
## Start logging to the given file in append mode. Use `logfile` to specify a log
# file to **overwrite** logs to.
#c.InteractiveShell.logappend = ''
## The name of the logfile to use.
#c.InteractiveShell.logfile = ''
## Start logging to the default log file in overwrite mode. Use `logappend` to
# specify a log file to **append** logs to.
#c.InteractiveShell.logstart = False
##
#c.InteractiveShell.object_info_string_level = 0
## Automatically call the pdb debugger after every exception.
#c.InteractiveShell.pdb = False
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in1 = 'In [\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_in2 = ' .\\D.: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompt_out = 'Out[\\#]: '
## Deprecated since IPython 4.0 and ignored since 5.0, set
# TerminalInteractiveShell.prompts object directly.
#c.InteractiveShell.prompts_pad_left = True
##
#c.InteractiveShell.quiet = False
##
#c.InteractiveShell.separate_in = '\n'
##
#c.InteractiveShell.separate_out = ''
##
#c.InteractiveShell.separate_out2 = ''
## Show rewritten input, e.g. for autocall.
#c.InteractiveShell.show_rewritten_input = True
## Enables rich html representation of docstrings. (This requires the docrepr
# module).
#c.InteractiveShell.sphinxify_docstring = False
##
#c.InteractiveShell.wildcards_case_sensitive = True
## Switch modes for the IPython exception handlers.
#c.InteractiveShell.xmode = 'Context'
#------------------------------------------------------------------------------
# TerminalInteractiveShell(InteractiveShell) configuration
#------------------------------------------------------------------------------
## Set to confirm when you try to exit IPython with an EOF (Control-D in Unix,
# Control-Z/Enter in Windows). By typing 'exit' or 'quit', you can force a
# direct exit without any confirmation.
#c.TerminalInteractiveShell.confirm_exit = True
## Options for displaying tab completions, 'column', 'multicolumn', and
# 'readlinelike'. These options are for `prompt_toolkit`, see `prompt_toolkit`
# documentation for more information.
#c.TerminalInteractiveShell.display_completions = 'multicolumn'
## Shortcut style to use at the prompt. 'vi' or 'emacs'.
#c.TerminalInteractiveShell.editing_mode = 'emacs'
## Set the editor used by IPython (default to $EDITOR/vi/notepad).
#c.TerminalInteractiveShell.editor = 'nano'
## Enable vi (v) or Emacs (C-X C-E) shortcuts to open an external editor. This is
# in addition to the F2 binding, which is always enabled.
#c.TerminalInteractiveShell.extra_open_editor_shortcuts = False
## Provide an alternative handler to be called when the user presses Return. This
# is an advanced option intended for debugging, which may be changed or removed
# in later releases.
#c.TerminalInteractiveShell.handle_return = None
## Highlight matching brackets.
#c.TerminalInteractiveShell.highlight_matching_brackets = True
## The name or class of a Pygments style to use for syntax
# highlighting:
# rrt, abap, borland, monokai, trac, fruity, vs, autumn, igor, friendly, vim, bw, native, algol_nu, rainbow_dash, emacs, murphy, manni, paraiso-light, arduino, colorful, algol, lovelace, perldoc, tango, xcode, pastie, paraiso-dark, default
#c.TerminalInteractiveShell.highlighting_style = traitlets.Undefined
## Override highlighting format for specific tokens
#c.TerminalInteractiveShell.highlighting_style_overrides = {}
## Enable mouse support in the prompt (Note: prevents selecting text with the
# mouse)
#c.TerminalInteractiveShell.mouse_support = False
## Class used to generate Prompt token for prompt_toolkit
#c.TerminalInteractiveShell.prompts_class = 'IPython.terminal.prompts.Prompts'
## Use `raw_input` for the REPL, without completion and prompt colors.
#
# Useful when controlling IPython as a subprocess, and piping STDIN/OUT/ERR.
# Known usage are: IPython own testing machinery, and emacs inferior-shell
# integration through elpy.
#
# This mode default to `True` if the `IPY_TEST_SIMPLE_PROMPT` environment
# variable is set, or the current terminal is not a tty.
#c.TerminalInteractiveShell.simple_prompt = False
## Number of line at the bottom of the screen to reserve for the completion menu
#c.TerminalInteractiveShell.space_for_menu = 6
## Automatically set the terminal title
#c.TerminalInteractiveShell.term_title = True
## Customize the terminal title format. This is a python format string.
# Available substitutions are: {cwd}.
#c.TerminalInteractiveShell.term_title_format = 'IPython: {cwd}'
## Use 24bit colors instead of 256 colors in prompt highlighting. If your
# terminal supports true color, the following command should print 'TRUECOLOR'
# in orange: printf "\x1b[38;2;255;100;0mTRUECOLOR\x1b[0m\n"
#c.TerminalInteractiveShell.true_color = False
#------------------------------------------------------------------------------
# HistoryAccessor(HistoryAccessorBase) configuration
#------------------------------------------------------------------------------
## Access the history database without adding to it.
#
# This is intended for use by standalone history tools. IPython shells use
# HistoryManager, below, which is a subclass of this.
## Options for configuring the SQLite connection
#
# These options are passed as keyword args to sqlite3.connect when establishing
# database connections.
#c.HistoryAccessor.connection_options = {}
## enable the SQLite history
#
# set enabled=False to disable the SQLite history, in which case there will be
# no stored history, no SQLite connection, and no background saving thread.
# This may be necessary in some threaded environments where IPython is embedded.
#c.HistoryAccessor.enabled = True
## Path to file to use for SQLite history database.
#
# By default, IPython will put the history database in the IPython profile
# directory. If you would rather share one history among profiles, you can set
# this value in each, so that they are consistent.
#
# Due to an issue with fcntl, SQLite is known to misbehave on some NFS mounts.
# If you see IPython hanging, try setting this to something on a local disk,
# e.g::
#
# ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
#
# you can also use the specific value `:memory:` (including the colon at both
# ends but not the back ticks), to avoid creating a history file.
#c.HistoryAccessor.hist_file = ''
#------------------------------------------------------------------------------
# HistoryManager(HistoryAccessor) configuration
#------------------------------------------------------------------------------
## A class to organize all history-related functionality in one place.
## Write to database every x commands (higher values save disk access & power).
# Values of 1 or less effectively disable caching.
#c.HistoryManager.db_cache_size = 0
## Should the history database include output? (default: no)
#c.HistoryManager.db_log_output = False
#------------------------------------------------------------------------------
# ProfileDir(LoggingConfigurable) configuration
#------------------------------------------------------------------------------
## An object to manage the profile directory and its resources.
#
# The profile directory is used by all IPython applications, to manage
# configuration, logging and security.
#
# This object knows how to find, create and manage these directories. This
# should be used by any code that wants to handle profiles.
## Set the profile location directly. This overrides the logic used by the
# `profile` option.
#c.ProfileDir.location = ''
#------------------------------------------------------------------------------
# BaseFormatter(Configurable) configuration
#------------------------------------------------------------------------------
## A base formatter class that is configurable.
#
# This formatter should usually be used as the base class of all formatters. It
# is a traited :class:`Configurable` class and includes an extensible API for
# users to determine how their objects are formatted. The following logic is
# used to find a function to format a given object.
#
# 1. The object is introspected to see if it has a method with the name
# :attr:`print_method`. If is does, that object is passed to that method
# for formatting.
# 2. If no print method is found, three internal dictionaries are consulted
# to find print method: :attr:`singleton_printers`, :attr:`type_printers`
# and :attr:`deferred_printers`.
#
# Users should use these dictionaries to register functions that will be used to
# compute the format data for their objects (if those objects don't have the
# special print methods). The easiest way of using these dictionaries is through
# the :meth:`for_type` and :meth:`for_type_by_name` methods.
#
# If no function/callable is found to compute the format data, ``None`` is
# returned and this format type is not used.
##
#c.BaseFormatter.deferred_printers = {}
##
#c.BaseFormatter.enabled = True
##
#c.BaseFormatter.singleton_printers = {}
##
#c.BaseFormatter.type_printers = {}
#------------------------------------------------------------------------------
# PlainTextFormatter(BaseFormatter) configuration
#------------------------------------------------------------------------------
## The default pretty-printer.
#
# This uses :mod:`IPython.lib.pretty` to compute the format data of the object.
# If the object cannot be pretty printed, :func:`repr` is used. See the
# documentation of :mod:`IPython.lib.pretty` for details on how to write pretty
# printers. Here is a simple example::
#
# def dtype_pprinter(obj, p, cycle):
# if cycle:
# return p.text('dtype(...)')
# if hasattr(obj, 'fields'):
# if obj.fields is None:
# p.text(repr(obj))
# else:
# p.begin_group(7, 'dtype([')
# for i, field in enumerate(obj.descr):
# if i > 0:
# p.text(',')
# p.breakable()
# p.pretty(field)
# p.end_group(7, '])')
##
#c.PlainTextFormatter.float_precision = ''
## Truncate large collections (lists, dicts, tuples, sets) to this size.
#
# Set to 0 to disable truncation.
#c.PlainTextFormatter.max_seq_length = 1000
##
#c.PlainTextFormatter.max_width = 79
##
#c.PlainTextFormatter.newline = '\n'
##
#c.PlainTextFormatter.pprint = True
##
#c.PlainTextFormatter.verbose = False
#------------------------------------------------------------------------------
# Completer(Configurable) configuration
#------------------------------------------------------------------------------
## Enable unicode completions, e.g. \alpha<tab> . Includes completion of latex
# commands, unicode names, and expanding unicode characters back to latex
# commands.
#c.Completer.backslash_combining_completions = True
## Enable debug for the Completer. Mostly print extra information for
# experimental jedi integration.
#c.Completer.debug = False
## Activate greedy completion. PENDING DEPRECATION: this is now mostly taken care
# of with Jedi.
#
# This will enable completion on elements of lists, results of function calls,
# etc., but can be unsafe because the code is actually evaluated on TAB.
#c.Completer.greedy = False
## Experimental: restrict time (in milliseconds) during which Jedi can compute
# types. Set to 0 to stop computing types. Non-zero value lower than 100ms may
# hurt performance by preventing Jedi from building its cache.
#c.Completer.jedi_compute_type_timeout = 400
## Experimental: Use Jedi to generate autocompletions. Default to True if jedi is
# installed
#c.Completer.use_jedi = True
#------------------------------------------------------------------------------
# IPCompleter(Completer) configuration
#------------------------------------------------------------------------------
## Extension of the completer class with IPython-specific features
## DEPRECATED as of version 5.0.
#
# Instruct the completer to use __all__ for the completion
#
# Specifically, when completing on ``object.<tab>``.
#
# When True: only those names in obj.__all__ will be included.
#
# When False [default]: the __all__ attribute is ignored
#c.IPCompleter.limit_to__all__ = False
## Whether to merge completion results into a single list
#
# If False, only the completion results from the first non-empty completer will
# be returned.
#c.IPCompleter.merge_completions = True
## Instruct the completer to omit private method names
#
# Specifically, when completing on ``object.<tab>``.
#
# When 2 [default]: all names that start with '_' will be excluded.
#
# When 1: all 'magic' names (``__foo__``) will be excluded.
#
# When 0: nothing will be excluded.
#c.IPCompleter.omit__names = 2
#------------------------------------------------------------------------------
# ScriptMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics for talking to scripts
#
# This defines a base `%%script` cell magic for running a cell with a program in
# a subprocess, and registers a few top-level magics that call %%script with
# common interpreters.
## Extra script cell magics to define
#
# This generates simple wrappers of `%%script foo` as `%%foo`.
#
# If you want to add script magics that aren't on your path, specify them in
# script_paths
#c.ScriptMagics.script_magics = []
## Dict mapping short 'ruby' names to full paths, such as '/opt/secret/bin/ruby'
#
# Only necessary for items in script_magics where the default path will not find
# the right interpreter.
#c.ScriptMagics.script_paths = {}
#------------------------------------------------------------------------------
# LoggingMagics(Magics) configuration
#------------------------------------------------------------------------------
## Magics related to all logging machinery.
## Suppress output of log state when logging is enabled
#c.LoggingMagics.quiet = False
#------------------------------------------------------------------------------
# StoreMagics(Magics) configuration
#------------------------------------------------------------------------------
## Lightweight persistence for python variables.
#
# Provides the %store magic.
## If True, any %store-d variables will be automatically restored when IPython
# starts.
#c.StoreMagics.autorestore = False
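
# Editor's note: the assignments below are an illustrative sketch added for
# this write-up, not part of the generated profile.  They show how a few of
# the options documented above are enabled in practice; the particular values
# are arbitrary choices for a debugging-oriented profile.
c.InteractiveShell.xmode = 'Verbose'
c.InteractiveShell.colors = 'Linux'
c.TerminalInteractiveShell.editing_mode = 'vi'
c.TerminalInteractiveShell.confirm_exit = False
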
| OpnSrcConstruction/OSCbashRCs | .ipython/profile_debug/ipython_config.py | Python | unlicense | 23,357 |
# Copyright 2014 Google Inc. All Rights Reserved.
"""Command for describing target HTTPS proxies."""
from googlecloudsdk.compute.lib import base_classes
class Describe(base_classes.GlobalDescriber):
"""Display detailed information about a target HTTPS proxy."""
@staticmethod
def Args(parser):
cli = Describe.GetCLIGenerator()
base_classes.GlobalDescriber.Args(
parser, 'compute.targetHttpsProxies', cli,
'compute.target-https-proxies')
base_classes.AddFieldsFlag(parser, 'targetHttpsProxies')
@property
def service(self):
return self.compute.targetHttpsProxies
@property
def resource_type(self):
return 'targetHttpsProxies'
Describe.detailed_help = {
'brief': 'Display detailed information about a target HTTPS proxy',
'DESCRIPTION': """\
*{command}* displays all data associated with a target HTTPS proxy
in a project.
""",
}
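# A hedged sketch of the CLI invocation this subcommand implements; the proxy
# name is a placeholder and global flags are omitted:
#
#   $ gcloud compute target-https-proxies describe my-https-proxy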
|
wemanuel/smry
|
smry/server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/compute/subcommands/target_https_proxies/describe.py
|
Python
|
apache-2.0
| 917
|
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import json
from queue import Queue, Empty
import threading
import time
import requests
from mycroft.api import DeviceApi, is_paired
from mycroft.configuration import Configuration
from mycroft.session import SessionManager
from mycroft.util.log import LOG
from mycroft.version import CORE_VERSION_STR
from copy import copy
class _MetricSender(threading.Thread):
"""Thread responsible for sending metrics data."""
def __init__(self):
super().__init__()
self.queue = Queue()
self.daemon = True
self.start()
def run(self):
while True:
time.sleep(30)
try:
                while True:  # Try to read the queue until it fails
report_metric(*self.queue.get_nowait())
time.sleep(0.5)
except Empty:
pass # If the queue is empty just continue the loop
except Exception as e:
LOG.error('Could not send Metrics: {}'.format(repr(e)))
_metric_uploader = _MetricSender()
def report_metric(name, data):
"""
Report a general metric to the Mycroft servers
Args:
name (str): Name of metric. Must use only letters and hyphens
data (dict): JSON dictionary to report. Must be valid JSON
"""
try:
if is_paired() and Configuration().get()['opt_in']:
DeviceApi().report_metric(name, data)
except requests.RequestException as e:
LOG.error('Metric couldn\'t be uploaded, due to a network error ({})'
.format(e))
def report_timing(ident, system, timing, additional_data=None):
"""Create standardized message for reporting timing.
Args:
ident (str): identifier of user interaction
        system (str): system that generated the report
timing (stopwatch): Stopwatch object with recorded timing
additional_data (dict): dictionary with related data
"""
additional_data = additional_data or {}
report = copy(additional_data)
report['id'] = ident
report['system'] = system
report['start_time'] = timing.timestamp
report['time'] = timing.time
_metric_uploader.queue.put(('timing', report))
class Stopwatch:
"""
Simple time measuring class.
"""
def __init__(self):
self.timestamp = None
self.time = None
def start(self):
"""
Start a time measurement
"""
self.timestamp = time.time()
def lap(self):
cur_time = time.time()
start_time = self.timestamp
self.timestamp = cur_time
return cur_time - start_time
def stop(self):
"""
        Stop a running time measurement and return the measured time.
"""
cur_time = time.time()
start_time = self.timestamp
self.time = cur_time - start_time
return self.time
def __enter__(self):
"""
Start stopwatch when entering with-block.
"""
self.start()
def __exit__(self, tpe, value, tb):
"""
Stop stopwatch when exiting with-block.
"""
self.stop()
def __str__(self):
cur_time = time.time()
if self.timestamp:
return str(self.time or cur_time - self.timestamp)
else:
return 'Not started'
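# A hedged usage sketch (not part of the upstream module): timing a block with
# Stopwatch and handing it to report_timing above. The identifier, system name
# and payload below are invented for illustration.
#
#     stopwatch = Stopwatch()
#     with stopwatch:
#         handle_request()          # hypothetical work being measured
#     report_timing(ident='interaction-1', system='example_system',
#                   timing=stopwatch, additional_data={'source': 'demo'})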
class MetricsAggregator:
"""
MetricsAggregator is not threadsafe, and multiple clients writing the
same metric "concurrently" may result in data loss.
"""
def __init__(self):
self._counters = {}
self._timers = {}
self._levels = {}
self._attributes = {}
self.attr("version", CORE_VERSION_STR)
def increment(self, name, value=1):
cur = self._counters.get(name, 0)
self._counters[name] = cur + value
    def timer(self, name, value):
        cur = self._timers.get(name)
        if not cur:
            cur = self._timers[name] = []
        cur.append(value)
def level(self, name, value):
self._levels[name] = value
def clear(self):
self._counters = {}
self._timers = {}
self._levels = {}
self._attributes = {}
self.attr("version", CORE_VERSION_STR)
def attr(self, name, value):
self._attributes[name] = value
def flush(self):
publisher = MetricsPublisher()
payload = {
'counters': self._counters,
'timers': self._timers,
'levels': self._levels,
'attributes': self._attributes
}
self.clear()
count = (len(payload['counters']) + len(payload['timers']) +
len(payload['levels']))
if count > 0:
# LOG.debug(json.dumps(payload))
def publish():
publisher.publish(payload)
threading.Thread(target=publish).start()
class MetricsPublisher:
def __init__(self, url=None, enabled=False):
conf = Configuration().get()['server']
self.url = url or conf['url']
self.enabled = enabled or conf['metrics']
def publish(self, events):
if 'session_id' not in events:
session_id = SessionManager.get().session_id
events['session_id'] = session_id
if self.enabled:
requests.post(
self.url,
headers={'Content-Type': 'application/json'},
data=json.dumps(events), verify=False)
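# --- Illustrative usage (added sketch, not part of the upstream module) -------
# A minimal sketch of the aggregation API above; the metric names are invented
# and it assumes the module's mycroft imports are available. flush() is left
# commented out because it publishes asynchronously via MetricsPublisher when
# metrics reporting is enabled in the server config.
if __name__ == '__main__':
    aggregator = MetricsAggregator()
    aggregator.increment('utterances')            # counter: bump by 1
    aggregator.increment('tts_failures', 2)       # counters accept an arbitrary step
    aggregator.timer('stt_latency', 0.42)         # timers collect a list of samples
    aggregator.level('queue_depth', 3)            # levels keep only the latest value
    aggregator.attr('device_type', 'example')     # free-form attribute
    # aggregator.flush()                          # would POST the payload upstream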
|
forslund/mycroft-core
|
mycroft/metrics/__init__.py
|
Python
|
apache-2.0
| 6,083
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import jsonschema
import mock
from rally.deploy.engines import devstack
from tests.unit import test
SAMPLE_CONFIG = {
"type": "DevstackEngine",
"provider": {
"name": "ExistingServers",
"credentials": [{"user": "root", "host": "example.com"}],
},
"localrc": {
"ADMIN_PASSWORD": "secret",
},
}
DEVSTACK_REPO = "https://git.openstack.org/cgit/openstack-dev/devstack.git"
class DevstackEngineTestCase(test.TestCase):
def setUp(self):
super(DevstackEngineTestCase, self).setUp()
self.deployment = {
"uuid": "de641026-dbe3-4abe-844a-ffef930a600a",
"config": SAMPLE_CONFIG,
}
self.engine = devstack.DevstackEngine(self.deployment)
def test_invalid_config(self):
self.deployment = SAMPLE_CONFIG.copy()
self.deployment["config"] = {"type": 42}
engine = devstack.DevstackEngine(self.deployment)
self.assertRaises(jsonschema.ValidationError,
engine.validate)
def test_construct(self):
self.assertEqual(self.engine.localrc["ADMIN_PASSWORD"], "secret")
@mock.patch("rally.deploy.engines.devstack.open", create=True)
def test_prepare_server(self, m_open):
m_open.return_value = "fake_file"
server = mock.Mock()
server.password = "secret"
self.engine.prepare_server(server)
calls = [
mock.call("/bin/sh -e", stdin="fake_file"),
mock.call("chpasswd", stdin="rally:secret"),
]
self.assertEqual(calls, server.ssh.run.mock_calls)
filename = m_open.mock_calls[0][1][0]
self.assertTrue(filename.endswith("rally/deploy/engines/"
"devstack/install.sh"))
self.assertEqual([mock.call(filename, "rb")], m_open.mock_calls)
@mock.patch("rally.deploy.engine.EngineFactory.get_provider")
@mock.patch("rally.deploy.engines.devstack.get_updated_server")
@mock.patch("rally.deploy.engines.devstack.get_script")
@mock.patch("rally.deploy.serverprovider.provider.Server")
@mock.patch("rally.deploy.engines.devstack.objects.Endpoint")
def test_deploy(self, m_endpoint, m_server, m_gs, m_gus, m_gp):
m_gp.return_value = fake_provider = mock.Mock()
server = mock.Mock()
server.host = "host"
m_endpoint.return_value = "fake_endpoint"
m_gus.return_value = ds_server = mock.Mock()
m_gs.return_value = "fake_script"
server.get_credentials.return_value = "fake_credentials"
fake_provider.create_servers.return_value = [server]
with mock.patch.object(self.engine, "deployment") as m_d:
endpoints = self.engine.deploy()
self.assertEqual({"admin": "fake_endpoint"}, endpoints)
m_endpoint.assert_called_once_with("http://host:5000/v2.0/", "admin",
"secret", "admin", "admin")
m_d.add_resource.assert_called_once_with(
info="fake_credentials",
provider_name="DevstackEngine",
type="credentials")
repo = "https://git.openstack.org/cgit/openstack-dev/devstack.git"
cmd = "/bin/sh -e -s %s master" % repo
server.ssh.run.assert_called_once_with(cmd, stdin="fake_script")
ds_calls = [
mock.call.ssh.run("cat > ~/devstack/localrc", stdin=mock.ANY),
mock.call.ssh.run("~/devstack/stack.sh")
]
self.assertEqual(ds_calls, ds_server.mock_calls)
localrc = ds_server.mock_calls[0][2]["stdin"]
self.assertIn("ADMIN_PASSWORD=secret", localrc)
|
varunarya10/rally
|
tests/unit/deploy/engines/test_devstack.py
|
Python
|
apache-2.0
| 4,325
|
"""
Support for IP Webcam settings.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.android_ip_webcam/
"""
from homeassistant.components.switch import SwitchDevice
from homeassistant.components.android_ip_webcam import (
KEY_MAP, ICON_MAP, DATA_IP_WEBCAM, AndroidIPCamEntity, CONF_HOST,
CONF_NAME, CONF_SWITCHES)
DEPENDENCIES = ['android_ip_webcam']
async def async_setup_platform(hass, config, async_add_entities,
discovery_info=None):
"""Set up the IP Webcam switch platform."""
if discovery_info is None:
return
host = discovery_info[CONF_HOST]
name = discovery_info[CONF_NAME]
switches = discovery_info[CONF_SWITCHES]
ipcam = hass.data[DATA_IP_WEBCAM][host]
all_switches = []
for setting in switches:
all_switches.append(IPWebcamSettingsSwitch(name, host, ipcam, setting))
async_add_entities(all_switches, True)
class IPWebcamSettingsSwitch(AndroidIPCamEntity, SwitchDevice):
"""An abstract class for an IP Webcam setting."""
def __init__(self, name, host, ipcam, setting):
"""Initialize the settings switch."""
super().__init__(host, ipcam)
self._setting = setting
self._mapped_name = KEY_MAP.get(self._setting, self._setting)
self._name = '{} {}'.format(name, self._mapped_name)
self._state = False
@property
def name(self):
"""Return the name of the node."""
return self._name
async def async_update(self):
"""Get the updated status of the switch."""
self._state = bool(self._ipcam.current_settings.get(self._setting))
@property
def is_on(self):
"""Return the boolean response if the node is on."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn device on."""
if self._setting == 'torch':
await self._ipcam.torch(activate=True)
elif self._setting == 'focus':
await self._ipcam.focus(activate=True)
elif self._setting == 'video_recording':
await self._ipcam.record(record=True)
else:
await self._ipcam.change_setting(self._setting, True)
self._state = True
self.async_schedule_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn device off."""
if self._setting == 'torch':
await self._ipcam.torch(activate=False)
elif self._setting == 'focus':
await self._ipcam.focus(activate=False)
elif self._setting == 'video_recording':
await self._ipcam.record(record=False)
else:
await self._ipcam.change_setting(self._setting, False)
self._state = False
self.async_schedule_update_ha_state()
@property
def icon(self):
"""Return the icon for the switch."""
return ICON_MAP.get(self._setting, 'mdi:flash')
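# A hedged configuration sketch (not part of the upstream module): how the
# parent android_ip_webcam component might be configured so this platform
# receives 'switches' through discovery_info. The host is a placeholder and the
# three switch names are the ones special-cased in async_turn_on/off above.
#
#   # configuration.yaml
#   android_ip_webcam:
#     - host: 192.168.1.50
#       switches:
#         - torch
#         - focus
#         - video_recording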
|
tinloaf/home-assistant
|
homeassistant/components/switch/android_ip_webcam.py
|
Python
|
apache-2.0
| 2,983
|
#
# Copyright 2012, Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Client side of the heat engine RPC API.
"""
from heat.common import messaging
from heat.rpc import api as rpc_api
class EngineClient(object):
'''Client side of the heat engine rpc API.
API version history::
1.0 - Initial version.
1.1 - Add support_status argument to list_resource_types()
1.4 - Add support for service list
1.9 - Add template_type option to generate_template()
'''
BASE_RPC_API_VERSION = '1.0'
def __init__(self):
self._client = messaging.get_rpc_client(
topic=rpc_api.ENGINE_TOPIC,
version=self.BASE_RPC_API_VERSION)
@staticmethod
def make_msg(method, **kwargs):
return method, kwargs
def call(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.call(ctxt, method, **kwargs)
def cast(self, ctxt, msg, version=None):
method, kwargs = msg
if version is not None:
client = self._client.prepare(version=version)
else:
client = self._client
return client.cast(ctxt, method, **kwargs)
def local_error_name(self, error):
"""
Returns the name of the error with any _Remote postfix removed.
:param error: Remote raised error to derive the name from.
"""
error_name = error.__class__.__name__
return error_name.split('_Remote')[0]
def ignore_error_named(self, error, name):
"""
Raises the error unless its local name matches the supplied name
:param error: Remote raised error to derive the local name from.
:param name: Name to compare local name to.
"""
if self.local_error_name(error) != name:
raise error
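    # A hedged usage sketch for the two error helpers above; the exception name
    # 'StackNotFound' is illustrative and depends on the engine's exception set:
    #
    #     try:
    #         engine.delete_stack(ctxt, identity, cast=False)
    #     except Exception as exc:
    #         # Swallow only the remote 'StackNotFound'; anything else re-raises.
    #         engine.ignore_error_named(exc, 'StackNotFound')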
def identify_stack(self, ctxt, stack_name):
"""
The identify_stack method returns the full stack identifier for a
single, live stack given the stack name.
:param ctxt: RPC context.
        :param stack_name: Name of the stack you want to look up.
"""
return self.call(ctxt, self.make_msg('identify_stack',
stack_name=stack_name))
def list_stacks(self, ctxt, limit=None, marker=None, sort_keys=None,
sort_dir=None, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
"""
The list_stacks method returns attributes of all stacks. It supports
pagination (``limit`` and ``marker``), sorting (``sort_keys`` and
``sort_dir``) and filtering (``filters``) of the results.
:param ctxt: RPC context.
:param limit: the number of stacks to list (integer or string)
:param marker: the ID of the last item in the previous page
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc')
:param filters: a dict with attribute:value to filter the list
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, show soft-deleted stacks
:param show_nested: if true, show nested stacks
:param show_hidden: if true, show hidden stacks
:param tags: show stacks containing these tags, combine multiple
tags using the boolean AND expression
:param tags_any: show stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: show stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: show stacks not containing these tags, combine
multiple tags using the boolean OR expression
:returns: a list of stacks
"""
return self.call(ctxt,
self.make_msg('list_stacks', limit=limit,
sort_keys=sort_keys, marker=marker,
sort_dir=sort_dir, filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags, tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
def count_stacks(self, ctxt, filters=None, tenant_safe=True,
show_deleted=False, show_nested=False, show_hidden=False,
tags=None, tags_any=None, not_tags=None,
not_tags_any=None):
"""
Return the number of stacks that match the given filters
:param ctxt: RPC context.
:param filters: a dict of ATTR:VALUE to match against stacks
:param tenant_safe: if true, scope the request by the current tenant
:param show_deleted: if true, count will include the deleted stacks
:param show_nested: if true, count will include nested stacks
:param show_hidden: if true, count will include hidden stacks
:param tags: count stacks containing these tags, combine multiple tags
using the boolean AND expression
:param tags_any: count stacks containing these tags, combine multiple
tags using the boolean OR expression
:param not_tags: count stacks not containing these tags, combine
multiple tags using the boolean AND expression
:param not_tags_any: count stacks not containing these tags, combine
multiple tags using the boolean OR expression
        :returns: an integer representing the number of matched stacks
"""
return self.call(ctxt, self.make_msg('count_stacks',
filters=filters,
tenant_safe=tenant_safe,
show_deleted=show_deleted,
show_nested=show_nested,
show_hidden=show_hidden,
tags=tags,
tags_any=tags_any,
not_tags=not_tags,
not_tags_any=not_tags_any),
version='1.8')
def show_stack(self, ctxt, stack_identity):
"""
Return detailed information about one or all stacks.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to show, or None to
show all
"""
return self.call(ctxt, self.make_msg('show_stack',
stack_identity=stack_identity))
def preview_stack(self, ctxt, stack_name, template, params, files, args):
"""
Simulates a new stack using the provided template.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
"""
return self.call(ctxt,
self.make_msg('preview_stack', stack_name=stack_name,
template=template,
params=params, files=files, args=args))
def create_stack(self, ctxt, stack_name, template, params, files, args):
"""
The create_stack method creates a new stack using the template
provided.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
:param stack_name: Name of the stack you want to create.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
"""
return self._create_stack(ctxt, stack_name, template, params, files,
args)
def _create_stack(self, ctxt, stack_name, template, params, files, args,
owner_id=None, nested_depth=0, user_creds_id=None,
stack_user_project_id=None, parent_resource_name=None):
"""
Internal create_stack interface for engine-to-engine communication via
RPC. Allows some additional options which should not be exposed to
users via the API:
:param owner_id: parent stack ID for nested stacks
:param nested_depth: nested depth for nested stacks
:param user_creds_id: user_creds record for nested stack
:param stack_user_project_id: stack user project for nested stack
:param parent_resource_name: the parent resource name
"""
return self.call(
ctxt, self.make_msg('create_stack', stack_name=stack_name,
template=template,
params=params, files=files, args=args,
owner_id=owner_id,
nested_depth=nested_depth,
user_creds_id=user_creds_id,
stack_user_project_id=stack_user_project_id,
parent_resource_name=parent_resource_name),
version='1.8')
def update_stack(self, ctxt, stack_identity, template, params,
files, args):
"""
The update_stack method updates an existing stack based on the
provided template and parameters.
Note that at this stage the template has already been fetched from the
heat-api process if using a template-url.
:param ctxt: RPC context.
        :param stack_identity: Identity of the stack you want to update.
        :param template: Template of stack you want to update.
:param params: Stack Input Params/Environment
:param files: files referenced from the environment.
:param args: Request parameters/args passed from API
"""
return self.call(ctxt, self.make_msg('update_stack',
stack_identity=stack_identity,
template=template,
params=params,
files=files,
args=args))
def validate_template(self, ctxt, template, params=None):
"""
The validate_template method uses the stack parser to check
the validity of a template.
:param ctxt: RPC context.
:param template: Template of stack you want to create.
:param params: Stack Input Params/Environment
"""
return self.call(ctxt, self.make_msg('validate_template',
template=template,
params=params))
def authenticated_to_backend(self, ctxt):
"""
Verify that the credentials in the RPC context are valid for the
current cloud backend.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('authenticated_to_backend'))
def get_template(self, ctxt, stack_identity):
"""
Get the template.
:param ctxt: RPC context.
        :param stack_identity: Name of the stack whose template you want to see.
"""
return self.call(ctxt, self.make_msg('get_template',
stack_identity=stack_identity))
def delete_stack(self, ctxt, stack_identity, cast=True):
"""
The delete_stack method deletes a given stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to delete.
:param cast: cast the message or use call (default: True)
"""
rpc_method = self.cast if cast else self.call
return rpc_method(ctxt,
self.make_msg('delete_stack',
stack_identity=stack_identity))
def abandon_stack(self, ctxt, stack_identity):
"""
        The abandon_stack method deletes a given stack, but its
        resources are not deleted.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to abandon.
"""
return self.call(ctxt,
self.make_msg('abandon_stack',
stack_identity=stack_identity))
def list_resource_types(self, ctxt, support_status=None):
"""
Get a list of valid resource types.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('list_resource_types',
support_status=support_status),
version='1.1')
def resource_schema(self, ctxt, type_name):
"""
Get the schema for a resource type.
:param ctxt: RPC context.
"""
return self.call(ctxt, self.make_msg('resource_schema',
type_name=type_name))
def generate_template(self, ctxt, type_name, template_type='cfn'):
"""
Generate a template based on the specified type.
:param ctxt: RPC context.
:param type_name: The resource type name to generate a template for.
:param template_type: the template type to generate, cfn or hot.
"""
return self.call(ctxt, self.make_msg('generate_template',
type_name=type_name,
template_type=template_type),
version='1.9')
def list_events(self, ctxt, stack_identity, filters=None, limit=None,
marker=None, sort_keys=None, sort_dir=None,):
"""
The list_events method lists all events associated with a given stack.
It supports pagination (``limit`` and ``marker``),
        sorting (``sort_keys`` and ``sort_dir``) and filtering (``filters``)
of the results.
:param ctxt: RPC context.
:param stack_identity: Name of the stack you want to get events for
:param filters: a dict with attribute:value to filter the list
:param limit: the number of events to list (integer or string)
:param marker: the ID of the last event in the previous page
:param sort_keys: an array of fields used to sort the list
:param sort_dir: the direction of the sort ('asc' or 'desc').
"""
return self.call(ctxt, self.make_msg('list_events',
stack_identity=stack_identity,
filters=filters,
limit=limit,
marker=marker,
sort_keys=sort_keys,
sort_dir=sort_dir))
def describe_stack_resource(self, ctxt, stack_identity, resource_name,
with_attr=None):
"""
Get detailed resource information about a particular resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
"""
return self.call(ctxt,
self.make_msg('describe_stack_resource',
stack_identity=stack_identity,
resource_name=resource_name,
with_attr=with_attr),
version='1.2')
def find_physical_resource(self, ctxt, physical_resource_id):
"""
Return an identifier for the resource with the specified physical
resource ID.
        :param ctxt: RPC context.
        :param physical_resource_id: The physical resource ID to look up.
"""
return self.call(ctxt,
self.make_msg(
'find_physical_resource',
physical_resource_id=physical_resource_id))
def describe_stack_resources(self, ctxt, stack_identity, resource_name):
"""
Get detailed resource information about one or more resources.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
"""
return self.call(ctxt, self.make_msg('describe_stack_resources',
stack_identity=stack_identity,
resource_name=resource_name))
def list_stack_resources(self, ctxt, stack_identity, nested_depth=0):
"""
List the resources belonging to a stack.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
        :param nested_depth: Levels of nested stacks for which to list resources.
"""
return self.call(ctxt, self.make_msg('list_stack_resources',
stack_identity=stack_identity,
nested_depth=nested_depth))
def stack_suspend(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_suspend',
stack_identity=stack_identity))
def stack_resume(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_resume',
stack_identity=stack_identity))
def stack_check(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_check',
stack_identity=stack_identity))
def stack_cancel_update(self, ctxt, stack_identity):
return self.call(ctxt, self.make_msg('stack_cancel_update',
stack_identity=stack_identity))
def metadata_update(self, ctxt, stack_identity, resource_name, metadata):
"""
Update the metadata for the given resource.
"""
return self.call(ctxt, self.make_msg('metadata_update',
stack_identity=stack_identity,
resource_name=resource_name,
metadata=metadata))
def resource_signal(self, ctxt, stack_identity, resource_name, details,
sync_call=False):
"""
Generate an alarm on the resource.
:param ctxt: RPC context.
:param stack_identity: Name of the stack.
:param resource_name: the Resource.
:param details: the details of the signal.
"""
return self.call(ctxt, self.make_msg('resource_signal',
stack_identity=stack_identity,
resource_name=resource_name,
details=details,
sync_call=sync_call),
version='1.3')
def create_watch_data(self, ctxt, watch_name, stats_data):
'''
        This could be used by CloudWatch and WaitConditions to treat
        HA service events like any other CloudWatch data.
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm
:param stats_data: The data to post.
'''
return self.call(ctxt, self.make_msg('create_watch_data',
watch_name=watch_name,
stats_data=stats_data))
def show_watch(self, ctxt, watch_name):
"""
The show_watch method returns the attributes of one watch
or all watches if no watch_name is passed
:param ctxt: RPC context.
:param watch_name: Name of the watch/alarm you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch',
watch_name=watch_name))
def show_watch_metric(self, ctxt, metric_namespace=None, metric_name=None):
"""
The show_watch_metric method returns the datapoints associated
with a specified metric, or all metrics if no metric_name is passed
:param ctxt: RPC context.
:param metric_namespace: Name of the namespace you want to see,
or None to see all
:param metric_name: Name of the metric you want to see,
or None to see all
"""
return self.call(ctxt, self.make_msg('show_watch_metric',
metric_namespace=metric_namespace,
metric_name=metric_name))
def set_watch_state(self, ctxt, watch_name, state):
'''
Temporarily set the state of a given watch
:param ctxt: RPC context.
:param watch_name: Name of the watch
:param state: State (must be one defined in WatchRule class)
'''
return self.call(ctxt, self.make_msg('set_watch_state',
watch_name=watch_name,
state=state))
def get_revision(self, ctxt):
return self.call(ctxt, self.make_msg('get_revision'))
def show_software_config(self, cnxt, config_id):
return self.call(cnxt, self.make_msg('show_software_config',
config_id=config_id))
def create_software_config(self, cnxt, group, name, config,
inputs=None, outputs=None, options=None):
inputs = inputs or []
outputs = outputs or []
options = options or {}
return self.call(cnxt, self.make_msg('create_software_config',
group=group,
name=name,
config=config,
inputs=inputs,
outputs=outputs,
options=options))
def delete_software_config(self, cnxt, config_id):
return self.call(cnxt, self.make_msg('delete_software_config',
config_id=config_id))
def list_software_deployments(self, cnxt, server_id=None):
return self.call(cnxt, self.make_msg('list_software_deployments',
server_id=server_id))
def metadata_software_deployments(self, cnxt, server_id):
return self.call(cnxt, self.make_msg('metadata_software_deployments',
server_id=server_id))
def show_software_deployment(self, cnxt, deployment_id):
return self.call(cnxt, self.make_msg('show_software_deployment',
deployment_id=deployment_id))
def create_software_deployment(self, cnxt, server_id, config_id=None,
input_values=None, action='INIT',
status='COMPLETE', status_reason='',
stack_user_project_id=None):
input_values = input_values or {}
return self.call(cnxt, self.make_msg(
'create_software_deployment',
server_id=server_id,
config_id=config_id,
input_values=input_values,
action=action,
status=status,
status_reason=status_reason,
stack_user_project_id=stack_user_project_id))
def update_software_deployment(self, cnxt, deployment_id,
config_id=None, input_values=None,
output_values=None, action=None,
status=None, status_reason=None,
updated_at=None):
return self.call(
cnxt, self.make_msg('update_software_deployment',
deployment_id=deployment_id,
config_id=config_id,
input_values=input_values,
output_values=output_values,
action=action,
status=status,
status_reason=status_reason,
updated_at=updated_at),
version='1.5')
def delete_software_deployment(self, cnxt, deployment_id):
return self.call(cnxt, self.make_msg('delete_software_deployment',
deployment_id=deployment_id))
def signal_software_deployment(self, cnxt, deployment_id, details,
updated_at=None):
return self.call(
cnxt, self.make_msg('signal_software_deployment',
deployment_id=deployment_id,
details=details,
updated_at=updated_at),
version='1.6')
def stack_snapshot(self, ctxt, stack_identity, name):
return self.call(ctxt, self.make_msg('stack_snapshot',
stack_identity=stack_identity,
name=name))
def show_snapshot(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('show_snapshot',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def delete_snapshot(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('delete_snapshot',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def stack_list_snapshots(self, cnxt, stack_identity):
return self.call(cnxt, self.make_msg('stack_list_snapshots',
stack_identity=stack_identity))
def stack_restore(self, cnxt, stack_identity, snapshot_id):
return self.call(cnxt, self.make_msg('stack_restore',
stack_identity=stack_identity,
snapshot_id=snapshot_id))
def list_services(self, cnxt):
return self.call(cnxt, self.make_msg('list_services'), version='1.4')
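# A hedged usage sketch (not part of the upstream module); ``ctxt`` stands for a
# real RPC request context supplied by the caller and the stack name is a
# placeholder:
#
#     engine = EngineClient()
#     identity = engine.identify_stack(ctxt, 'example-stack')
#     stacks = engine.list_stacks(ctxt, limit=10, sort_dir='desc')
#     engine.delete_stack(ctxt, identity, cast=True)   # cast: don't wait for a reply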
|
pshchelo/heat
|
heat/rpc/client.py
|
Python
|
apache-2.0
| 28,301
|
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Mox, an object-mocking framework for Python.
Mox works in the record-replay-verify paradigm. When you first create
a mock object, it is in record mode. You then programmatically set
the expected behavior of the mock object (what methods are to be
called on it, with what parameters, what they should return, and in
what order).
Once you have set up the expected mock behavior, you put it in replay
mode. Now the mock responds to method calls just as you told it to.
If an unexpected method (or an expected method with unexpected
parameters) is called, then an exception will be raised.
Once you are done interacting with the mock, you need to verify that
all the expected interactions occurred. (Maybe your code exited
prematurely without calling some cleanup method!) The verify phase
ensures that every expected method was called; otherwise, an exception
will be raised.
WARNING! Mock objects created by Mox are not thread-safe. If you
call a mock in multiple threads, it should be guarded by a mutex.
TODO(user): Add the option to make mocks thread-safe!
Suggested usage / workflow:
# Create Mox factory
my_mox = Mox()
# Create a mock data access object
mock_dao = my_mox.CreateMock(DAOClass)
# Set up expected behavior
mock_dao.RetrievePersonWithIdentifier('1').AndReturn(person)
mock_dao.DeletePerson(person)
# Put mocks in replay mode
my_mox.ReplayAll()
# Inject mock object and run test
controller.SetDao(mock_dao)
controller.DeletePersonById('1')
# Verify all methods were called as expected
my_mox.VerifyAll()
"""
from collections import deque
import difflib
import inspect
import re
import types
import unittest
import stubout
class Error(AssertionError):
"""Base exception for this module."""
pass
class ExpectedMethodCallsError(Error):
"""Raised when Verify() is called before all expected methods have been called
"""
def __init__(self, expected_methods):
"""Init exception.
Args:
# expected_methods: A sequence of MockMethod objects that should have been
# called.
expected_methods: [MockMethod]
Raises:
ValueError: if expected_methods contains no methods.
"""
if not expected_methods:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_methods = expected_methods
def __str__(self):
calls = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_methods)])
return "Verify: Expected methods never called:\n%s" % (calls,)
class UnexpectedMethodCallError(Error):
"""Raised when an unexpected method is called.
This can occur if a method is called with incorrect parameters, or out of the
specified order.
"""
def __init__(self, unexpected_method, expected):
"""Init exception.
Args:
# unexpected_method: MockMethod that was called but was not at the head of
# the expected_method queue.
# expected: MockMethod or UnorderedGroup the method should have
# been in.
unexpected_method: MockMethod
expected: MockMethod or UnorderedGroup
"""
Error.__init__(self)
if expected is None:
self._str = "Unexpected method call %s" % (unexpected_method,)
else:
differ = difflib.Differ()
diff = differ.compare(str(unexpected_method).splitlines(True),
str(expected).splitlines(True))
self._str = ("Unexpected method call. unexpected:- expected:+\n%s"
% ("\n".join(line.rstrip() for line in diff),))
def __str__(self):
return self._str
class UnknownMethodCallError(Error):
"""Raised if an unknown method is requested of the mock object."""
def __init__(self, unknown_method_name):
"""Init exception.
Args:
# unknown_method_name: Method call that is not part of the mocked class's
# public interface.
unknown_method_name: str
"""
Error.__init__(self)
self._unknown_method_name = unknown_method_name
def __str__(self):
return "Method called is not a member of the object: %s" % \
self._unknown_method_name
class PrivateAttributeError(Error):
"""
Raised if a MockObject is passed a private additional attribute name.
"""
def __init__(self, attr):
Error.__init__(self)
self._attr = attr
def __str__(self):
return ("Attribute '%s' is private and should not be available in a mock "
"object." % self._attr)
class ExpectedMockCreationError(Error):
"""Raised if mocks should have been created by StubOutClassWithMocks."""
def __init__(self, expected_mocks):
"""Init exception.
Args:
# expected_mocks: A sequence of MockObjects that should have been
# created
Raises:
ValueError: if expected_mocks contains no methods.
"""
if not expected_mocks:
raise ValueError("There must be at least one expected method")
Error.__init__(self)
self._expected_mocks = expected_mocks
def __str__(self):
mocks = "\n".join(["%3d. %s" % (i, m)
for i, m in enumerate(self._expected_mocks)])
return "Verify: Expected mocks never created:\n%s" % (mocks,)
class UnexpectedMockCreationError(Error):
"""Raised if too many mocks were created by StubOutClassWithMocks."""
def __init__(self, instance, *params, **named_params):
"""Init exception.
Args:
      # instance: the type of object that was created
# params: parameters given during instantiation
# named_params: named parameters given during instantiation
"""
Error.__init__(self)
self._instance = instance
self._params = params
self._named_params = named_params
def __str__(self):
args = ", ".join(["%s" % v for i, v in enumerate(self._params)])
error = "Unexpected mock creation: %s(%s" % (self._instance, args)
if self._named_params:
error += ", " + ", ".join(["%s=%s" % (k, v) for k, v in
self._named_params.iteritems()])
error += ")"
return error
class Mox(object):
"""Mox: a factory for creating mock objects."""
# A list of types that should be stubbed out with MockObjects (as
# opposed to MockAnythings).
_USE_MOCK_OBJECT = [types.ClassType, types.FunctionType, types.InstanceType,
types.ModuleType, types.ObjectType, types.TypeType,
types.MethodType, types.UnboundMethodType,
]
# A list of types that may be stubbed out with a MockObjectFactory.
_USE_MOCK_FACTORY = [types.ClassType, types.ObjectType, types.TypeType]
def __init__(self):
"""Initialize a new Mox."""
self._mock_objects = []
self.stubs = stubout.StubOutForTesting()
def CreateMock(self, class_to_mock, attrs=None):
"""Create a new mock object.
Args:
# class_to_mock: the class to be mocked
class_to_mock: class
attrs: dict of attribute names to values that will be set on the mock
object. Only public attributes may be set.
Returns:
MockObject that can be used as the class_to_mock would be.
"""
if attrs is None:
attrs = {}
new_mock = MockObject(class_to_mock, attrs=attrs)
self._mock_objects.append(new_mock)
return new_mock
def CreateMockAnything(self, description=None):
"""Create a mock that will accept any method calls.
This does not enforce an interface.
Args:
description: str. Optionally, a descriptive name for the mock object being
created, for debugging output purposes.
"""
new_mock = MockAnything(description=description)
self._mock_objects.append(new_mock)
return new_mock
def ReplayAll(self):
"""Set all mock objects to replay mode."""
for mock_obj in self._mock_objects:
mock_obj._Replay()
def VerifyAll(self):
"""Call verify on all mock objects created."""
for mock_obj in self._mock_objects:
mock_obj._Verify()
def ResetAll(self):
"""Call reset on all mock objects. This does not unset stubs."""
for mock_obj in self._mock_objects:
mock_obj._Reset()
def StubOutWithMock(self, obj, attr_name, use_mock_anything=False):
"""Replace a method, attribute, etc. with a Mock.
This will replace a class or module with a MockObject, and everything else
(method, function, etc) with a MockAnything. This can be overridden to
always use a MockAnything by setting use_mock_anything to True.
Args:
obj: A Python object (class, module, instance, callable).
attr_name: str. The name of the attribute to replace with a mock.
use_mock_anything: bool. True if a MockAnything should be used regardless
of the type of attribute.
"""
attr_to_replace = getattr(obj, attr_name)
attr_type = type(attr_to_replace)
if attr_type == MockAnything or attr_type == MockObject:
raise TypeError('Cannot mock a MockAnything! Did you remember to '
'call UnsetStubs in your previous test?')
if attr_type in self._USE_MOCK_OBJECT and not use_mock_anything:
stub = self.CreateMock(attr_to_replace)
else:
stub = self.CreateMockAnything(description='Stub for %s' % attr_to_replace)
stub.__name__ = attr_name
self.stubs.Set(obj, attr_name, stub)
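  # A hedged usage sketch of the record-replay-verify cycle with StubOutWithMock;
  # the stubbed target (os.path.exists, assuming os.path is imported in the test
  # module) and the values are illustrative:
  #
  #   m = Mox()
  #   m.StubOutWithMock(os.path, 'exists')
  #   os.path.exists('/tmp/demo').AndReturn(True)   # record the expected call
  #   m.ReplayAll()
  #   assert os.path.exists('/tmp/demo')            # replay returns the canned value
  #   m.VerifyAll()
  #   m.UnsetStubs()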
def StubOutClassWithMocks(self, obj, attr_name):
"""Replace a class with a "mock factory" that will create mock objects.
This is useful if the code-under-test directly instantiates
    dependencies. Previously some boilerplate was necessary to
create a mock that would act as a factory. Using
StubOutClassWithMocks, once you've stubbed out the class you may
use the stubbed class as you would any other mock created by mox:
during the record phase, new mock instances will be created, and
    during replay, the recorded mocks will be returned.
# Example using StubOutWithMock (the old, clunky way):
mock1 = mox.CreateMock(my_import.FooClass)
mock2 = mox.CreateMock(my_import.FooClass)
foo_factory = mox.StubOutWithMock(my_import, 'FooClass',
use_mock_anything=True)
foo_factory(1, 2).AndReturn(mock1)
foo_factory(9, 10).AndReturn(mock2)
mox.ReplayAll()
my_import.FooClass(1, 2) # Returns mock1 again.
my_import.FooClass(9, 10) # Returns mock2 again.
mox.VerifyAll()
# Example using StubOutClassWithMocks:
mox.StubOutClassWithMocks(my_import, 'FooClass')
mock1 = my_import.FooClass(1, 2) # Returns a new mock of FooClass
mock2 = my_import.FooClass(9, 10) # Returns another mock instance
mox.ReplayAll()
my_import.FooClass(1, 2) # Returns mock1 again.
my_import.FooClass(9, 10) # Returns mock2 again.
mox.VerifyAll()
"""
attr_to_replace = getattr(obj, attr_name)
attr_type = type(attr_to_replace)
if attr_type == MockAnything or attr_type == MockObject:
raise TypeError('Cannot mock a MockAnything! Did you remember to '
'call UnsetStubs in your previous test?')
if attr_type not in self._USE_MOCK_FACTORY:
raise TypeError('Given attr is not a Class. Use StubOutWithMock.')
factory = _MockObjectFactory(attr_to_replace, self)
self._mock_objects.append(factory)
self.stubs.Set(obj, attr_name, factory)
def UnsetStubs(self):
"""Restore stubs to their original state."""
self.stubs.UnsetAll()
def Replay(*args):
"""Put mocks into Replay mode.
Args:
# args is any number of mocks to put into replay mode.
"""
for mock in args:
mock._Replay()
def Verify(*args):
"""Verify mocks.
Args:
# args is any number of mocks to be verified.
"""
for mock in args:
mock._Verify()
def Reset(*args):
"""Reset mocks.
Args:
# args is any number of mocks to be reset.
"""
for mock in args:
mock._Reset()
class MockAnything:
"""A mock that can be used to mock anything.
This is helpful for mocking classes that do not provide a public interface.
"""
def __init__(self, description=None):
"""Initialize a new MockAnything.
Args:
description: str. Optionally, a descriptive name for the mock object being
created, for debugging output purposes.
"""
self._description = description
self._Reset()
def __repr__(self):
if self._description:
return '<MockAnything instance of %s>' % self._description
else:
return '<MockAnything instance>'
def __getattr__(self, method_name):
"""Intercept method calls on this object.
A new MockMethod is returned that is aware of the MockAnything's
state (record or replay). The call will be recorded or replayed
by the MockMethod's __call__.
Args:
# method name: the name of the method being called.
method_name: str
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
if method_name == '__dir__':
return self.__class__.__dir__.__get__(self, self.__class__)
return self._CreateMockMethod(method_name)
def _CreateMockMethod(self, method_name, method_to_mock=None):
"""Create a new mock method call and return it.
Args:
# method_name: the name of the method being called.
# method_to_mock: The actual method being mocked, used for introspection.
method_name: str
method_to_mock: a method object
Returns:
A new MockMethod aware of MockAnything's state (record or replay).
"""
return MockMethod(method_name, self._expected_calls_queue,
self._replay_mode, method_to_mock=method_to_mock,
description=self._description)
def __nonzero__(self):
"""Return 1 for nonzero so the mock can be used as a conditional."""
return 1
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockAnything) and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __ne__(self, rhs):
"""Provide custom logic to compare objects."""
return not self == rhs
def _Replay(self):
"""Start replaying expected method calls."""
self._replay_mode = True
def _Verify(self):
"""Verify that all of the expected calls have been made.
Raises:
ExpectedMethodCallsError: if there are still more method calls in the
expected queue.
"""
# If the list of expected calls is not empty, raise an exception
if self._expected_calls_queue:
# The last MultipleTimesGroup is not popped from the queue.
if (len(self._expected_calls_queue) == 1 and
isinstance(self._expected_calls_queue[0], MultipleTimesGroup) and
self._expected_calls_queue[0].IsSatisfied()):
pass
else:
raise ExpectedMethodCallsError(self._expected_calls_queue)
def _Reset(self):
"""Reset the state of this mock to record mode with an empty queue."""
# Maintain a list of method calls we are expecting
self._expected_calls_queue = deque()
# Make sure we are in setup mode, not replay mode
self._replay_mode = False
class MockObject(MockAnything, object):
"""A mock object that simulates the public/protected interface of a class."""
def __init__(self, class_to_mock, attrs=None):
"""Initialize a mock object.
This determines the methods and properties of the class and stores them.
Args:
# class_to_mock: class to be mocked
class_to_mock: class
attrs: dict of attribute names to values that will be set on the mock
object. Only public attributes may be set.
Raises:
PrivateAttributeError: if a supplied attribute is not public.
ValueError: if an attribute would mask an existing method.
"""
if attrs is None:
attrs = {}
# This is used to hack around the mixin/inheritance of MockAnything, which
# is not a proper object (it can be anything. :-)
MockAnything.__dict__['__init__'](self)
# Get a list of all the public and special methods we should mock.
self._known_methods = set()
self._known_vars = set()
self._class_to_mock = class_to_mock
try:
if inspect.isclass(self._class_to_mock):
self._description = class_to_mock.__name__
else:
self._description = type(class_to_mock).__name__
except Exception:
pass
for method in dir(class_to_mock):
attr = getattr(class_to_mock, method)
if callable(attr):
self._known_methods.add(method)
elif not (type(attr) is property):
# treating properties as class vars makes little sense.
self._known_vars.add(method)
# Set additional attributes at instantiation time; this is quicker
# than manually setting attributes that are normally created in
# __init__.
for attr, value in attrs.items():
if attr.startswith("_"):
raise PrivateAttributeError(attr)
elif attr in self._known_methods:
raise ValueError("'%s' is a method of '%s' objects." % (attr,
class_to_mock))
else:
setattr(self, attr, value)
def __getattr__(self, name):
"""Intercept attribute request on this object.
If the attribute is a public class variable, it will be returned and not
recorded as a call.
If the attribute is not a variable, it is handled like a method
call. The method name is checked against the set of mockable
methods, and a new MockMethod is returned that is aware of the
MockObject's state (record or replay). The call will be recorded
or replayed by the MockMethod's __call__.
Args:
# name: the name of the attribute being requested.
name: str
Returns:
Either a class variable or a new MockMethod that is aware of the state
of the mock (record or replay).
Raises:
UnknownMethodCallError if the MockObject does not mock the requested
method.
"""
if name in self._known_vars:
return getattr(self._class_to_mock, name)
if name in self._known_methods:
return self._CreateMockMethod(
name,
method_to_mock=getattr(self._class_to_mock, name))
raise UnknownMethodCallError(name)
def __eq__(self, rhs):
"""Provide custom logic to compare objects."""
return (isinstance(rhs, MockObject) and
self._class_to_mock == rhs._class_to_mock and
self._replay_mode == rhs._replay_mode and
self._expected_calls_queue == rhs._expected_calls_queue)
def __setitem__(self, key, value):
"""Provide custom logic for mocking classes that support item assignment.
Args:
key: Key to set the value for.
value: Value to set.
Returns:
Expected return value in replay mode. A MockMethod object for the
__setitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not support item assignment.
UnexpectedMethodCallError if the object does not expect the call to
__setitem__.
"""
# Verify the class supports item assignment.
if '__setitem__' not in dir(self._class_to_mock):
raise TypeError('object does not support item assignment')
# If we are in replay mode then simply call the mock __setitem__ method.
if self._replay_mode:
return MockMethod('__setitem__', self._expected_calls_queue,
self._replay_mode)(key, value)
# Otherwise, create a mock method __setitem__.
return self._CreateMockMethod('__setitem__')(key, value)
def __getitem__(self, key):
"""Provide custom logic for mocking classes that are subscriptable.
Args:
key: Key to return the value for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__getitem__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not subscriptable.
UnexpectedMethodCallError if the object does not expect the call to
__getitem__.
"""
    # Verify the class is subscriptable.
if '__getitem__' not in dir(self._class_to_mock):
raise TypeError('unsubscriptable object')
# If we are in replay mode then simply call the mock __getitem__ method.
if self._replay_mode:
return MockMethod('__getitem__', self._expected_calls_queue,
self._replay_mode)(key)
# Otherwise, create a mock method __getitem__.
return self._CreateMockMethod('__getitem__')(key)
def __iter__(self):
"""Provide custom logic for mocking classes that are iterable.
Returns:
Expected return value in replay mode. A MockMethod object for the
__iter__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class is not iterable.
UnexpectedMethodCallError if the object does not expect the call to
__iter__.
"""
methods = dir(self._class_to_mock)
# Verify the class supports iteration.
if '__iter__' not in methods:
      # If it doesn't have an __iter__ method and we are in replay mode, then try to
# iterate using subscripts.
if '__getitem__' not in methods or not self._replay_mode:
raise TypeError('not iterable object')
else:
results = []
index = 0
try:
while True:
results.append(self[index])
index += 1
except IndexError:
return iter(results)
# If we are in replay mode then simply call the mock __iter__ method.
if self._replay_mode:
return MockMethod('__iter__', self._expected_calls_queue,
self._replay_mode)()
# Otherwise, create a mock method __iter__.
return self._CreateMockMethod('__iter__')()
def __contains__(self, key):
"""Provide custom logic for mocking classes that contain items.
Args:
key: Key to look in container for.
Returns:
Expected return value in replay mode. A MockMethod object for the
__contains__ method that has already been called if not in replay mode.
Raises:
TypeError if the underlying class does not implement __contains__
UnexpectedMethodCaller if the object does not expect the call to
__contains__.
"""
contains = self._class_to_mock.__dict__.get('__contains__', None)
if contains is None:
raise TypeError('unsubscriptable object')
if self._replay_mode:
return MockMethod('__contains__', self._expected_calls_queue,
self._replay_mode)(key)
return self._CreateMockMethod('__contains__')(key)
def __call__(self, *params, **named_params):
"""Provide custom logic for mocking classes that are callable."""
# Verify the class we are mocking is callable.
callable = hasattr(self._class_to_mock, '__call__')
if not callable:
raise TypeError('Not callable')
# Because the call is happening directly on this object instead of a method,
# the call on the mock method is made right here
# If we are mocking a Function, then use the function, and not the
# __call__ method
method = None
if type(self._class_to_mock) in (types.FunctionType, types.MethodType):
method = self._class_to_mock;
else:
method = getattr(self._class_to_mock, '__call__')
mock_method = self._CreateMockMethod('__call__', method_to_mock=method)
return mock_method(*params, **named_params)
@property
def __class__(self):
"""Return the class that is being mocked."""
return self._class_to_mock
@property
def __name__(self):
"""Return the name that is being mocked."""
return self._description
class _MockObjectFactory(MockObject):
"""A MockObjectFactory creates mocks and verifies __init__ params.
A MockObjectFactory removes the boiler plate code that was previously
  necessary to stub out direct instantiation of a class.
The MockObjectFactory creates new MockObjects when called and verifies the
__init__ params are correct when in record mode. When replaying, existing
mocks are returned, and the __init__ params are verified.
See StubOutWithMock vs StubOutClassWithMocks for more detail.
"""
def __init__(self, class_to_mock, mox_instance):
MockObject.__init__(self, class_to_mock)
self._mox = mox_instance
self._instance_queue = deque()
def __call__(self, *params, **named_params):
"""Instantiate and record that a new mock has been created."""
method = getattr(self._class_to_mock, '__init__')
mock_method = self._CreateMockMethod('__init__', method_to_mock=method)
# Note: calling mock_method() is deferred in order to catch the
# empty instance_queue first.
if self._replay_mode:
if not self._instance_queue:
raise UnexpectedMockCreationError(self._class_to_mock, *params,
**named_params)
mock_method(*params, **named_params)
return self._instance_queue.pop()
else:
mock_method(*params, **named_params)
instance = self._mox.CreateMock(self._class_to_mock)
self._instance_queue.appendleft(instance)
return instance
def _Verify(self):
"""Verify that all mocks have been created."""
if self._instance_queue:
raise ExpectedMockCreationError(self._instance_queue)
super(_MockObjectFactory, self)._Verify()
class MethodSignatureChecker(object):
"""Ensures that methods are called correctly."""
_NEEDED, _DEFAULT, _GIVEN = range(3)
def __init__(self, method):
"""Creates a checker.
Args:
# method: A method to check.
method: function
Raises:
ValueError: method could not be inspected, so checks aren't possible.
Some methods and functions like built-ins can't be inspected.
"""
try:
self._args, varargs, varkw, defaults = inspect.getargspec(method)
except TypeError:
raise ValueError('Could not get argument specification for %r'
% (method,))
if inspect.ismethod(method):
self._args = self._args[1:] # Skip 'self'.
self._method = method
self._instance = None # May contain the instance this is bound to.
self._has_varargs = varargs is not None
self._has_varkw = varkw is not None
if defaults is None:
self._required_args = self._args
self._default_args = []
else:
self._required_args = self._args[:-len(defaults)]
self._default_args = self._args[-len(defaults):]
def _RecordArgumentGiven(self, arg_name, arg_status):
"""Mark an argument as being given.
Args:
# arg_name: The name of the argument to mark in arg_status.
# arg_status: Maps argument names to one of _NEEDED, _DEFAULT, _GIVEN.
arg_name: string
arg_status: dict
Raises:
AttributeError: arg_name is already marked as _GIVEN.
"""
if arg_status.get(arg_name, None) == MethodSignatureChecker._GIVEN:
raise AttributeError('%s provided more than once' % (arg_name,))
arg_status[arg_name] = MethodSignatureChecker._GIVEN
def Check(self, params, named_params):
"""Ensures that the parameters used while recording a call are valid.
Args:
# params: A list of positional parameters.
# named_params: A dict of named parameters.
params: list
named_params: dict
Raises:
AttributeError: the given parameters don't work with the given method.
"""
arg_status = dict((a, MethodSignatureChecker._NEEDED)
for a in self._required_args)
for arg in self._default_args:
arg_status[arg] = MethodSignatureChecker._DEFAULT
# WARNING: Suspect hack ahead.
#
# Check to see if this is an unbound method, where the instance
# should be bound as the first argument. We try to determine if
# the first argument (param[0]) is an instance of the class, or it
# is equivalent to the class (used to account for Comparators).
#
# NOTE: If a Func() comparator is used, and the signature is not
# correct, this will cause extra executions of the function.
if inspect.ismethod(self._method):
# The extra param accounts for the bound instance.
if len(params) > len(self._required_args):
expected = getattr(self._method, 'im_class', None)
# Check if the param is an instance of the expected class,
# or check equality (useful for checking Comparators).
# This is a hack to work around the fact that the first
# parameter can be a Comparator, and the comparison may raise
# an exception during this comparison, which is OK.
try:
param_equality = (params[0] == expected)
except:
param_equality = False
if isinstance(params[0], expected) or param_equality:
params = params[1:]
# If the IsA() comparator is being used, we need to check the
# inverse of the usual case - that the given instance is a subclass
# of the expected class. For example, the code under test does
# late binding to a subclass.
elif isinstance(params[0], IsA) and params[0]._IsSubClass(expected):
params = params[1:]
# Check that each positional param is valid.
for i in range(len(params)):
try:
arg_name = self._args[i]
except IndexError:
if not self._has_varargs:
raise AttributeError('%s does not take %d or more positional '
'arguments' % (self._method.__name__, i))
else:
self._RecordArgumentGiven(arg_name, arg_status)
# Check each keyword argument.
for arg_name in named_params:
if arg_name not in arg_status and not self._has_varkw:
raise AttributeError('%s is not expecting keyword argument %s'
% (self._method.__name__, arg_name))
self._RecordArgumentGiven(arg_name, arg_status)
# Ensure all the required arguments have been given.
still_needed = [k for k, v in arg_status.iteritems()
if v == MethodSignatureChecker._NEEDED]
if still_needed:
raise AttributeError('No values given for arguments: %s'
% (' '.join(sorted(still_needed))))
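# Hedged usage sketch (not part of the original mox source; assumes Python 2
# unbound-method semantics): shows MethodSignatureChecker rejecting a call
# that does not match the mocked method's signature.  _ExampleDao and
# _example_signature_check are hypothetical names used only for illustration.
def _example_signature_check():
  class _ExampleDao(object):
    def Query(self, sql, limit=10):
      pass
  checker = MethodSignatureChecker(_ExampleDao.Query)
  checker.Check(['SELECT 1'], {})  # Accepted: 'limit' falls back to its default.
  try:
    checker.Check([], {'bogus': 1})  # Rejected: unknown keyword argument.
  except AttributeError as err:
    print(err)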
class MockMethod(object):
"""Callable mock method.
A MockMethod should act exactly like the method it mocks, accepting parameters
and returning a value, or throwing an exception (as specified). When this
method is called, it can optionally verify whether the called method (name and
signature) matches the expected method.
"""
def __init__(self, method_name, call_queue, replay_mode,
method_to_mock=None, description=None):
"""Construct a new mock method.
Args:
# method_name: the name of the method
# call_queue: deque of calls, verify this call against the head, or add
# this call to the queue.
# replay_mode: False if we are recording, True if we are verifying calls
# against the call queue.
# method_to_mock: The actual method being mocked, used for introspection.
# description: optionally, a descriptive name for this method. Typically
# this is equal to the descriptive name of the method's class.
method_name: str
call_queue: list or deque
replay_mode: bool
method_to_mock: a method object
description: str or None
"""
self._name = method_name
self.__name__ = method_name
self._call_queue = call_queue
if not isinstance(call_queue, deque):
self._call_queue = deque(self._call_queue)
self._replay_mode = replay_mode
self._description = description
self._params = None
self._named_params = None
self._return_value = None
self._exception = None
self._side_effects = None
try:
self._checker = MethodSignatureChecker(method_to_mock)
except ValueError:
self._checker = None
def __call__(self, *params, **named_params):
"""Log parameters and return the specified return value.
If the Mock(Anything/Object) associated with this call is in record mode,
this MockMethod will be pushed onto the expected call queue. If the mock
is in replay mode, this will pop a MockMethod off the top of the queue and
verify this call is equal to the expected call.
Raises:
UnexpectedMethodCall if this call is supposed to match an expected method
call and it does not.
"""
self._params = params
self._named_params = named_params
if not self._replay_mode:
if self._checker is not None:
self._checker.Check(params, named_params)
self._call_queue.append(self)
return self
expected_method = self._VerifyMethodCall()
if expected_method._side_effects:
result = expected_method._side_effects(*params, **named_params)
if expected_method._return_value is None:
expected_method._return_value = result
if expected_method._exception:
raise expected_method._exception
return expected_method._return_value
def __getattr__(self, name):
"""Raise an AttributeError with a helpful message."""
raise AttributeError('MockMethod has no attribute "%s". '
'Did you remember to put your mocks in replay mode?' % name)
def __iter__(self):
"""Raise a TypeError with a helpful message."""
raise TypeError('MockMethod cannot be iterated. '
'Did you remember to put your mocks in replay mode?')
def next(self):
"""Raise a TypeError with a helpful message."""
raise TypeError('MockMethod cannot be iterated. '
'Did you remember to put your mocks in replay mode?')
def _PopNextMethod(self):
"""Pop the next method from our call queue."""
try:
return self._call_queue.popleft()
except IndexError:
raise UnexpectedMethodCallError(self, None)
def _VerifyMethodCall(self):
"""Verify the called method is expected.
This can be an ordered method, or part of an unordered set.
Returns:
The expected mock method.
Raises:
UnexpectedMethodCall if the method called was not expected.
"""
expected = self._PopNextMethod()
# Loop here, because we might have a MethodGroup followed by another
# group.
while isinstance(expected, MethodGroup):
expected, method = expected.MethodCalled(self)
if method is not None:
return method
# This is a mock method, so just check equality.
if expected != self:
raise UnexpectedMethodCallError(self, expected)
return expected
def __str__(self):
params = ', '.join(
[repr(p) for p in self._params or []] +
['%s=%r' % x for x in sorted((self._named_params or {}).items())])
full_desc = "%s(%s) -> %r" % (self._name, params, self._return_value)
if self._description:
full_desc = "%s.%s" % (self._description, full_desc)
return full_desc
def __eq__(self, rhs):
"""Test whether this MockMethod is equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return (isinstance(rhs, MockMethod) and
self._name == rhs._name and
self._params == rhs._params and
self._named_params == rhs._named_params)
def __ne__(self, rhs):
"""Test whether this MockMethod is not equivalent to another MockMethod.
Args:
# rhs: the right hand side of the test
rhs: MockMethod
"""
return not self == rhs
def GetPossibleGroup(self):
"""Returns a possible group from the end of the call queue or None if no
other methods are on the stack.
"""
# Remove this method from the tail of the queue so we can add it to a group.
this_method = self._call_queue.pop()
assert this_method == self
# Determine if the tail of the queue is a group, or just a regular ordered
# mock method.
group = None
try:
group = self._call_queue[-1]
except IndexError:
pass
return group
def _CheckAndCreateNewGroup(self, group_name, group_class):
"""Checks if the last method (a possible group) is an instance of our
group_class. Adds the current method to this group or creates a new one.
Args:
group_name: the name of the group.
group_class: the class used to create instance of this new group
"""
group = self.GetPossibleGroup()
# If this is a group, and it is the correct group, add the method.
if isinstance(group, group_class) and group.group_name() == group_name:
group.AddMethod(self)
return self
# Create a new group and add the method.
new_group = group_class(group_name)
new_group.AddMethod(self)
self._call_queue.append(new_group)
return self
def InAnyOrder(self, group_name="default"):
"""Move this method into a group of unordered calls.
A group of unordered calls must be defined together, and must be executed
in full before the next expected method can be called. There can be
multiple groups that are expected serially, if they are given
different group names. The same group name can be reused if there is a
standard method call, or a group with a different name, spliced between
usages.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, UnorderedGroup)
def MultipleTimes(self, group_name="default"):
"""Move this method into group of calls which may be called multiple times.
A group of repeating calls must be defined together, and must be executed in
full before the next expected method can be called.
Args:
group_name: the name of the unordered group.
Returns:
self
"""
return self._CheckAndCreateNewGroup(group_name, MultipleTimesGroup)
def AndReturn(self, return_value):
"""Set the value to return when this method is called.
Args:
# return_value can be anything.
"""
self._return_value = return_value
return return_value
def AndRaise(self, exception):
"""Set the exception to raise when this method is called.
Args:
# exception: the exception to raise when this method is called.
exception: Exception
"""
self._exception = exception
def WithSideEffects(self, side_effects):
"""Set the side effects that are simulated when this method is called.
Args:
side_effects: A callable which modifies the parameters or other relevant
state which a given test case depends on.
Returns:
Self for chaining with AndReturn and AndRaise.
"""
self._side_effects = side_effects
return self
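# Hedged usage sketch (not part of the original mox source): a MockMethod is
# recorded with AndReturn, replayed, and verified through a Mox-created mock.
# The _Dao class and the query string are made up for illustration.
def _example_record_and_replay():
  class _Dao(object):
    def RunQuery(self, sql):
      pass
  m = Mox()
  dao = m.CreateMock(_Dao)
  dao.RunQuery('SELECT 1').AndReturn(['row'])  # record
  m.ReplayAll()
  assert dao.RunQuery('SELECT 1') == ['row']   # replay
  m.VerifyAll()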
class Comparator:
"""Base class for all Mox comparators.
A Comparator can be used as a parameter to a mocked method when the exact
value is not known. For example, the code you are testing might build up a
long SQL string that is passed to your mock DAO. You're only interested that
the IN clause contains the proper primary keys, so you can set your mock
up as follows:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
Now whatever query is passed in must contain the string 'IN (1, 2, 4, 5)'.
A Comparator may replace one or more parameters, for example:
# return at most 10 rows
mock_dao.RunQuery(StrContains('SELECT'), 10)
or
# Return some non-deterministic number of rows
mock_dao.RunQuery(StrContains('SELECT'), IsA(int))
"""
def equals(self, rhs):
"""Special equals method that all comparators must implement.
Args:
rhs: any python object
"""
raise NotImplementedError('method must be implemented by a subclass.')
def __eq__(self, rhs):
return self.equals(rhs)
def __ne__(self, rhs):
return not self.equals(rhs)
class Is(Comparator):
"""Comparison class used to check identity, instead of equality."""
def __init__(self, obj):
self._obj = obj
def equals(self, rhs):
return rhs is self._obj
def __repr__(self):
return "<is %r (%s)>" % (self._obj, id(self._obj))
class IsA(Comparator):
"""This class wraps a basic Python type or class. It is used to verify
that a parameter is of the given type or class.
Example:
mock_dao.Connect(IsA(DbConnectInfo))
"""
def __init__(self, class_name):
"""Initialize IsA
Args:
class_name: basic python type or a class
"""
self._class_name = class_name
def equals(self, rhs):
"""Check to see if the RHS is an instance of class_name.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return isinstance(rhs, self._class_name)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(rhs) == type(self._class_name)
def _IsSubClass(self, clazz):
"""Check to see if the IsA comparators class is a subclass of clazz.
Args:
# clazz: a class object
Returns:
bool
"""
try:
return issubclass(self._class_name, clazz)
except TypeError:
# Check raw types if there was a type error. This is helpful for
# things like cStringIO.StringIO.
return type(clazz) == type(self._class_name)
def __repr__(self):
return 'mox.IsA(%s) ' % str(self._class_name)
class IsAlmost(Comparator):
"""Comparison class used to check whether a parameter is nearly equal
to a given value. Generally useful for floating point numbers.
Example mock_dao.SetTimeout((IsAlmost(3.9)))
"""
def __init__(self, float_value, places=7):
"""Initialize IsAlmost.
Args:
float_value: The value for making the comparison.
places: The number of decimal places to round to.
"""
self._float_value = float_value
self._places = places
def equals(self, rhs):
"""Check to see if RHS is almost equal to float_value
Args:
rhs: the value to compare to float_value
Returns:
bool
"""
try:
return round(rhs-self._float_value, self._places) == 0
except Exception:
# This is probably because either float_value or rhs is not a number.
return False
def __repr__(self):
return str(self._float_value)
class StrContains(Comparator):
"""Comparison class used to check whether a substring exists in a
string parameter. This can be useful in mocking a database with SQL
passed in as a string parameter, for example.
Example:
mock_dao.RunQuery(StrContains('IN (1, 2, 4, 5)')).AndReturn(mock_result)
"""
def __init__(self, search_string):
"""Initialize.
Args:
# search_string: the string you are searching for
search_string: str
"""
self._search_string = search_string
def equals(self, rhs):
"""Check to see if the search_string is contained in the rhs string.
Args:
# rhs: the right hand side of the test
rhs: object
Returns:
bool
"""
try:
return rhs.find(self._search_string) > -1
except Exception:
return False
def __repr__(self):
return '<str containing \'%s\'>' % self._search_string
class Regex(Comparator):
"""Checks if a string matches a regular expression.
This uses a given regular expression to determine equality.
"""
def __init__(self, pattern, flags=0):
"""Initialize.
Args:
# pattern is the regular expression to search for
pattern: str
# flags passed to re.compile function as the second argument
flags: int
"""
self.regex = re.compile(pattern, flags=flags)
def equals(self, rhs):
"""Check to see if rhs matches regular expression pattern.
Returns:
bool
"""
try:
return self.regex.search(rhs) is not None
except Exception:
return False
def __repr__(self):
s = '<regular expression \'%s\'' % self.regex.pattern
if self.regex.flags:
s += ', flags=%d' % self.regex.flags
s += '>'
return s
class In(Comparator):
"""Checks whether an item (or key) is in a list (or dict) parameter.
Example:
mock_dao.GetUsersInfo(In('expectedUserName')).AndReturn(mock_result)
"""
def __init__(self, key):
"""Initialize.
Args:
# key is any thing that could be in a list or a key in a dict
"""
self._key = key
def equals(self, rhs):
"""Check to see whether key is in rhs.
Args:
rhs: dict
Returns:
bool
"""
try:
return self._key in rhs
except Exception:
return False
def __repr__(self):
return '<sequence or map containing \'%s\'>' % str(self._key)
class Not(Comparator):
"""Checks whether a predicates is False.
Example:
mock_dao.UpdateUsers(Not(ContainsKeyValue('stevepm', stevepm_user_info)))
"""
def __init__(self, predicate):
"""Initialize.
Args:
# predicate: a Comparator instance.
"""
assert isinstance(predicate, Comparator), ("predicate %r must be a"
" Comparator." % predicate)
self._predicate = predicate
def equals(self, rhs):
"""Check to see whether the predicate is False.
Args:
rhs: A value that will be given in argument of the predicate.
Returns:
bool
"""
try:
return not self._predicate.equals(rhs)
except Exception:
return False
def __repr__(self):
return '<not \'%s\'>' % self._predicate
class ContainsKeyValue(Comparator):
"""Checks whether a key/value pair is in a dict parameter.
Example:
mock_dao.UpdateUsers(ContainsKeyValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: a key in a dict
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given key/value pair is in the rhs dict.
Returns:
bool
"""
try:
return rhs[self._key] == self._value
except Exception:
return False
def __repr__(self):
return '<map containing the entry \'%s: %s\'>' % (str(self._key),
str(self._value))
class ContainsAttributeValue(Comparator):
"""Checks whether a passed parameter contains attributes with a given value.
Example:
mock_dao.UpdateSomething(ContainsAttributeValue('stevepm', stevepm_user_info))
"""
def __init__(self, key, value):
"""Initialize.
Args:
# key: an attribute name of an object
# value: the corresponding value
"""
self._key = key
self._value = value
def equals(self, rhs):
"""Check whether the given attribute has a matching value in the rhs object.
Returns:
bool
"""
try:
return getattr(rhs, self._key) == self._value
except Exception:
return False
class SameElementsAs(Comparator):
"""Checks whether sequences contain the same elements (ignoring order).
Example:
mock_dao.ProcessUsers(SameElementsAs(['stevepm', 'salomaki']))
"""
def __init__(self, expected_seq):
"""Initialize.
Args:
expected_seq: a sequence
"""
# Store in case expected_seq is an iterator.
self._expected_list = list(expected_seq)
def equals(self, actual_seq):
"""Check to see whether actual_seq has same elements as expected_seq.
Args:
actual_seq: sequence
Returns:
bool
"""
try:
# Store in case actual_seq is an iterator. We potentially iterate twice:
# once to make the dict, once in the list fallback.
actual_list = list(actual_seq)
except TypeError:
# actual_seq cannot be read as a sequence.
#
# This happens because Mox uses __eq__ both to check object equality (in
# MethodSignatureChecker) and to invoke Comparators.
return False
try:
expected = dict([(element, None) for element in self._expected_list])
actual = dict([(element, None) for element in actual_list])
except TypeError:
# Fall back to slower list-compare if any of the objects are unhashable.
expected = self._expected_list
actual = actual_list
expected.sort()
actual.sort()
return expected == actual
def __repr__(self):
return '<sequence with same elements as \'%s\'>' % self._expected_list
class And(Comparator):
"""Evaluates one or more Comparators on RHS and returns an AND of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Comparator
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether all Comparators are equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if not comparator.equals(rhs):
return False
return True
def __repr__(self):
return '<AND %s>' % str(self._comparators)
class Or(Comparator):
"""Evaluates one or more Comparators on RHS and returns an OR of the results.
"""
def __init__(self, *args):
"""Initialize.
Args:
*args: One or more Mox comparators
"""
self._comparators = args
def equals(self, rhs):
"""Checks whether any Comparator is equal to rhs.
Args:
# rhs: can be anything
Returns:
bool
"""
for comparator in self._comparators:
if comparator.equals(rhs):
return True
return False
def __repr__(self):
return '<OR %s>' % str(self._comparators)
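# Hedged illustration (not part of the original mox source): comparators can
# be composed.  The SQL string and values below are made up.
def _example_comparator_composition():
  sql_check = And(IsA(str), StrContains('IN (1, 2)'))
  assert sql_check == 'SELECT * FROM users WHERE id IN (1, 2)'
  assert not (sql_check == 42)
  assert Or(IsA(int), IsA(float)) == 3.5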
class Func(Comparator):
"""Call a function that should verify the parameter passed in is correct.
You may need the ability to perform more advanced operations on the parameter
in order to validate it. You can use this to have a callable validate any
parameter. The callable should return either True or False.
Example:
def myParamValidator(param):
# Advanced logic here
return True
mock_dao.DoSomething(Func(myParamValidator), True)
"""
def __init__(self, func):
"""Initialize.
Args:
func: callable that takes one parameter and returns a bool
"""
self._func = func
def equals(self, rhs):
"""Test whether rhs passes the function test.
rhs is passed into func.
Args:
rhs: any python object
Returns:
the result of func(rhs)
"""
return self._func(rhs)
def __repr__(self):
return str(self._func)
class IgnoreArg(Comparator):
"""Ignore an argument.
This can be used when we don't care about an argument of a method call.
Example:
# Check if CastMagic is called with 3 as first arg and 'disappear' as third.
mymock.CastMagic(3, IgnoreArg(), 'disappear')
"""
def equals(self, unused_rhs):
"""Ignores arguments and returns True.
Args:
unused_rhs: any python object
Returns:
always returns True
"""
return True
def __repr__(self):
return '<IgnoreArg>'
class Value(Comparator):
"""Compares argument against a remembered value.
To be used in conjunction with Remember comparator. See Remember()
for example.
"""
def __init__(self):
self._value = None
self._has_value = False
def store_value(self, rhs):
self._value = rhs
self._has_value = True
def equals(self, rhs):
if not self._has_value:
return False
else:
return rhs == self._value
def __repr__(self):
if self._has_value:
return "<Value %r>" % self._value
else:
return "<Value>"
class Remember(Comparator):
"""Remembers the argument to a value store.
To be used in conjunction with Value comparator.
Example:
# Remember the argument for one method call.
users_list = Value()
mock_dao.ProcessUsers(Remember(users_list))
# Check argument against remembered value.
mock_dao.ReportUsers(users_list)
"""
def __init__(self, value_store):
if not isinstance(value_store, Value):
raise TypeError("value_store is not an instance of the Value class")
self._value_store = value_store
def equals(self, rhs):
self._value_store.store_value(rhs)
return True
def __repr__(self):
return "<Remember %d>" % id(self._value_store)
class MethodGroup(object):
"""Base class containing common behaviour for MethodGroups."""
def __init__(self, group_name):
self._group_name = group_name
def group_name(self):
return self._group_name
def __str__(self):
return '<%s "%s">' % (self.__class__.__name__, self._group_name)
def AddMethod(self, mock_method):
raise NotImplementedError
def MethodCalled(self, mock_method):
raise NotImplementedError
def IsSatisfied(self):
raise NotImplementedError
class UnorderedGroup(MethodGroup):
"""UnorderedGroup holds a set of method calls that may occur in any order.
This construct is helpful for non-deterministic events, such as iterating
over the keys of a dict.
"""
def __init__(self, group_name):
super(UnorderedGroup, self).__init__(group_name)
self._methods = []
def __str__(self):
return '%s "%s" pending calls:\n%s' % (
self.__class__.__name__,
self._group_name,
"\n".join(str(method) for method in self._methods))
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.append(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so, remove it from the set
# and return it.
for method in self._methods:
if method == mock_method:
# Remove the called mock_method instead of the method in the group.
# The called method will match any comparators when equality is checked
# during removal. The method in the group could pass a comparator to
# another comparator during the equality check.
self._methods.remove(mock_method)
# If this group is not empty, put it back at the head of the queue.
if not self.IsSatisfied():
mock_method._call_queue.appendleft(self)
return self, method
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if there are not any methods in this group."""
return len(self._methods) == 0
class MultipleTimesGroup(MethodGroup):
"""MultipleTimesGroup holds methods that may be called any number of times.
Note: Each method must be called at least once.
This is helpful if you don't know or care how many times a method is called.
"""
def __init__(self, group_name):
super(MultipleTimesGroup, self).__init__(group_name)
self._methods = set()
self._methods_left = set()
def AddMethod(self, mock_method):
"""Add a method to this group.
Args:
mock_method: A mock method to be added to this group.
"""
self._methods.add(mock_method)
self._methods_left.add(mock_method)
def MethodCalled(self, mock_method):
"""Remove a method call from the group.
If the method is not in the set, an UnexpectedMethodCallError will be
raised.
Args:
mock_method: a mock method that should be equal to a method in the group.
Returns:
The mock method from the group
Raises:
UnexpectedMethodCallError if the mock_method was not in the group.
"""
# Check to see if this method exists, and if so add it to the set of
# called methods.
for method in self._methods:
if method == mock_method:
self._methods_left.discard(method)
# Always put this group back on top of the queue, because we don't know
# when we are done.
mock_method._call_queue.appendleft(self)
return self, method
if self.IsSatisfied():
next_method = mock_method._PopNextMethod()
return next_method, None
else:
raise UnexpectedMethodCallError(mock_method, self)
def IsSatisfied(self):
"""Return True if all methods in this group are called at least once."""
return len(self._methods_left) == 0
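# Hedged usage sketch (not part of the original mox source): expectations
# moved into an unordered group with InAnyOrder() may be replayed in any
# order within that group.  The _Dao class and keys are made up.
def _example_unordered_group():
  class _Dao(object):
    def Load(self, key):
      pass
  m = Mox()
  dao = m.CreateMock(_Dao)
  dao.Load('a').InAnyOrder().AndReturn(1)
  dao.Load('b').InAnyOrder().AndReturn(2)
  m.ReplayAll()
  assert dao.Load('b') == 2  # order differs from the recording
  assert dao.Load('a') == 1
  m.VerifyAll()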
class MoxMetaTestBase(type):
"""Metaclass to add mox cleanup and verification to every test.
As the mox unit testing class is being constructed (MoxTestBase or a
subclass), this metaclass will modify all test functions to call the
CleanUpMox method of the test class after they finish. This means that
unstubbing and verifying will happen for every test with no additional code,
and any failures will result in test failures as opposed to errors.
"""
def __init__(cls, name, bases, d):
type.__init__(cls, name, bases, d)
# also get all the attributes from the base classes to account
# for a case when test class is not the immediate child of MoxTestBase
for base in bases:
for attr_name in dir(base):
if attr_name not in d:
d[attr_name] = getattr(base, attr_name)
for func_name, func in d.items():
if func_name.startswith('test') and callable(func):
setattr(cls, func_name, MoxMetaTestBase.CleanUpTest(cls, func))
@staticmethod
def CleanUpTest(cls, func):
"""Adds Mox cleanup code to any MoxTestBase method.
Always unsets stubs after a test. Will verify all mocks for tests that
otherwise pass.
Args:
cls: MoxTestBase or subclass; the class whose test method we are altering.
func: method; the method of the MoxTestBase test class we wish to alter.
Returns:
The modified method.
"""
def new_method(self, *args, **kwargs):
mox_obj = getattr(self, 'mox', None)
stubout_obj = getattr(self, 'stubs', None)
cleanup_mox = False
cleanup_stubout = False
if mox_obj and isinstance(mox_obj, Mox):
cleanup_mox = True
if stubout_obj and isinstance(stubout_obj, stubout.StubOutForTesting):
cleanup_stubout = True
try:
func(self, *args, **kwargs)
finally:
if cleanup_mox:
mox_obj.UnsetStubs()
if cleanup_stubout:
stubout_obj.UnsetAll()
stubout_obj.SmartUnsetAll()
if cleanup_mox:
mox_obj.VerifyAll()
new_method.__name__ = func.__name__
new_method.__doc__ = func.__doc__
new_method.__module__ = func.__module__
return new_method
class MoxTestBase(unittest.TestCase):
"""Convenience test class to make stubbing easier.
Sets up a "mox" attribute which is an instance of Mox (any mox tests will
want this), and a "stubs" attribute that is an instance of StubOutForTesting
(needed at times). Also automatically unsets any stubs and verifies that all
mock methods have been called at the end of each test, eliminating boilerplate
code.
"""
__metaclass__ = MoxMetaTestBase
def setUp(self):
super(MoxTestBase, self).setUp()
self.mox = Mox()
self.stubs = stubout.StubOutForTesting()
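# Hedged usage sketch (not part of the original mox source): a minimal
# MoxTestBase test.  Stubbing os.getcwd and the '/tmp/example' value are
# illustrative choices, not part of the library.
class _ExampleMoxTest(MoxTestBase):
  def testGetcwdIsStubbed(self):
    import os
    self.mox.StubOutWithMock(os, 'getcwd')
    os.getcwd().AndReturn('/tmp/example')
    self.mox.ReplayAll()
    self.assertEqual('/tmp/example', os.getcwd())
    # VerifyAll() and UnsetStubs() run automatically via MoxMetaTestBase.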
|
cloudysunny14/lakshmi
|
test/testlib/mox.py
|
Python
|
apache-2.0
| 60,051
|
#!/usr/bin/env python
# Populate package directories.
#
# python PopulatePackages.py < packages.csv
#
# The input packages.csv table must have these columns:
#
# Package Name,Directory Name,Prefixes,File Numbers,File Names,Globals
#
# Rows with an empty package name specify additional prefixes and
# globals for the most recently named package. Prepend '!' to exclude
# a prefix.
#
#---------------------------------------------------------------------------
# Copyright 2011 The Open Source Electronic Health Record Agent
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
import sys
import os
import csv
import glob
class Package:
def __init__(self, name, path):
self.name = name
self.path = path.strip().replace('/',os.path.sep)
self.included = set()
self.excluded = set()
self.globals = set()
def add_namespace(self, ns):
if ns:
if ns[0] in ('-','!'):
self.excluded.add(ns[1:])
else:
self.included.add(ns)
def add_number(self, n):
if n:
if n[0] == '.':
n = '0' + n
self.globals.add(n) # numbers work just like globals
def add_global(self, g):
if g:
self.globals.add(g)
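# Hedged illustration (not part of the original script): how a Package row
# records included/excluded prefixes and globals.  All names are made up.
def _example_package_prefixes():
    pkg = Package('Kernel', 'Packages/Kernel')
    pkg.add_namespace('XU')    # included prefix
    pkg.add_namespace('!XUP')  # '!' (or '-') marks an excluded prefix
    pkg.add_number('.84')      # stored as '0.84' alongside globals
    pkg.add_global('^XTV')
    return pkg.included, pkg.excluded, pkg.globals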
def order_long_to_short(l,r):
if len(l) > len(r):
return -1
elif len(l) < len(r):
return +1
else:
return cmp(l,r)
def place(src,dst):
sys.stdout.write('%s => %s\n' % (src,dst))
d = os.path.dirname(dst)
if d:
try: os.makedirs(d)
except OSError: pass
os.rename(src,dst)
#-----------------------------------------------------------------------------
def populate(input):
packages_csv = csv.DictReader(input)
# Parse packages and namespaces from CSV table on stdin.
packages = []
pkg = None
for fields in packages_csv:
if fields['Package Name']:
pkg = Package(fields['Package Name'], fields['Directory Name'])
packages.append(pkg)
if pkg:
pkg.add_namespace(fields['Prefixes'])
pkg.add_number(fields['File Numbers'])
pkg.add_global(fields['Globals'])
# Construct "namespace => path" map.
namespaces = {}
for p in packages:
for ns in p.included:
namespaces[ns] = p.path
for ns in p.excluded:
if ns not in namespaces:
namespaces[ns] = None
#-----------------------------------------------------------------------------
# Collect routines and globals in current directory.
routines = set(glob.glob('*.m'))
globals = set(glob.glob('*.zwr'))
#-----------------------------------------------------------------------------
# Map by package namespace (prefix).
for ns in sorted(namespaces.keys(),order_long_to_short):
path = namespaces[ns]
gbls = [gbl for gbl in globals if gbl.startswith(ns)]
rtns = [rtn for rtn in routines if rtn.startswith(ns)]
if (rtns or gbls) and not path:
sys.stderr.write('Namespace "%s" has no path!\n' % ns)
continue
routines.difference_update(rtns)
globals.difference_update(gbls)
for src in sorted(rtns):
place(src,os.path.join(path,'Routines',src))
for src in sorted(gbls):
place(src,os.path.join(path,'Globals',src))
# Map globals explicitly listed in each package.
for p in packages:
gbls = [gbl for gbl in globals
if gbl[:-4].split('+')[0].split('-')[0] in p.globals]
globals.difference_update(gbls)
for src in sorted(gbls):
place(src,os.path.join(p.path,'Globals',src))
# Put leftover routines and globals in Uncategorized package.
for src in routines:
place(src,os.path.join('Uncategorized','Routines',src))
for src in globals:
place(src,os.path.join('Uncategorized','Globals',src))
def main():
populate(sys.stdin)
if __name__ == '__main__':
main()
|
ChristopherEdwards/VistA-FOIA
|
Scripts/PopulatePackages.py
|
Python
|
apache-2.0
| 4,599
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright 2013-2015 clowwindy
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# from ssloop
# https://github.com/clowwindy/ssloop
from __future__ import absolute_import, division, print_function, \
with_statement
import os
import socket
import select
import errno
import logging
from collections import defaultdict
import shell
__all__ = ['EventLoop', 'POLL_NULL', 'POLL_IN', 'POLL_OUT', 'POLL_ERR',
'POLL_HUP', 'POLL_NVAL', 'EVENT_NAMES']
POLL_NULL = 0x00
POLL_IN = 0x01
POLL_OUT = 0x04
POLL_ERR = 0x08
POLL_HUP = 0x10
POLL_NVAL = 0x20
EVENT_NAMES = {
POLL_NULL: 'POLL_NULL',
POLL_IN: 'POLL_IN',
POLL_OUT: 'POLL_OUT',
POLL_ERR: 'POLL_ERR',
POLL_HUP: 'POLL_HUP',
POLL_NVAL: 'POLL_NVAL',
}
class EpollLoop(object):
def __init__(self):
self._epoll = select.epoll()
def poll(self, timeout):
return self._epoll.poll(timeout)
def add_fd(self, fd, mode):
self._epoll.register(fd, mode)
def remove_fd(self, fd):
self._epoll.unregister(fd)
def modify_fd(self, fd, mode):
self._epoll.modify(fd, mode)
class KqueueLoop(object):
MAX_EVENTS = 1024
def __init__(self):
self._kqueue = select.kqueue()
self._fds = {}
def _control(self, fd, mode, flags):
events = []
if mode & POLL_IN:
events.append(select.kevent(fd, select.KQ_FILTER_READ, flags))
if mode & POLL_OUT:
events.append(select.kevent(fd, select.KQ_FILTER_WRITE, flags))
for e in events:
self._kqueue.control([e], 0)
def poll(self, timeout):
if timeout < 0:
timeout = None # kqueue behaviour
events = self._kqueue.control(None, KqueueLoop.MAX_EVENTS, timeout)
results = defaultdict(lambda: POLL_NULL)
for e in events:
fd = e.ident
if e.filter == select.KQ_FILTER_READ:
results[fd] |= POLL_IN
elif e.filter == select.KQ_FILTER_WRITE:
results[fd] |= POLL_OUT
return results.items()
def add_fd(self, fd, mode):
self._fds[fd] = mode
self._control(fd, mode, select.KQ_EV_ADD)
def remove_fd(self, fd):
self._control(fd, self._fds[fd], select.KQ_EV_DELETE)
del self._fds[fd]
def modify_fd(self, fd, mode):
self.remove_fd(fd)
self.add_fd(fd, mode)
class SelectLoop(object):
def __init__(self):
self._r_list = set()
self._w_list = set()
self._x_list = set()
def poll(self, timeout):
r, w, x = select.select(self._r_list, self._w_list, self._x_list,
timeout)
results = defaultdict(lambda: POLL_NULL)
for p in [(r, POLL_IN), (w, POLL_OUT), (x, POLL_ERR)]:
for fd in p[0]:
results[fd] |= p[1]
return results.items()
def add_fd(self, fd, mode):
if mode & POLL_IN:
self._r_list.add(fd)
if mode & POLL_OUT:
self._w_list.add(fd)
if mode & POLL_ERR:
self._x_list.add(fd)
def remove_fd(self, fd):
if fd in self._r_list:
self._r_list.remove(fd)
if fd in self._w_list:
self._w_list.remove(fd)
if fd in self._x_list:
self._x_list.remove(fd)
def modify_fd(self, fd, mode):
self.remove_fd(fd)
self.add_fd(fd, mode)
class EventLoop(object):
def __init__(self):
self._iterating = False
if hasattr(select, 'epoll'):
self._impl = EpollLoop()
model = 'epoll'
elif hasattr(select, 'kqueue'):
self._impl = KqueueLoop()
model = 'kqueue'
elif hasattr(select, 'select'):
self._impl = SelectLoop()
model = 'select'
else:
raise Exception('cannot find any available function in the '
'select package')
self._fd_to_f = {}
self._handlers = []
self._ref_handlers = []
self._handlers_to_remove = []
logging.debug('using event model: %s', model)
def poll(self, timeout=None):
events = self._impl.poll(timeout)
return [(self._fd_to_f[fd], fd, event) for fd, event in events]
def add(self, f, mode):
fd = f.fileno()
self._fd_to_f[fd] = f
self._impl.add_fd(fd, mode)
def remove(self, f):
fd = f.fileno()
del self._fd_to_f[fd]
self._impl.remove_fd(fd)
def modify(self, f, mode):
fd = f.fileno()
self._impl.modify_fd(fd, mode)
def add_handler(self, handler, ref=True):
self._handlers.append(handler)
if ref:
# when all ref handlers are removed, loop stops
self._ref_handlers.append(handler)
def remove_handler(self, handler):
if handler in self._ref_handlers:
self._ref_handlers.remove(handler)
if self._iterating:
self._handlers_to_remove.append(handler)
else:
self._handlers.remove(handler)
def run(self):
events = []
while self._ref_handlers:
try:
events = self.poll(1)
except (OSError, IOError) as e:
if errno_from_exception(e) in (errno.EPIPE, errno.EINTR):
# EPIPE: Happens when the client closes the connection
# EINTR: Happens when received a signal
# handles them as soon as possible
logging.debug('poll:%s', e)
else:
logging.error('poll:%s', e)
import traceback
traceback.print_exc()
continue
self._iterating = True
for handler in self._handlers:
# TODO when there are a lot of handlers
try:
handler(events)
except (OSError, IOError) as e:
shell.print_exception(e)
if self._handlers_to_remove:
for handler in self._handlers_to_remove:
self._handlers.remove(handler)
self._handlers_to_remove = []
self._iterating = False
# from tornado
def errno_from_exception(e):
"""Provides the errno from an Exception object.
There are cases where the errno attribute is not set, so we pull
the errno out of the args; but if someone instantiates an Exception
without any args, indexing into args would fail. This function
abstracts all that behavior to give you a safe way to get the
errno.
"""
if hasattr(e, 'errno'):
return e.errno
elif e.args:
return e.args[0]
else:
return None
# from tornado
def get_sock_error(sock):
error_number = sock.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
return socket.error(error_number, os.strerror(error_number))
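# Hedged usage sketch (not part of the original module): register one end of
# a socket pair with an EventLoop and poll it once.  Requires a platform
# where socket.socketpair() is available.
def _example_poll_once():
    a, b = socket.socketpair()
    loop = EventLoop()
    loop.add(a, POLL_IN)
    b.send(b'x')
    for f, fd, event in loop.poll(1):
        if f is a and event & POLL_IN:
            print('readable:', f.recv(1))
    loop.remove(a)
    a.close()
    b.close()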
|
ilikecola/Shadowsocks-combine-manyuser
|
shadowsocks/eventloop.py
|
Python
|
apache-2.0
| 7,495
|
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
from thrift.Thrift import *
from ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Iface:
def ping(self, name):
"""
Parameters:
- name
"""
pass
class Client(Iface):
def __init__(self, iprot, oprot=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
def ping(self, name):
"""
Parameters:
- name
"""
self.send_ping(name)
return self.recv_ping()
def send_ping(self, name):
self._oprot.writeMessageBegin('ping', TMessageType.CALL, self._seqid)
args = ping_args()
args.name = name
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_ping(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = ping_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "ping failed: unknown result");
class Processor(Iface, TProcessor):
def __init__(self, handler):
self._handler = handler
self._processMap = {}
self._processMap["ping"] = Processor.process_ping
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_ping(self, seqid, iprot, oprot):
args = ping_args()
args.read(iprot)
iprot.readMessageEnd()
result = ping_result()
result.success = self._handler.ping(args.name)
oprot.writeMessageBegin("ping", TMessageType.REPLY, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class ping_args:
"""
Attributes:
- name
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'name', None, None, ), # 1
)
def __init__(self, name=None,):
self.name = name
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.name = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ping_args')
if self.name != None:
oprot.writeFieldBegin('name', TType.STRING, 1)
oprot.writeString(self.name)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class ping_result:
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRING, 'success', None, None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRING:
self.success = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('ping_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.STRING, 0)
oprot.writeString(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
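# Hedged usage sketch (not part of the generated file): wiring the generated
# Client to a socket transport.  The host, port and a running ping server are
# assumptions for illustration only.
def _example_ping():
    from thrift.transport import TSocket
    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9090))
    protocol = TBinaryProtocol.TBinaryProtocol(transport)
    client = Client(protocol)
    transport.open()
    try:
        return client.ping('world')
    finally:
        transport.close()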
|
lenn0x/Milo-Tracing-Framework
|
src/py/examples/helloworld/HelloWorld.py
|
Python
|
apache-2.0
| 5,966
|
## @file
# This file contained the parser for [Sources] sections in INF file
#
# Copyright (c) 2011 - 2018, Intel Corporation. All rights reserved.<BR>
#
# This program and the accompanying materials are licensed and made available
# under the terms and conditions of the BSD License which accompanies this
# distribution. The full text of the license may be found at
# http://opensource.org/licenses/bsd-license.php
#
# THE PROGRAM IS DISTRIBUTED UNDER THE BSD LICENSE ON AN "AS IS" BASIS,
# WITHOUT WARRANTIES OR REPRESENTATIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED.
#
'''
InfSourceSectionParser
'''
##
# Import Modules
#
import Logger.Log as Logger
from Logger import StringTable as ST
from Logger.ToolError import FORMAT_INVALID
from Parser.InfParserMisc import InfExpandMacro
from Library import DataType as DT
from Library.Parsing import MacroParser
from Library.Misc import GetSplitValueList
from Object.Parser.InfCommonObject import InfLineCommentObject
from Parser.InfParserMisc import InfParserSectionRoot
class InfSourceSectionParser(InfParserSectionRoot):
## InfSourceParser
#
#
def InfSourceParser(self, SectionString, InfSectionObject, FileName):
SectionMacros = {}
ValueList = []
SourceList = []
StillCommentFlag = False
HeaderComments = []
LineComment = None
SectionContent = ''
for Line in SectionString:
SrcLineContent = Line[0]
SrcLineNo = Line[1]
if SrcLineContent.strip() == '':
continue
#
# Found Header Comments
#
if SrcLineContent.strip().startswith(DT.TAB_COMMENT_SPLIT):
#
# The previous line was a comment and this line continues it.
#
if StillCommentFlag:
HeaderComments.append(Line)
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
#
# First comment line encountered
#
else:
#
# Clear original data
#
HeaderComments = []
HeaderComments.append(Line)
StillCommentFlag = True
SectionContent += SrcLineContent + DT.END_OF_LINE
continue
else:
StillCommentFlag = False
if len(HeaderComments) >= 1:
LineComment = InfLineCommentObject()
LineCommentContent = ''
for Item in HeaderComments:
LineCommentContent += Item[0] + DT.END_OF_LINE
LineComment.SetHeaderComments(LineCommentContent)
#
# Find Tail comment.
#
if SrcLineContent.find(DT.TAB_COMMENT_SPLIT) > -1:
TailComments = SrcLineContent[SrcLineContent.find(DT.TAB_COMMENT_SPLIT):]
SrcLineContent = SrcLineContent[:SrcLineContent.find(DT.TAB_COMMENT_SPLIT)]
if LineComment is None:
LineComment = InfLineCommentObject()
LineComment.SetTailComments(TailComments)
#
# Find Macro
#
Name, Value = MacroParser((SrcLineContent, SrcLineNo),
FileName,
DT.MODEL_EFI_SOURCE_FILE,
self.FileLocalMacros)
if Name is not None:
SectionMacros[Name] = Value
LineComment = None
HeaderComments = []
continue
#
# Replace with Local section Macro and [Defines] section Macro.
#
SrcLineContent = InfExpandMacro(SrcLineContent,
(FileName, SrcLineContent, SrcLineNo),
self.FileLocalMacros,
SectionMacros)
TokenList = GetSplitValueList(SrcLineContent, DT.TAB_VALUE_SPLIT, 4)
ValueList[0:len(TokenList)] = TokenList
#
# Store section content string after MACRO replaced.
#
SectionContent += SrcLineContent + DT.END_OF_LINE
SourceList.append((ValueList, LineComment,
(SrcLineContent, SrcLineNo, FileName)))
ValueList = []
LineComment = None
TailComments = ''
HeaderComments = []
continue
#
# Current section archs
#
ArchList = []
for Item in self.LastSectionHeaderContent:
if Item[1] not in ArchList:
ArchList.append(Item[1])
InfSectionObject.SetSupArchList(Item[1])
InfSectionObject.SetAllContent(SectionContent)
if not InfSectionObject.SetSources(SourceList, Arch = ArchList):
Logger.Error('InfParser',
FORMAT_INVALID,
ST.ERR_INF_PARSER_MODULE_SECTION_TYPE_ERROR % ("[Sources]"),
File=FileName,
Line=Item[3])
|
google/google-ctf
|
third_party/edk2/BaseTools/Source/Python/UPT/Parser/InfSourceSectionParser.py
|
Python
|
apache-2.0
| 5,413
|
# -*- coding: utf-8 -*-
'''
Novell ASAM Runner
==================
.. versionadded:: Beryllium
Runner to interact with Novell ASAM Fan-Out Driver
:codeauthor: Nitin Madhok <nmadhok@clemson.edu>
To use this runner, set up the Novell Fan-Out Driver URL, username and password in the
master configuration at ``/etc/salt/master`` or ``/etc/salt/master.d/asam.conf``:
.. code-block:: yaml
asam:
prov1.domain.com:
username: "testuser"
password: "verybadpass"
prov2.domain.com:
username: "testuser"
password: "verybadpass"
.. note::
Optionally, ``protocol`` and ``port`` can be specified if the Fan-Out Driver server
is not using the defaults. Default is ``protocol: https`` and ``port: 3451``.
'''
from __future__ import absolute_import
# Import python libs
import logging
# Import third party libs
HAS_LIBS = False
HAS_SIX = False
try:
import requests
from salt.ext.six.moves.html_parser import HTMLParser # pylint: disable=E0611
try:
import salt.ext.six as six
HAS_SIX = True
except ImportError:
# Salt version <= 2014.7.0
try:
import six
except ImportError:
pass
HAS_LIBS = True
class ASAMHTMLParser(HTMLParser): # fix issue #30477
def __init__(self):
HTMLParser.__init__(self)
self.data = []
def handle_starttag(self, tag, attrs):
if tag != "a":
return
for attr in attrs:
if attr[0] != "href":
return
self.data.append(attr[1])
except ImportError:
pass
log = logging.getLogger(__name__)
def __virtual__():
'''
Check for ASAM Fan-Out driver configuration in master config file
or directory and load runner only if it is specified
'''
if not HAS_LIBS or not HAS_SIX:
return False
if _get_asam_configuration() is False:
return False
return True
def _get_asam_configuration(driver_url=''):
'''
Return the configuration read from the master configuration
file or directory
'''
asam_config = __opts__['asam'] if 'asam' in __opts__ else None
if asam_config:
try:
for asam_server, service_config in six.iteritems(asam_config):
username = service_config.get('username', None)
password = service_config.get('password', None)
protocol = service_config.get('protocol', 'https')
port = service_config.get('port', 3451)
if not username or not password:
log.error(
"Username or Password has not been specified in the master "
"configuration for {0}".format(asam_server)
)
return False
ret = {
'platform_edit_url': "{0}://{1}:{2}/config/PlatformEdit.html".format(protocol, asam_server, port),
'platform_config_url': "{0}://{1}:{2}/config/PlatformConfig.html".format(protocol, asam_server, port),
'platformset_edit_url': "{0}://{1}:{2}/config/PlatformSetEdit.html".format(protocol, asam_server, port),
'platformset_config_url': "{0}://{1}:{2}/config/PlatformSetConfig.html".format(protocol, asam_server, port),
'username': username,
'password': password
}
if (not driver_url) or (driver_url == asam_server):
return ret
except Exception as exc:
log.error(
"Exception encountered: {0}".format(exc)
)
return False
if driver_url:
log.error(
"Configuration for {0} has not been specified in the master "
"configuration".format(driver_url)
)
return False
return False
def _make_post_request(url, data, auth, verify=True):
r = requests.post(url, data=data, auth=auth, verify=verify)
if r.status_code != requests.codes.ok:
r.raise_for_status()
else:
return r.text.split('\n')
def _parse_html_content(html_content):
parser = ASAMHTMLParser()
for line in html_content:
if line.startswith("<META"):
html_content.remove(line)
else:
parser.feed(line)
return parser
def _get_platformset_name(data, platform_name):
for item in data:
if platform_name in item and item.startswith('PlatformEdit.html?'):
parameter_list = item.split('&')
for parameter in parameter_list:
if parameter.startswith("platformSetName"):
return parameter.split('=')[1]
return None
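# Hedged illustration (not part of the original runner): how
# _get_platformset_name() extracts the set name from the parsed link list.
# The link below is a made-up sample of the driver's HTML output.
def _example_platformset_lookup():
    links = ['PlatformEdit.html?platformName=my-test-vm&platformSetName=TestSet']
    return _get_platformset_name(links, 'my-test-vm')  # -> 'TestSet'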
def _get_platforms(data):
platform_list = []
for item in data:
if item.startswith('PlatformEdit.html?'):
parameter_list = item.split('PlatformEdit.html?', 1)[1].split('&')
for parameter in parameter_list:
if parameter.startswith("platformName"):
platform_list.append(parameter.split('=')[1])
return platform_list
def _get_platform_sets(data):
platform_set_list = []
for item in data:
if item.startswith('PlatformSetEdit.html?'):
parameter_list = item.split('PlatformSetEdit.html?', 1)[1].split('&')
for parameter in parameter_list:
if parameter.startswith("platformSetName"):
platform_set_list.append(parameter.split('=')[1].replace('%20', ' '))
return platform_set_list
def remove_platform(name, server_url):
'''
To remove specified ASAM platform from the Novell Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.remove_platform my-test-vm prov1.domain.com
'''
config = _get_asam_configuration(server_url)
if not config:
return False
url = config['platform_config_url']
data = {
'manual': 'false',
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to look up existing platforms on {0}".format(server_url)
log.error("{0}:\n{1}".format(err_msg, exc))
return {name: err_msg}
parser = _parse_html_content(html_content)
platformset_name = _get_platformset_name(parser.data, name)
if platformset_name:
log.debug(platformset_name)
data['platformName'] = name
data['platformSetName'] = str(platformset_name)
data['postType'] = 'platformRemove'
data['Submit'] = 'Yes'
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to delete platform from {1}".format(server_url)
log.error("{0}:\n{1}".format(err_msg, exc))
return {name: err_msg}
parser = _parse_html_content(html_content)
platformset_name = _get_platformset_name(parser.data, name)
if platformset_name:
return {name: "Failed to delete platform from {0}".format(server_url)}
else:
return {name: "Successfully deleted platform from {0}".format(server_url)}
else:
return {name: "Specified platform name does not exist on {0}".format(server_url)}
def list_platforms(server_url):
'''
To list all ASAM platforms present on the Novell Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.list_platforms prov1.domain.com
'''
config = _get_asam_configuration(server_url)
if not config:
return False
url = config['platform_config_url']
data = {
'manual': 'false',
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to look up existing platforms"
log.error("{0}:\n{1}".format(err_msg, exc))
return {server_url: err_msg}
parser = _parse_html_content(html_content)
platform_list = _get_platforms(parser.data)
if platform_list:
return {server_url: platform_list}
else:
return {server_url: "No existing platforms found"}
def list_platform_sets(server_url):
'''
To list all ASAM platform sets present on the Novell Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.list_platform_sets prov1.domain.com
'''
config = _get_asam_configuration(server_url)
if not config:
return False
url = config['platformset_config_url']
data = {
'manual': 'false',
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to look up existing platform sets"
log.error("{0}:\n{1}".format(err_msg, exc))
return {server_url: err_msg}
parser = _parse_html_content(html_content)
platform_set_list = _get_platform_sets(parser.data)
if platform_set_list:
return {server_url: platform_set_list}
else:
return {server_url: "No existing platform sets found"}
def add_platform(name, platform_set, server_url):
'''
To add an ASAM platform using the specified ASAM platform set on the Novell
Fan-Out Driver
CLI Example:
.. code-block:: bash
salt-run asam.add_platform my-test-vm test-platform-set prov1.domain.com
'''
config = _get_asam_configuration(server_url)
if not config:
return False
platforms = list_platforms(server_url)
if name in platforms[server_url]:
return {name: "Specified platform already exists on {0}".format(server_url)}
platform_sets = list_platform_sets(server_url)
if platform_set not in platform_sets[server_url]:
return {name: "Specified platform set does not exist on {0}".format(server_url)}
url = config['platform_edit_url']
data = {
'platformName': name,
'platformSetName': platform_set,
'manual': 'false',
'previousURL': '/config/platformAdd.html',
'postType': 'PlatformAdd',
'Submit': 'Apply'
}
auth = (
config['username'],
config['password']
)
try:
html_content = _make_post_request(url, data, auth, verify=False)
except Exception as exc:
err_msg = "Failed to add platform on {0}".format(server_url)
log.error("{0}:\n{1}".format(err_msg, exc))
return {name: err_msg}
platforms = list_platforms(server_url)
if name in platforms[server_url]:
return {name: "Successfully added platform on {0}".format(server_url)}
else:
return {name: "Failed to add platform on {0}".format(server_url)}
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/salt/runners/asam.py
|
Python
|
apache-2.0
| 11,014
|
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import numpy as np
import paddle.fluid.core as core
from paddle.fluid.op import Operator
def create_op(scope, op_type, inputs, outputs, attrs):
kwargs = dict()
op_maker = core.op_proto_and_checker_maker
op_role_attr_name = op_maker.kOpRoleAttrName()
if op_role_attr_name not in attrs:
attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)
def __create_var__(name, var_name):
scope.var(var_name).get_tensor()
kwargs[name].append(var_name)
for in_name, in_dup in Operator.get_op_inputs(op_type):
if in_name in inputs:
kwargs[in_name] = []
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, _ = item[0], item[1]
__create_var__(in_name, sub_in_name)
else:
__create_var__(in_name, in_name)
for out_name, out_dup in Operator.get_op_outputs(op_type):
if out_name in outputs:
kwargs[out_name] = []
if out_dup:
sub_out = outputs[out_name]
for item in sub_out:
sub_out_name, _ = item[0], item[1]
__create_var__(out_name, sub_out_name)
else:
__create_var__(out_name, out_name)
for attr_name in Operator.get_op_attr_names(op_type):
if attr_name in attrs:
kwargs[attr_name] = attrs[attr_name]
return Operator(op_type, **kwargs)
def set_input(scope, op, inputs, place):
def np_value_to_fluid_value(input):
if input.dtype == np.float16:
input = input.view(np.uint16)
return input
def __set_input__(var_name, var):
if isinstance(var, tuple) or isinstance(var, np.ndarray):
tensor = scope.find_var(var_name).get_tensor()
if isinstance(var, tuple):
tensor.set_recursive_sequence_lengths(var[1])
var = var[0]
tensor._set_dims(var.shape)
tensor.set(np_value_to_fluid_value(var), place)
elif isinstance(var, float):
scope.find_var(var_name).set_float(var)
elif isinstance(var, int):
scope.find_var(var_name).set_int(var)
for in_name, in_dup in Operator.get_op_inputs(op.type()):
if in_name in inputs:
if in_dup:
sub_in = inputs[in_name]
for item in sub_in:
sub_in_name, sub_in_val = item[0], item[1]
__set_input__(sub_in_name, sub_in_val)
else:
__set_input__(in_name, inputs[in_name])
def append_input_output(block, op_proto, np_list, is_input, dtype):
'''Insert VarDesc and generate Python variable instance'''
proto_list = op_proto.inputs if is_input else op_proto.outputs
def create_var(block, name, np_list, var_proto):
dtype = None
shape = None
lod_level = None
if name not in np_list:
assert var_proto.intermediate, "{} not found".format(name)
else:
# infer the dtype from the numpy value.
np_value = np_list[name]
if isinstance(np_value, tuple):
dtype = np_value[0].dtype
# output shape and lod should be inferred from the input.
if is_input:
shape = list(np_value[0].shape)
lod_level = len(np_value[1])
else:
dtype = np_value.dtype
if is_input:
shape = list(np_value.shape)
lod_level = 0
# NOTE(dzhwinter): type hacking
# numpy float16 is bound to paddle::platform::float16 in tensor_py.h
# with the help of the uint16 datatype, because the internal memory
# representation of float16 in Paddle is actually uint16_t. We therefore
# use np.uint16 in numpy for the raw memory, which can pass through
# pybind. In the test cases we feed data as data.view(uint16), but the
# dtype is really float16; data.view(uint16) does not cast the values,
# it just reinterprets the same bytes as uint16.
if dtype == np.uint16:
dtype = np.float16
return block.create_var(
dtype=dtype, shape=shape, lod_level=lod_level, name=name)
var_dict = {}
for var_proto in proto_list:
var_name = str(var_proto.name)
if is_input:
if (var_name not in np_list) and var_proto.dispensable:
continue
assert (var_name in np_list) or (var_proto.dispensable), \
"Missing {} as input".format(var_name)
if var_proto.duplicable:
assert isinstance(np_list[var_name], list), \
"Duplicable {} should be set as list".format(var_name)
var_list = []
for (name, np_value) in np_list[var_name]:
var_list.append(
create_var(block, name, {name: np_value}, var_proto))
var_dict[var_name] = var_list
else:
var_dict[var_name] = create_var(block, var_name, np_list, var_proto)
return var_dict
def append_loss_ops(block, output_names):
mean_inputs = list(map(block.var, output_names))
if len(mean_inputs) == 1:
loss = block.create_var(dtype=mean_inputs[0].dtype, shape=[1])
op = block.append_op(
inputs={"X": mean_inputs}, outputs={"Out": loss}, type='mean')
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
else:
avg_sum = []
for cur_loss in mean_inputs:
cur_avg_loss = block.create_var(dtype=cur_loss.dtype, shape=[1])
op = block.append_op(
inputs={"X": [cur_loss]},
outputs={"Out": [cur_avg_loss]},
type="mean")
op.desc.infer_var_type(block.desc)
op.desc.infer_shape(block.desc)
avg_sum.append(cur_avg_loss)
loss_sum = block.create_var(dtype=avg_sum[0].dtype, shape=[1])
op_sum = block.append_op(
inputs={"X": avg_sum}, outputs={"Out": loss_sum}, type='sum')
op_sum.desc.infer_var_type(block.desc)
op_sum.desc.infer_shape(block.desc)
loss = block.create_var(dtype=loss_sum.dtype, shape=[1])
op_loss = block.append_op(
inputs={"X": loss_sum},
outputs={"Out": loss},
type='scale',
attrs={'scale': 1.0 / float(len(avg_sum))})
op_loss.desc.infer_var_type(block.desc)
op_loss.desc.infer_shape(block.desc)
return loss
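# Descriptive note (added): for multiple outputs, append_loss_ops builds
# loss = (mean(out_1) + ... + mean(out_N)) / N, i.e. the average of the
# per-output means, so the scalar loss stays comparable regardless of how
# many outputs are being checked.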
|
QiJune/Paddle
|
python/paddle/fluid/tests/unittests/testsuite.py
|
Python
|
apache-2.0
| 7,265
|
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import flask
from oslo_config import cfg
from oslo_log import log
import oslo_middleware.cors as cors_middleware
from oslo_middleware import request_id
from oslo_service import systemd
import six
import stevedore
from werkzeug import exceptions as werkzeug_exceptions
from sahara.api import acl
from sahara.api.middleware import auth_valid
from sahara.api.middleware import log_exchange
from sahara.api import v10 as api_v10
from sahara.api import v11 as api_v11
from sahara import config
from sahara import context
from sahara.i18n import _LI
from sahara.i18n import _LW
from sahara.plugins import base as plugins_base
from sahara.service import api as service_api
from sahara.service.edp import api as edp_api
from sahara.service import ops as service_ops
from sahara.service import periodic
from sahara.utils import api as api_utils
from sahara.utils.openstack import cinder
from sahara.utils import remote
from sahara.utils import rpc as messaging
from sahara.utils import wsgi
LOG = log.getLogger(__name__)
opts = [
cfg.StrOpt('os_region_name',
help='Region name used to get services endpoints.'),
cfg.StrOpt('infrastructure_engine',
default='heat',
help='An engine which will be used to provision '
'infrastructure for a Hadoop cluster.'),
cfg.StrOpt('remote',
default='ssh',
help='A method for Sahara to execute commands '
'on VMs.'),
cfg.IntOpt('api_workers', default=0,
help="Number of workers for Sahara API service (0 means "
"all-in-one-thread configuration).")
]
CONF = cfg.CONF
CONF.register_opts(opts)
def setup_common(possible_topdir, service_name):
dev_conf = os.path.join(possible_topdir,
'etc',
'sahara',
'sahara.conf')
config_files = None
if os.path.exists(dev_conf):
config_files = [dev_conf]
config.parse_configs(config_files)
log.setup(CONF, "sahara")
# Validate other configurations (that may produce logs) here
cinder.validate_config()
if service_name != 'all-in-one' or cfg.CONF.enable_notifications:
messaging.setup()
plugins_base.setup_plugins()
LOG.info(_LI('Sahara {service} started').format(service=service_name))
def setup_sahara_api(mode):
ops = _get_ops_driver(mode)
service_api.setup_service_api(ops)
edp_api.setup_edp_api(ops)
def setup_sahara_engine():
periodic.setup()
engine = _get_infrastructure_engine()
service_ops.setup_ops(engine)
remote_driver = _get_remote_driver()
remote.setup_remote(remote_driver, engine)
def setup_auth_policy():
acl.setup_policy()
def make_app():
"""App builder (wsgi)
Entry point for Sahara REST API server
"""
app = flask.Flask('sahara.api')
@app.route('/', methods=['GET'])
def version_list():
context.set_ctx(None)
return api_utils.render({
"versions": [
{"id": "v1.0", "status": "SUPPORTED"},
{"id": "v1.1", "status": "CURRENT"}
]
})
@app.teardown_request
def teardown_request(_ex=None):
context.set_ctx(None)
app.register_blueprint(api_v10.rest, url_prefix='/v1.0')
app.register_blueprint(api_v10.rest, url_prefix='/v1.1')
app.register_blueprint(api_v11.rest, url_prefix='/v1.1')
def make_json_error(ex):
status_code = (ex.code
if isinstance(ex, werkzeug_exceptions.HTTPException)
else 500)
description = (ex.description
if isinstance(ex, werkzeug_exceptions.HTTPException)
else str(ex))
return api_utils.render({'error': status_code,
'error_message': description},
status=status_code)
for code in six.iterkeys(werkzeug_exceptions.default_exceptions):
app.error_handler_spec[None][code] = make_json_error
if CONF.debug and not CONF.log_exchange:
LOG.debug('Logging of request/response exchange could be enabled using'
' flag --log-exchange')
# Create a CORS wrapper, and attach sahara-specific defaults that must be
# included in all CORS responses.
app.wsgi_app = cors_middleware.CORS(app.wsgi_app, CONF)
app.wsgi_app.set_latent(
allow_headers=['X-Auth-Token', 'X-Server-Management-Url'],
allow_methods=['GET', 'PUT', 'POST', 'DELETE', 'PATCH'],
expose_headers=['X-Auth-Token', 'X-Server-Management-Url']
)
if CONF.log_exchange:
app.wsgi_app = log_exchange.LogExchange.factory(CONF)(app.wsgi_app)
app.wsgi_app = auth_valid.wrap(app.wsgi_app)
app.wsgi_app = acl.wrap(app.wsgi_app)
app.wsgi_app = request_id.RequestId(app.wsgi_app)
return app
def _load_driver(namespace, name):
extension_manager = stevedore.DriverManager(
namespace=namespace,
name=name,
invoke_on_load=True
)
LOG.info(_LI("Driver {name} successfully loaded").format(name=name))
return extension_manager.driver
def _get_infrastructure_engine():
"""Import and return one of sahara.service.*_engine.py modules."""
LOG.debug("Infrastructure engine {engine} is loading".format(
engine=CONF.infrastructure_engine))
if CONF.infrastructure_engine == "direct":
LOG.warning(_LW("Direct infrastructure engine is deprecated in Liberty"
" release and will be removed after that release."
" Use Heat infrastructure engine instead."))
return _load_driver('sahara.infrastructure.engine',
CONF.infrastructure_engine)
def _get_remote_driver():
LOG.debug("Remote {remote} is loading".format(remote=CONF.remote))
return _load_driver('sahara.remote', CONF.remote)
def _get_ops_driver(driver_name):
LOG.debug("Ops {driver} is loading".format(driver=driver_name))
return _load_driver('sahara.run.mode', driver_name)
def start_server(app):
server = wsgi.Server()
server.start(app)
systemd.notify_once()
server.wait()
|
zhangjunli177/sahara
|
sahara/main.py
|
Python
|
apache-2.0
| 6,825
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
# ----------------------------------------------------------------------------
# Generate Grafeas GAPIC layer
# ----------------------------------------------------------------------------
library = gapic.py_library(
"grafeas", "v1", config_path="/grafeas/artman_grafeas_v1.yaml", include_protos=True
)
excludes = ["README.rst", "nox.py", "setup.py", "docs/index.rst"]
# Make 'grafeas' a namespace
s.move(library / "grafeas", excludes=["__init__.py"])
s.move(library / "docs", excludes=["conf.py", "index.rst"])
s.move(
library / "google/cloud/grafeas_v1/proto",
"grafeas/grafeas_v1/proto",
excludes=excludes,
)
s.move(library / "tests")
# Fix proto imports
s.replace(
["grafeas/**/*.py", "tests/**/*.py"],
"from grafeas\.v1( import \w*_pb2)",
"from grafeas.grafeas_v1.proto\g<1>",
)
s.replace(
"grafeas/**/*_pb2.py",
"from grafeas_v1\.proto( import \w*_pb2)",
"from grafeas.grafeas_v1.proto\g<1>",
)
s.replace(
"grafeas/**/grafeas_pb2_grpc.py",
"from grafeas_v1\.proto",
"from grafeas.grafeas_v1.proto",
)
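# Added clarifying example (not in the original synth script): the three
# replacements above rewrite generated imports such as
#   from grafeas.v1 import grafeas_pb2
# into the namespaced form
#   from grafeas.grafeas_v1.proto import grafeas_pb2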
# Make package name 'grafeas'
s.replace(
"grafeas/grafeas_v1/gapic/grafeas_client.py", "google-cloud-grafeas", "grafeas"
)
# Fix docstrings with no summary lines
s.replace(
"grafeas/grafeas_v1/proto/vulnerability_pb2.py",
r"""(\s+)__doc__ = \"\"\"Attributes:""",
"""\g<1>__doc__=\"\"\"
Attributes:""",
)
# Replace mentions of 'Container Analysis' with 'Grafeas' in the docs
s.replace("docs/**/v*/*.rst", "Container Analysis", "Grafeas")
# ----------------------------------------------------------------------------
# Remove google-specific portions of library
# ----------------------------------------------------------------------------
# Please see this PR https://github.com/googleapis/google-cloud-python/pull/8186/
# Remove default service address, default scopes, default credentials
# Update tests and code in docstrings showing client instantiation.
s.replace(
"grafeas/**/grafeas_client.py",
r""" SERVICE_ADDRESS = 'containeranalysis\.googleapis\.com:443'
\"\"\"The default address of the service\.\"\"\"""",
"",
)
s.replace(
"grafeas/**/grafeas_client.py",
r""" def __init__\(self, transport=None, channel=None, credentials=None,
client_config=None, client_info=None, client_options=None\):""",
" def __init__(self, transport, client_config=None, client_info=None):",
)
s.replace(
"grafeas/**/grafeas_client.py",
r"""Union\[~\.GrafeasGrpcTransport,
Callable\[\[~\.Credentials, type], ~\.GrafeasGrpcTransport\]""",
"""~.GrafeasGrpcTransport""",
)
s.replace(
"grafeas/**/grafeas_client.py",
r""" channel \(grpc\.Channel\): DEPRECATED\. A ``Channel`` instance
through which to make calls\. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception\.
credentials \(google\.auth\.credentials\.Credentials\): The
authorization credentials to attach to requests\. These
credentials identify this application to the service\. If none
are specified, the client will attempt to ascertain the
credentials from the environment\.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception\.""",
"",
)
# Remove client_options
# api_endpoint is currently the only option and doesn't make sense for Grafeas.
s.replace("grafeas/**/grafeas_client.py", "import google.api_core.client_options\n", "")
s.replace(
"grafeas/**/grafeas_client.py",
r""" client_options \(Union\[dict, google\.api_core\.client_options\.ClientOptions\]\):
Client options used to set user options on the client\. API Endpoint
should be set through client_options\.
\"\"\"""",
" \"\"\""
)
s.replace(
"grafeas/**/grafeas_client.py",
r"""if channel:
warnings\.warn\('The `channel` argument is deprecated; use '
'`transport` instead\.',
PendingDeprecationWarning, stacklevel=2\)
api_endpoint = self\.SERVICE_ADDRESS
if client_options:
if type\(client_options\) == dict:
client_options = google\.api_core\.client_options\.from_dict\(client_options\)
if client_options\.api_endpoint:
api_endpoint = client_options\.api_endpoint
\# Instantiate the transport\.
\# The transport is responsible for handling serialization and
\# deserialization and actually sending data to the service\.
if transport:
if callable\(transport\):
self\.transport = transport\(
credentials=credentials,
default_class=grafeas_grpc_transport\.GrafeasGrpcTransport,
address=api_endpoint,
\)
else:
if credentials:
raise ValueError\(
'Received both a transport instance and '
'credentials; these are mutually exclusive\.'
\)
self\.transport = transport
else:
self\.transport = grafeas_grpc_transport\.GrafeasGrpcTransport\(
address=api_endpoint,
channel=channel,
credentials=credentials,
\)""",
"""# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
self.transport = transport""",
)
s.replace(
"grafeas/**/grafeas_client.py",
r""" Example:
>>> from grafeas import grafeas_v1
>>>
>>> client = grafeas_v1\.GrafeasClient\(\)""",
""" Example:
>>> from grafeas import grafeas_v1
>>> from grafeas.grafeas_v1.gapic.transports import grafeas_grpc_transport
>>>
>>> address = "[SERVICE_ADDRESS]"
>>> scopes = ("[SCOPE]")
>>> transport = grafeas_grpc_transport.GrafeasGrpcTransport(address, scopes)
>>> client = grafeas_v1.GrafeasClient(transport)""",
)
s.replace(
"grafeas/**/grafeas_client.py",
r''' @classmethod
def from_service_account_file\(cls, filename, \*args, \*\*kwargs\):
"""Creates an instance of this client using the provided credentials
file\.
Args:
filename \(str\): The path to the service account private key json
file\.
args: Additional arguments to pass to the constructor\.
kwargs: Additional arguments to pass to the constructor\.
Returns:
GrafeasClient: The constructed client\.
"""
credentials = service_account\.Credentials\.from_service_account_file\(
filename\)
kwargs\['credentials'\] = credentials
return cls\(\*args, \*\*kwargs\)
from_service_account_json = from_service_account_file''',
"")
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" \# The scopes needed to make gRPC calls to all of the methods defined
\# in this service\.
_OAUTH_SCOPES = \(
'https://www\.googleapis\.com/auth/cloud-platform',
\)""",
"",
)
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" def __init__\(self, channel=None, credentials=None,
address='containeranalysis\.googleapis\.com:443'\):""",
""" def __init__(self, address, scopes, channel=None, credentials=None):""",
)
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" \# Create the channel\.
if channel is None:
channel = self\.create_channel\(
address=address,
credentials=credentials,
""",
""" # Create the channel.
if channel is None:
channel = self.create_channel(
address,
scopes,
credentials=credentials,
""",
)
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" def create_channel\(
cls,
address='containeranalysis\.googleapis\.com:443',
credentials=None,
\*\*kwargs\):""",
""" def create_channel(
cls,
address,
scopes,
credentials=None,
**kwargs):""",
)
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" Args:
address \(str\): The host for the channel to use\.
credentials \(~\.Credentials\): The
authorization credentials to attach to requests\. These
credentials identify this application to the service\. If
none are specified, the client will attempt to ascertain
the credentials from the environment\.""",
""" Args:
address (str): The host for the channel to use.
scopes (Sequence[str]): The scopes needed to make gRPC calls.
credentials (~.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If
none are specified, the client will attempt to ascertain
the credentials from the environment.""",
)
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" return google\.api_core\.grpc_helpers\.create_channel\(
address,
credentials=credentials,
scopes=cls\._OAUTH_SCOPES,
\*\*kwargs
\)""",
""" return google.api_core.grpc_helpers.create_channel(
address,
credentials=credentials,
scopes=scopes,
**kwargs
)""",
)
s.replace(
"grafeas/**/grafeas_grpc_transport.py",
r""" \"\"\"Instantiate the transport class\.
Args:
channel \(grpc\.Channel\): A ``Channel`` instance through
which to make calls\. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception\.
credentials \(google\.auth\.credentials\.Credentials\): The
authorization credentials to attach to requests\. These
credentials identify this application to the service\. If none
are specified, the client will attempt to ascertain the
credentials from the environment\.
address \(str\): The address where the service is hosted\.""",
''' """Instantiate the transport class.
Args:
address (str): The address where the service is hosted.
scopes (Sequence[str]): The scopes needed to make gRPC calls.
channel (grpc.Channel): A ``Channel`` instance through
which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
''',
)
s.replace(
"tests/**/test_grafeas_client_v1.py",
r"""from grafeas\.grafeas_v1\.proto import grafeas_pb2""",
r"""from grafeas.grafeas_v1.proto import grafeas_pb2
from grafeas.grafeas_v1.gapic.transports import grafeas_grpc_transport""",
)
s.replace(
"tests/**/test_grafeas_client_v1.py",
r"(\s+)client = grafeas_v1\.GrafeasClient\(\)",
r"""\g<1>address = "[SERVICE_ADDRESS]"
\g<1>scopes = ("SCOPE")
\g<1>transport = grafeas_grpc_transport.GrafeasGrpcTransport(address, scopes)
\g<1>client=grafeas_v1.GrafeasClient(transport)""",
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(unit_cov_level=78, cov_level=78)
s.move(templated_files, excludes=["noxfile.py"])
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
tswast/google-cloud-python
|
grafeas/synth.py
|
Python
|
apache-2.0
| 13,298
|
# Copyright (c) 2012 OpenStack Foundation.
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import copy
import errno
import gc
import os
import pprint
import socket
import sys
import traceback
import eventlet
import eventlet.backdoor
import greenlet
from oslo.config import cfg
from neutron_fwaas.openstack.common._i18n import _LI
from neutron_fwaas.openstack.common import log as logging
help_for_backdoor_port = (
"Acceptable values are 0, <port>, and <start>:<end>, where 0 results "
"in listening on a random tcp port number; <port> results in listening "
"on the specified port number (and not enabling backdoor if that port "
"is in use); and <start>:<end> results in listening on the smallest "
"unused port number within the specified range of port numbers. The "
"chosen port is displayed in the service's log file.")
eventlet_backdoor_opts = [
cfg.StrOpt('backdoor_port',
help="Enable eventlet backdoor. %s" % help_for_backdoor_port)
]
CONF = cfg.CONF
CONF.register_opts(eventlet_backdoor_opts)
LOG = logging.getLogger(__name__)
def list_opts():
"""Entry point for oslo.config-generator.
"""
return [(None, copy.deepcopy(eventlet_backdoor_opts))]
class EventletBackdoorConfigValueError(Exception):
def __init__(self, port_range, help_msg, ex):
msg = ('Invalid backdoor_port configuration %(range)s: %(ex)s. '
'%(help)s' %
{'range': port_range, 'ex': ex, 'help': help_msg})
super(EventletBackdoorConfigValueError, self).__init__(msg)
self.port_range = port_range
def _dont_use_this():
print("Don't use this, just disconnect instead")
def _find_objects(t):
return [o for o in gc.get_objects() if isinstance(o, t)]
def _print_greenthreads():
for i, gt in enumerate(_find_objects(greenlet.greenlet)):
print(i, gt)
traceback.print_stack(gt.gr_frame)
print()
def _print_nativethreads():
for threadId, stack in sys._current_frames().items():
print(threadId)
traceback.print_stack(stack)
print()
def _parse_port_range(port_range):
if ':' not in port_range:
start, end = port_range, port_range
else:
start, end = port_range.split(':', 1)
try:
start, end = int(start), int(end)
if end < start:
raise ValueError
return start, end
except ValueError as ex:
raise EventletBackdoorConfigValueError(port_range,
help_for_backdoor_port, ex)
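# Illustrative behaviour (added comment, not in the original module):
#   _parse_port_range("8000")      -> (8000, 8000)
#   _parse_port_range("8000:8010") -> (8000, 8010)
# and a reversed range such as "8010:8000" raises
# EventletBackdoorConfigValueError.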
def _listen(host, start_port, end_port, listen_func):
try_port = start_port
while True:
try:
return listen_func((host, try_port))
except socket.error as exc:
if (exc.errno != errno.EADDRINUSE or
try_port >= end_port):
raise
try_port += 1
def initialize_if_enabled():
backdoor_locals = {
'exit': _dont_use_this, # So we don't exit the entire process
'quit': _dont_use_this, # So we don't exit the entire process
'fo': _find_objects,
'pgt': _print_greenthreads,
'pnt': _print_nativethreads,
}
if CONF.backdoor_port is None:
return None
start_port, end_port = _parse_port_range(str(CONF.backdoor_port))
# NOTE(johannes): The standard sys.displayhook will print the value of
# the last expression and set it to __builtin__._, which overwrites
# the __builtin__._ that gettext sets. Let's switch to using pprint
# since it won't interact poorly with gettext, and it's easier to
# read the output too.
def displayhook(val):
if val is not None:
pprint.pprint(val)
sys.displayhook = displayhook
sock = _listen('localhost', start_port, end_port, eventlet.listen)
# In the case of backdoor port being zero, a port number is assigned by
# listen(). In any case, pull the port number out here.
port = sock.getsockname()[1]
LOG.info(
_LI('Eventlet backdoor listening on %(port)s for process %(pid)d') %
{'port': port, 'pid': os.getpid()}
)
eventlet.spawn_n(eventlet.backdoor.backdoor_server, sock,
locals=backdoor_locals)
return port
|
citrix-openstack-build/neutron-fwaas
|
neutron_fwaas/openstack/common/eventlet_backdoor.py
|
Python
|
apache-2.0
| 4,924
|
#!/usr/bin/env python
#
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Invokes Asset Inventory API to export resources, and IAM policies.
For more information on the Cloud Asset Inventory API see:
https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/overview
"""
from __future__ import print_function
import argparse
import logging
import pprint
from concurrent import futures
from google.cloud.exceptions import GoogleCloudError
from google.cloud import asset_v1
class Clients(object):
"""Holds API client objects."""
_cloudasset = None
@classmethod
def cloudasset(cls):
if cls._cloudasset:
return cls._cloudasset
cls._cloudasset = asset_v1.AssetServiceClient()
return cls._cloudasset
def export_to_gcs(parent, gcs_destination, content_type, asset_types):
"""Exports assets to GCS destination.
Invoke either the cloudasset.organizations.exportAssets or
cloudasset.projects.exportAssets method depending on whether parent is a
project or an organization.
Args:
parent: Either `project/<project-id>` or `organization/<organization#>`.
gcs_destination: GCS uri to export to.
content_type: Either `RESOURCE` or `IAM_POLICY` or
None/`CONTENT_TYPE_UNSPECIFIED` for just asset names.
asset_types: None for all asset types or a list of asset names to
export.
Returns:
The result of the successfully completed export operation.
"""
output_config = asset_v1.types.OutputConfig()
output_config.gcs_destination.uri = gcs_destination
operation = Clients.cloudasset().export_assets(
parent,
output_config,
content_type=content_type,
asset_types=asset_types)
return operation.result()
def export_to_gcs_content_types(parent, gcs_destination, content_types,
asset_types):
"""Export each asset type into a GCS object with the GCS prefix.
Will call `export_to_gcs` concurrently to perform an export, once for each
content_type.
Args:
parent: Project id or organization number.
gcs_destination: GCS object prefix to export to (gs://bucket/prefix)
content_types: List of content types to export (`CONTENT_TYPE_UNSPECIFIED`,
`RESOURCE`, `IAM_POLICY`). Defaults to [RESOURCE, IAM_POLICY].
asset_types: List of asset_types to export. Supply `None` to get
everything.
Returns:
A dict of content_types and export result objects.
"""
logging.info('performing export from %s to %s of content_types %s',
parent, gcs_destination, str(content_types))
if asset_types == ['*']:
asset_types = None
if content_types is None:
content_types = ['RESOURCE', 'IAM_POLICY']
with futures.ThreadPoolExecutor(max_workers=3) as executor:
export_futures = {
executor.submit(export_to_gcs, parent, '{}/{}.json'.format(
gcs_destination, content_type), content_type, asset_types):
content_type
for content_type in content_types
}
operation_results = {}
for future in futures.as_completed(export_futures):
try:
content_type = export_futures[future]
operation_results[content_type] = future.result()
except GoogleCloudError:
content_type = export_futures[future]
logging.exception('Error exporting %s', content_type)
raise
logging.info('export results: %s', pprint.pformat(operation_results))
return operation_results
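# Hedged usage sketch (added; the project id, bucket, and prefix are
# illustrative, not taken from the original file):
#   results = export_to_gcs_content_types(
#       'projects/my-project-id',
#       'gs://my-bucket/exports',
#       ['RESOURCE', 'IAM_POLICY'],
#       asset_types=None)
#   # -> {'RESOURCE': <export operation result>,
#   #     'IAM_POLICY': <export operation result>}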
def add_argparse_args(ap, required=False):
"""Configure the `argparse.ArgumentParser`."""
ap.formatter_class = argparse.RawTextHelpFormatter
# pylint: disable=line-too-long
ap.description = (
'Exports google cloud organization or project assets '
'to a gcs bucket or bigquery. See:\n'
'https://cloud.google.com/resource-manager/docs/cloud-asset-inventory/overview\n\n'
'This MUST be run with a service account owned by a project with the '
'Cloud Asset API enabled. The gcloud generated user credentials'
' do not work. This requires:\n\n'
' 1. Enable the Cloud Asset Inventory API on a project (https://console.cloud.google.com/apis/api/cloudasset.googleapis.com/overview)\n'
' 2. Create a service account owned by this project\n'
' 3. Give the service account roles/cloudasset.viewer at the organization layer\n'
' 4. Run on a GCE instance started with this service account,\n'
' or download the private key and set GOOGLE_APPLICATION_CREDENTIALS to the file name\n'
' 5. Run this command.\n\n'
'If the GCS bucket being written to is owned by a different project than'
' the project that you enabled the API on, then you must also grant the'
' "service-<project-id>@gcp-sa-cloudasset.iam.gserviceaccount.com" account'
' objectAdmin privileges to the bucket:\n'
' gsutil iam ch serviceAccount:service-<project-id>@gcp-sa-cloudasset.iam.gserviceaccount.com:objectAdmin gs://<bucket>\n'
'\n\n')
ap.add_argument(
'--parent',
required=required,
help=('Organization number (organizations/123) '
'or project id (projects/id) or number (projects/123).'))
ap.add_argument(
'--gcs-destination', help='URL of the gcs file to write to.',
required=required)
def content_types_argument(string):
valid_content_types = [
'CONTENT_TYPE_UNSPECIFIED', 'RESOURCE', 'IAM_POLICY'
]
content_types = [x.strip() for x in string.split(',')]
for content_type in content_types:
if content_type not in valid_content_types:
raise argparse.ArgumentTypeError(
'invalid content_type {}'.format(content_type))
return content_types
ap.add_argument(
'--content-types',
help=('Type of content to output for each asset, a comma-separated list '
'of `CONTENT_TYPE_UNSPECIFIED`, `RESOURCE`, `IAM_POLICY`; '
'defaults to `RESOURCE, IAM_POLICY`.'),
type=content_types_argument,
default='RESOURCE, IAM_POLICY',
nargs='?')
ap.add_argument(
'--asset-types',
help=('Comma-separated list of asset types to export, such as '
'"google.compute.Firewall,google.compute.HealthCheck"; '
'default is `*` for everything.'),
type=lambda x: [y.strip() for y in x.split(',')],
nargs='?')
def main():
logging.basicConfig()
logging.getLogger().setLevel(logging.INFO)
ap = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
add_argparse_args(ap, required=True)
args = ap.parse_args()
logging.info('Exporting assets.')
export_result = export_to_gcs_content_types(
args.parent,
args.gcs_destination,
args.content_types,
asset_types=args.asset_types.split(',') if args.asset_types else None)
logging.info('Export results %s.', pprint.pformat(export_result))
if __name__ == '__main__':
main()
|
CloudVLab/professional-services
|
tools/asset-inventory/asset_inventory/export.py
|
Python
|
apache-2.0
| 7,690
|
"""Provides a UPNP discovery method that mimicks Hue hubs."""
import threading
import socket
import logging
import select
from aiohttp import web
from homeassistant import core
from homeassistant.components.http import HomeAssistantView
_LOGGER = logging.getLogger(__name__)
class DescriptionXmlView(HomeAssistantView):
"""Handles requests for the description.xml file."""
url = '/description.xml'
name = 'description:xml'
requires_auth = False
def __init__(self, config):
"""Initialize the instance of the view."""
self.config = config
@core.callback
def get(self, request):
"""Handle a GET request."""
xml_template = """<?xml version="1.0" encoding="UTF-8" ?>
<root xmlns="urn:schemas-upnp-org:device-1-0">
<specVersion>
<major>1</major>
<minor>0</minor>
</specVersion>
<URLBase>http://{0}:{1}/</URLBase>
<device>
<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>
<friendlyName>HASS Bridge ({0})</friendlyName>
<manufacturer>Royal Philips Electronics</manufacturer>
<manufacturerURL>http://www.philips.com</manufacturerURL>
<modelDescription>Philips hue Personal Wireless Lighting</modelDescription>
<modelName>Philips hue bridge 2015</modelName>
<modelNumber>BSB002</modelNumber>
<modelURL>http://www.meethue.com</modelURL>
<serialNumber>1234</serialNumber>
<UDN>uuid:2f402f80-da50-11e1-9b23-001788255acc</UDN>
</device>
</root>
"""
resp_text = xml_template.format(
self.config.advertise_ip, self.config.advertise_port)
return web.Response(text=resp_text, content_type='text/xml')
class UPNPResponderThread(threading.Thread):
"""Handle responding to UPNP/SSDP discovery requests."""
_interrupted = False
def __init__(self, host_ip_addr, listen_port, upnp_bind_multicast,
advertise_ip, advertise_port):
"""Initialize the class."""
threading.Thread.__init__(self)
self.host_ip_addr = host_ip_addr
self.listen_port = listen_port
self.upnp_bind_multicast = upnp_bind_multicast
# Note that the double newline at the end of
# this string is required per the SSDP spec
resp_template = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://{0}:{1}/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/0.1
hue-bridgeid: 1234
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:Socket-1_0-221438K0100073::urn:schemas-upnp-org:device:basic:1
"""
self.upnp_response = resp_template.format(
advertise_ip, advertise_port).replace("\n", "\r\n") \
.encode('utf-8')
def run(self):
"""Run the server."""
# Listen for UDP port 1900 packets sent to SSDP multicast address
ssdp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ssdp_socket.setblocking(False)
# Required for receiving multicast
ssdp_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
ssdp_socket.setsockopt(
socket.SOL_IP,
socket.IP_MULTICAST_IF,
socket.inet_aton(self.host_ip_addr))
ssdp_socket.setsockopt(
socket.SOL_IP,
socket.IP_ADD_MEMBERSHIP,
socket.inet_aton("239.255.255.250") +
socket.inet_aton(self.host_ip_addr))
if self.upnp_bind_multicast:
ssdp_socket.bind(("", 1900))
else:
ssdp_socket.bind((self.host_ip_addr, 1900))
while True:
if self._interrupted:
clean_socket_close(ssdp_socket)
return
try:
read, _, _ = select.select(
[ssdp_socket], [],
[ssdp_socket], 2)
if ssdp_socket in read:
data, addr = ssdp_socket.recvfrom(1024)
else:
# most likely the timeout, so check for interrupt
continue
except socket.error as ex:
if self._interrupted:
clean_socket_close(ssdp_socket)
return
_LOGGER.error("UPNP Responder socket exception occured: %s",
ex.__str__)
# without the following continue, a second exception occurs
# because the data object has not been initialized
continue
if "M-SEARCH" in data.decode('utf-8'):
# SSDP M-SEARCH method received, respond to it with our info
resp_socket = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
resp_socket.sendto(self.upnp_response, addr)
resp_socket.close()
def stop(self):
"""Stop the server."""
# Signal the run() loop to stop and wait for it to exit
self._interrupted = True
self.join()
def clean_socket_close(sock):
"""Close a socket connection and logs its closure."""
_LOGGER.info("UPNP responder shutting down.")
sock.close()
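# Added background note (hedged, not from the original module): the responder
# above answers SSDP discovery requests that look roughly like
#   M-SEARCH * HTTP/1.1
#   HOST: 239.255.255.250:1900
#   MAN: "ssdp:discover"
#   MX: 2
#   ST: ssdp:all
# by sending back the pre-rendered upnp_response built in __init__.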
|
ct-23/home-assistant
|
homeassistant/components/emulated_hue/upnp.py
|
Python
|
apache-2.0
| 5,032
|
import os.path
RNASEQ_PIPELINE_DIR = os.path.dirname(__file__)
|
montilab/Hydra
|
hydra_pkg/__init__.py
|
Python
|
apache-2.0
| 65
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utility functions to save/load keras Model to/from SavedModel."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import six
from tensorflow.python.client import session
from tensorflow.python.estimator import keras as estimator_keras_util
from tensorflow.python.estimator import model_fn as model_fn_lib
from tensorflow.python.estimator.export import export as export_helpers
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import models as models_lib
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.engine import sequential
from tensorflow.python.keras.metrics import Metric
from tensorflow.python.keras.models import model_from_json
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.saved_model import builder as saved_model_builder
from tensorflow.python.saved_model import constants
from tensorflow.python.saved_model import utils_impl as saved_model_utils
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training.checkpointable import util as checkpointable_utils
from tensorflow.python.util import compat
def save_keras_model(
model, saved_model_path, custom_objects=None, as_text=None):
"""Save a `tf.keras.Model` into Tensorflow SavedModel format.
`save_model` generates new files/folders under the `saved_model_path` folder:
1) an asset folder containing the json string of the model's
configuration (topology).
2) a checkpoint containing the model weights.
3) a saved_model.pb file containing the model's MetaGraphs. The prediction
graph is always exported. The evaluation and training graphs are exported
if the following conditions are met:
- Evaluation: model loss is defined.
- Training: model is compiled with an optimizer defined under `tf.train`.
This is because `tf.keras.optimizers.Optimizer` instances cannot be
saved to checkpoints.
Model Requirements:
- Model must be a sequential model or functional model. Subclassed models
cannot be saved via this function, unless you provide an implementation for
get_config() and from_config().
- All variables must be saveable by the model. In general, this condition is
met through the use of layers defined in the keras library. However,
there is currently a bug with variables created in Lambda layer functions
not being saved correctly (see
https://github.com/keras-team/keras/issues/9740).
Note that each mode is exported in separate graphs, so different modes do not
share variables. To use the train graph with evaluation or prediction graphs,
create a new checkpoint if variable values have been updated.
Example:
```python
import tensorflow as tf
# Create a tf.keras model.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(1, input_shape=[10]))
model.summary()
# Save the tf.keras model in the SavedModel format.
saved_to_path = tf.contrib.saved_model.save_keras_model(
model, '/tmp/my_simple_tf_keras_saved_model')
# Load the saved keras model back.
model_prime = tf.contrib.saved_model.load_keras_model(saved_to_path)
model_prime.summary()
```
Args:
model: A `tf.keras.Model` to be saved.
saved_model_path: a string specifying the path to the SavedModel directory.
The SavedModel will be saved to a timestamped folder created within this
directory.
custom_objects: Optional dictionary mapping string names to custom classes
or functions (e.g. custom loss functions).
as_text: whether to write the `SavedModel` proto in text format.
Returns:
String path to the SavedModel folder, a subdirectory of `saved_model_path`.
Raises:
NotImplementedError: If the model is a subclassed model.
ValueError: If a Sequential model does not have input shapes defined by the
user, and is not built.
"""
if not model._is_graph_network:
if isinstance(model, sequential.Sequential):
# If the input shape is not directly set in the model, the exported model
# will assume that the inputs have the same shape as the shape the model
# was built with.
if not model.built:
raise ValueError(
'Sequential model must be built before it can be exported.')
else:
raise NotImplementedError(
'Exporting subclassed models is not yet supported.')
export_dir = export_helpers.get_timestamped_export_dir(saved_model_path)
temp_export_dir = export_helpers.get_temp_export_dir(export_dir)
builder = saved_model_builder.SavedModelBuilder(temp_export_dir)
# Manually save variables to export them in an object-based checkpoint. This
# skips the `builder.add_meta_graph_and_variables()` step, which saves a
# named-based checkpoint.
# TODO(b/113134168): Add fn to Builder to save with object-based saver.
# TODO(b/113178242): This should only export the model json structure. Only
# one save is needed once the weights can be copied from the model to clone.
checkpoint_path = _export_model_json_and_variables(model, temp_export_dir)
# Export each mode. Use ModeKeys enums defined for `Estimator` to ensure that
# Keras models and `Estimator`s are exported with the same format.
# Every time a mode is exported, the code checks to see if new variables have
# been created (e.g. optimizer slot variables). If that is the case, the
# checkpoint is re-saved to include the new variables.
export_args = {'builder': builder,
'model': model,
'custom_objects': custom_objects,
'checkpoint_path': checkpoint_path}
has_saved_vars = False
if model.optimizer:
if isinstance(model.optimizer, optimizers.TFOptimizer):
_export_mode(model_fn_lib.ModeKeys.TRAIN, has_saved_vars, **export_args)
has_saved_vars = True
_export_mode(model_fn_lib.ModeKeys.EVAL, has_saved_vars, **export_args)
else:
logging.warning(
'Model was compiled with an optimizer, but the optimizer is not from '
'`tf.train` (e.g. `tf.train.AdagradOptimizer`). Only the serving '
'graph was exported. The train and evaluate graphs were not added to '
'the SavedModel.')
_export_mode(model_fn_lib.ModeKeys.PREDICT, has_saved_vars, **export_args)
builder.save(as_text)
gfile.Rename(temp_export_dir, export_dir)
return export_dir
def _export_model_json_and_variables(model, saved_model_path):
"""Save model variables and json structure into SavedModel subdirectories."""
# Save model configuration as a json string under assets folder.
model_json = model.to_json()
model_json_filepath = os.path.join(
saved_model_utils.get_or_create_assets_dir(saved_model_path),
compat.as_text(constants.SAVED_MODEL_FILENAME_JSON))
file_io.write_string_to_file(model_json_filepath, model_json)
# Save model weights in checkpoint format under variables folder.
saved_model_utils.get_or_create_variables_dir(saved_model_path)
checkpoint_prefix = saved_model_utils.get_variables_path(saved_model_path)
model.save_weights(checkpoint_prefix, save_format='tf', overwrite=True)
return checkpoint_prefix
def _get_var_list(model):
"""Return list of all checkpointed saveable objects in the model."""
return checkpointable_utils.named_saveables(model)
def _export_mode(
mode, has_saved_vars, builder, model, custom_objects, checkpoint_path):
"""Export a model, and optionally save new vars from the clone model.
Args:
mode: A `tf.estimator.ModeKeys` string.
has_saved_vars: A `boolean` indicating whether the SavedModel has already
exported variables.
builder: A `SavedModelBuilder` object.
model: A `tf.keras.Model` object.
custom_objects: A dictionary mapping string names to custom classes
or functions.
checkpoint_path: String path to checkpoint.
Raises:
ValueError: If the train/eval mode is being exported, but the model does
not have an optimizer.
"""
compile_clone = (mode != model_fn_lib.ModeKeys.PREDICT)
if compile_clone and not model.optimizer:
raise ValueError(
'Model does not have an optimizer. Cannot export mode %s' % mode)
model_graph = ops.get_default_graph()
with ops.Graph().as_default() as g:
K.set_learning_phase(mode == model_fn_lib.ModeKeys.TRAIN)
# Clone the model into blank graph. This will create placeholders for inputs
# and targets.
clone = models_lib.clone_and_build_model(
model, custom_objects=custom_objects, compile_clone=compile_clone)
# Make sure that iterations variable is added to the global step collection,
# to ensure that, when the SavedModel graph is loaded, the iterations
# variable is returned by `tf.train.get_global_step()`. This is required for
# compatibility with the SavedModelEstimator.
if compile_clone:
g.add_to_collection(ops.GraphKeys.GLOBAL_STEP, clone.optimizer.iterations)
# Extract update and train ops from train/test/predict functions.
if mode == model_fn_lib.ModeKeys.TRAIN:
clone._make_train_function()
builder._add_train_op(clone.train_function.updates_op)
elif mode == model_fn_lib.ModeKeys.EVAL:
clone._make_test_function()
else:
clone._make_predict_function()
g.get_collection_ref(ops.GraphKeys.UPDATE_OPS).extend(clone.state_updates)
clone_var_list = checkpointable_utils.named_saveables(clone)
with session.Session().as_default():
if has_saved_vars:
# Confirm all variables in the clone have an entry in the checkpoint.
status = clone.load_weights(checkpoint_path)
status.assert_existing_objects_matched()
else:
# Confirm that variables between the clone and model match up exactly,
# not counting optimizer objects. Optimizer objects are ignored because
# if the model has not trained, the slot variables will not have been
# created yet.
# TODO(b/113179535): Replace with checkpointable equivalence.
_assert_same_non_optimizer_objects(model, model_graph, clone, g)
# TODO(b/113178242): Use value transfer for checkpointable objects.
clone.load_weights(checkpoint_path)
# Add graph and variables to SavedModel.
# TODO(b/113134168): Switch to add_meta_graph_and_variables.
clone.save_weights(checkpoint_path, save_format='tf', overwrite=True)
builder._has_saved_variables = True
# Add graph to the SavedModel builder.
builder.add_meta_graph(
model_fn_lib.EXPORT_TAG_MAP[mode],
signature_def_map=_create_signature_def_map(clone, mode),
saver=saver_lib.Saver(clone_var_list),
main_op=variables.local_variables_initializer())
return None
def _create_signature_def_map(model, mode):
"""Create a SignatureDef map from a Keras model."""
inputs_dict = {name: x for name, x in zip(model.input_names, model.inputs)}
if model.optimizer:
targets_dict = {x.name.split(':')[0]: x
for x in model.targets if x is not None}
inputs_dict.update(targets_dict)
outputs_dict = {name: x
for name, x in zip(model.output_names, model.outputs)}
metrics = estimator_keras_util._convert_keras_metrics_to_estimator(model)
# Add metric variables to the `LOCAL_VARIABLES` collection. Metric variables
# are by default not added to any collections. We are doing this here, so
# that metric variables get initialized.
local_vars = set(ops.get_collection(ops.GraphKeys.LOCAL_VARIABLES))
vars_to_add = set()
if metrics is not None:
for key, value in six.iteritems(metrics):
if isinstance(value, Metric):
vars_to_add.update(value.variables)
# Convert Metric instances to (value_tensor, update_op) tuple.
metrics[key] = (value.result(), value.updates[0])
# Remove variables that are in the local variables collection already.
vars_to_add = vars_to_add.difference(local_vars)
for v in vars_to_add:
ops.add_to_collection(ops.GraphKeys.LOCAL_VARIABLES, v)
export_outputs = model_fn_lib.export_outputs_for_mode(
mode,
predictions=outputs_dict,
loss=model.total_loss if model.optimizer else None,
metrics=metrics)
return export_helpers.build_all_signature_defs(
inputs_dict,
export_outputs=export_outputs,
serving_only=(mode == model_fn_lib.ModeKeys.PREDICT))
def _assert_same_non_optimizer_objects(model, model_graph, clone, clone_graph):
"""Assert model and clone contain the same checkpointable objects."""
def get_non_optimizer_objects(m, g):
"""Gather set of model and optimizer checkpointable objects."""
# Set default graph because optimizer.variables() returns optimizer
# variables defined in the default graph.
with g.as_default():
all_objects = set(checkpointable_utils.list_objects(m))
optimizer_and_variables = set()
for obj in all_objects:
if isinstance(obj, optimizers.TFOptimizer):
optimizer_and_variables.update(checkpointable_utils.list_objects(obj))
optimizer_and_variables.update(set(obj.optimizer.variables()))
return all_objects - optimizer_and_variables
model_objects = get_non_optimizer_objects(model, model_graph)
clone_objects = get_non_optimizer_objects(clone, clone_graph)
if len(model_objects) != len(clone_objects):
raise errors.InternalError(
None, None,
'Model and clone must use the same variables.'
'\n\tModel variables: %s\n\t Clone variables: %s'
% (model_objects, clone_objects))
def load_keras_model(saved_model_path):
"""Load a keras.Model from SavedModel.
load_model reinstantiates model state by:
1) loading model topology from json (this will eventually come
from metagraph).
2) loading model weights from checkpoint.
Example:
```python
import tensorflow as tf
# Create a tf.keras model.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(1, input_shape=[10]))
model.summary()
# Save the tf.keras model in the SavedModel format.
saved_to_path = tf.contrib.saved_model.save_keras_model(
model, '/tmp/my_simple_tf_keras_saved_model')
# Load the saved keras model back.
model_prime = tf.contrib.saved_model.load_keras_model(saved_to_path)
model_prime.summary()
```
Args:
saved_model_path: a string specifying the path to an existing SavedModel.
Returns:
a keras.Model instance.
"""
# restore model topology from json string
model_json_filepath = os.path.join(
compat.as_bytes(saved_model_path),
compat.as_bytes(constants.ASSETS_DIRECTORY),
compat.as_bytes(constants.SAVED_MODEL_FILENAME_JSON))
model_json = file_io.read_file_to_string(model_json_filepath)
model = model_from_json(model_json)
# restore model weights
checkpoint_prefix = os.path.join(
compat.as_text(saved_model_path),
compat.as_text(constants.VARIABLES_DIRECTORY),
compat.as_text(constants.VARIABLES_FILENAME))
model.load_weights(checkpoint_prefix)
return model
|
seanli9jan/tensorflow
|
tensorflow/contrib/saved_model/python/saved_model/keras_saved_model.py
|
Python
|
apache-2.0
| 16,089
|
"""Config flow for Google Maps Travel Time integration."""
from __future__ import annotations
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_MODE, CONF_NAME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from .const import (
ALL_LANGUAGES,
ARRIVAL_TIME,
AVOID,
CONF_ARRIVAL_TIME,
CONF_AVOID,
CONF_DEPARTURE_TIME,
CONF_DESTINATION,
CONF_LANGUAGE,
CONF_ORIGIN,
CONF_TIME,
CONF_TIME_TYPE,
CONF_TRAFFIC_MODEL,
CONF_TRANSIT_MODE,
CONF_TRANSIT_ROUTING_PREFERENCE,
CONF_UNITS,
DEFAULT_NAME,
DEPARTURE_TIME,
DOMAIN,
TIME_TYPES,
TRANSIT_PREFS,
TRANSPORT_TYPE,
TRAVEL_MODE,
TRAVEL_MODEL,
UNITS,
)
from .helpers import is_valid_config_entry
_LOGGER = logging.getLogger(__name__)
class GoogleOptionsFlow(config_entries.OptionsFlow):
"""Handle an options flow for Google Travel Time."""
def __init__(self, config_entry: config_entries.ConfigEntry) -> None:
"""Initialize google options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle the initial step."""
if user_input is not None:
time_type = user_input.pop(CONF_TIME_TYPE)
if time := user_input.pop(CONF_TIME, None):
if time_type == ARRIVAL_TIME:
user_input[CONF_ARRIVAL_TIME] = time
else:
user_input[CONF_DEPARTURE_TIME] = time
return self.async_create_entry(
title="",
data={k: v for k, v in user_input.items() if v not in (None, "")},
)
if CONF_ARRIVAL_TIME in self.config_entry.options:
default_time_type = ARRIVAL_TIME
default_time = self.config_entry.options[CONF_ARRIVAL_TIME]
else:
default_time_type = DEPARTURE_TIME
default_time = self.config_entry.options.get(CONF_DEPARTURE_TIME, "")
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_MODE, default=self.config_entry.options[CONF_MODE]
): vol.In(TRAVEL_MODE),
vol.Optional(
CONF_LANGUAGE,
default=self.config_entry.options.get(CONF_LANGUAGE),
): vol.In([None, *ALL_LANGUAGES]),
vol.Optional(
CONF_AVOID, default=self.config_entry.options.get(CONF_AVOID)
): vol.In([None, *AVOID]),
vol.Optional(
CONF_UNITS, default=self.config_entry.options[CONF_UNITS]
): vol.In(UNITS),
vol.Optional(CONF_TIME_TYPE, default=default_time_type): vol.In(
TIME_TYPES
),
vol.Optional(CONF_TIME, default=default_time): cv.string,
vol.Optional(
CONF_TRAFFIC_MODEL,
default=self.config_entry.options.get(CONF_TRAFFIC_MODEL),
): vol.In([None, *TRAVEL_MODEL]),
vol.Optional(
CONF_TRANSIT_MODE,
default=self.config_entry.options.get(CONF_TRANSIT_MODE),
): vol.In([None, *TRANSPORT_TYPE]),
vol.Optional(
CONF_TRANSIT_ROUTING_PREFERENCE,
default=self.config_entry.options.get(
CONF_TRANSIT_ROUTING_PREFERENCE
),
): vol.In([None, *TRANSIT_PREFS]),
}
),
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Google Maps Travel Time."""
VERSION = 1
@staticmethod
@callback
def async_get_options_flow(
config_entry: config_entries.ConfigEntry,
) -> GoogleOptionsFlow:
"""Get the options flow for this handler."""
return GoogleOptionsFlow(config_entry)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
user_input = user_input or {}
if user_input:
if await self.hass.async_add_executor_job(
is_valid_config_entry,
self.hass,
_LOGGER,
user_input[CONF_API_KEY],
user_input[CONF_ORIGIN],
user_input[CONF_DESTINATION],
):
return self.async_create_entry(
title=user_input.get(CONF_NAME, DEFAULT_NAME),
data=user_input,
)
# If we get here, it's because we couldn't connect
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(
CONF_NAME, default=user_input.get(CONF_NAME, DEFAULT_NAME)
): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DESTINATION): cv.string,
vol.Required(CONF_ORIGIN): cv.string,
}
),
errors=errors,
)
|
home-assistant/home-assistant
|
homeassistant/components/google_travel_time/config_flow.py
|
Python
|
apache-2.0
| 5,526
|
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class ConfigurationError(Exception):
"""Raised when the cache isn't configured correctly."""
class QueueEmpty(Exception):
"""Raised when a connection cannot be acquired."""
|
citrix-openstack-build/oslo.cache
|
oslo_cache/exception.py
|
Python
|
apache-2.0
| 770
|
# Copyright 2015 FUJITSU LIMITED
# (C) Copyright 2016 Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
class BaseRepo(object):
def __init__(self, config):
self._find_alarm_action_sql = \
"""SELECT id, type, name, address, period
FROM alarm_action as aa
JOIN notification_method as nm ON aa.action_id = nm.id
WHERE aa.alarm_definition_id = %s and aa.alarm_state = %s"""
self._find_alarm_state_sql = \
"""SELECT state
FROM alarm
WHERE alarm.id = %s"""
self._insert_notification_types_sql = \
"""INSERT INTO notification_method_type (name) VALUES ( %s)"""
self._find_all_notification_types_sql = """SELECT name from notification_method_type """
self._get_notification_sql = """SELECT name, type, address, period
FROM notification_method
WHERE id = %s"""
|
openstack/monasca-notification
|
monasca_notification/common/repositories/base/base_repo.py
|
Python
|
apache-2.0
| 1,519
|
# -*- encoding: utf-8 -*-
#
# Copyright © 2014-2016 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import functools
import math
import operator
import fixtures
import iso8601
import numpy
import six
from gnocchi import carbonara
from gnocchi.tests import base
def datetime64(*args):
return numpy.datetime64(datetime.datetime(*args))
class TestBoundTimeSerie(base.BaseTestCase):
def test_benchmark(self):
self.useFixture(fixtures.Timeout(300, gentle=True))
carbonara.BoundTimeSerie.benchmark()
@staticmethod
def test_base():
carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6])
def test_block_size(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 5),
datetime64(2014, 1, 1, 12, 0, 9)],
[5, 6],
block_size=numpy.timedelta64(5, 's'))
self.assertEqual(2, len(ts))
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 10), 3),
(datetime64(2014, 1, 1, 12, 0, 11), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(2, len(ts))
def test_block_size_back_window(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6],
block_size=numpy.timedelta64(5, 's'),
back_window=1)
self.assertEqual(3, len(ts))
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 10), 3),
(datetime64(2014, 1, 1, 12, 0, 11), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(3, len(ts))
def test_block_size_unordered(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 5),
datetime64(2014, 1, 1, 12, 0, 9)],
[5, 23],
block_size=numpy.timedelta64(5, 's'))
self.assertEqual(2, len(ts))
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 11), 3),
(datetime64(2014, 1, 1, 12, 0, 10), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(2, len(ts))
def test_duplicate_timestamps(self):
ts = carbonara.BoundTimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 9)],
[10, 23])
self.assertEqual(2, len(ts))
self.assertEqual(10.0, ts[0][1])
self.assertEqual(23.0, ts[1][1])
ts.set_values(numpy.array([(datetime64(2014, 1, 1, 13, 0, 10), 3),
(datetime64(2014, 1, 1, 13, 0, 11), 9),
(datetime64(2014, 1, 1, 13, 0, 11), 8),
(datetime64(2014, 1, 1, 13, 0, 11), 7),
(datetime64(2014, 1, 1, 13, 0, 11), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE))
self.assertEqual(4, len(ts))
self.assertEqual(10.0, ts[0][1])
self.assertEqual(23.0, ts[1][1])
self.assertEqual(3.0, ts[2][1])
self.assertEqual(9.0, ts[3][1])
class TestAggregatedTimeSerie(base.BaseTestCase):
def test_benchmark(self):
self.useFixture(fixtures.Timeout(300, gentle=True))
carbonara.AggregatedTimeSerie.benchmark()
def test_fetch_basic(self):
ts = carbonara.AggregatedTimeSerie.from_data(
timestamps=[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
values=[3, 5, 6],
aggregation=carbonara.Aggregation(
"mean", numpy.timedelta64(1, 's'), None))
self.assertEqual(
[(datetime64(2014, 1, 1, 12), 3),
(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch()))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch(
from_timestamp=datetime64(2014, 1, 1, 12, 0, 4))))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch(
from_timestamp=numpy.datetime64(iso8601.parse_date(
"2014-01-01 12:00:04")))))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 4), 5),
(datetime64(2014, 1, 1, 12, 0, 9), 6)],
list(ts.fetch(
from_timestamp=numpy.datetime64(iso8601.parse_date(
"2014-01-01 13:00:04+01:00")))))
def test_before_epoch(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(1950, 1, 1, 12),
datetime64(2014, 1, 1, 12),
datetime64(2014, 1, 1, 12)],
[3, 5, 6])
self.assertRaises(carbonara.BeforeEpochError,
ts.group_serie, 60)
@staticmethod
def _resample(ts, sampling, agg, derived=False):
aggregation = carbonara.Aggregation(agg, sampling, None)
grouped = ts.group_serie(sampling)
if derived:
grouped = grouped.derived()
return carbonara.AggregatedTimeSerie.from_grouped_serie(
grouped, aggregation)
def test_derived_mean(self):
ts = carbonara.TimeSerie.from_data(
[datetime.datetime(2014, 1, 1, 12, 0, 0),
datetime.datetime(2014, 1, 1, 12, 0, 4),
datetime.datetime(2014, 1, 1, 12, 1, 2),
datetime.datetime(2014, 1, 1, 12, 1, 14),
datetime.datetime(2014, 1, 1, 12, 1, 24),
datetime.datetime(2014, 1, 1, 12, 2, 4),
datetime.datetime(2014, 1, 1, 12, 2, 35),
datetime.datetime(2014, 1, 1, 12, 2, 42),
datetime.datetime(2014, 1, 1, 12, 3, 2),
datetime.datetime(2014, 1, 1, 12, 3, 22), # Counter reset
datetime.datetime(2014, 1, 1, 12, 3, 42),
datetime.datetime(2014, 1, 1, 12, 4, 9)],
[50, 55, 65, 66, 70, 83, 92, 103, 105, 5, 7, 23])
ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean',
derived=True)
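# derived() appears to turn the values into per-sample deltas attached to the
# later timestamp, so the 12:03 bucket averages 2, -100 and 2 = -32, which is
# where the counter reset above shows up in the expected output below.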
self.assertEqual(5, len(ts))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 0), 5),
(datetime64(2014, 1, 1, 12, 1, 0), 5),
(datetime64(2014, 1, 1, 12, 2, 0), 11),
(datetime64(2014, 1, 1, 12, 3, 0), -32),
(datetime64(2014, 1, 1, 12, 4, 0), 16)],
list(ts.fetch(
from_timestamp=datetime64(2014, 1, 1, 12))))
def test_derived_hole(self):
ts = carbonara.TimeSerie.from_data(
[datetime.datetime(2014, 1, 1, 12, 0, 0),
datetime.datetime(2014, 1, 1, 12, 0, 4),
datetime.datetime(2014, 1, 1, 12, 1, 2),
datetime.datetime(2014, 1, 1, 12, 1, 14),
datetime.datetime(2014, 1, 1, 12, 1, 24),
datetime.datetime(2014, 1, 1, 12, 3, 2),
datetime.datetime(2014, 1, 1, 12, 3, 22),
datetime.datetime(2014, 1, 1, 12, 3, 42),
datetime.datetime(2014, 1, 1, 12, 4, 9)],
[50, 55, 65, 66, 70, 105, 108, 200, 202])
ts = self._resample(ts, numpy.timedelta64(60, 's'), 'last',
derived=True)
self.assertEqual(4, len(ts))
self.assertEqual(
[(datetime64(2014, 1, 1, 12, 0, 0), 5),
(datetime64(2014, 1, 1, 12, 1, 0), 4),
(datetime64(2014, 1, 1, 12, 3, 0), 92),
(datetime64(2014, 1, 1, 12, 4, 0), 2)],
list(ts.fetch(
from_timestamp=datetime64(2014, 1, 1, 12))))
def test_74_percentile_serialized(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6])
ts = self._resample(ts, numpy.timedelta64(60, 's'), '74pct')
self.assertEqual(1, len(ts))
self.assertEqual(5.48, ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
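# 74th percentile of [3, 5, 6] with linear interpolation: rank = 0.74 * 2
# = 1.48, i.e. 5 + 0.48 * (6 - 5) = 5.48.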
# Serialize and unserialize
key = ts.get_split_key()
o, s = ts.serialize(key)
saved_ts = carbonara.AggregatedTimeSerie.unserialize(
s, key, ts.aggregation)
self.assertEqual(ts.aggregation, saved_ts.aggregation)
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6])
ts = self._resample(ts, numpy.timedelta64(60, 's'), '74pct')
saved_ts.merge(ts)
self.assertEqual(1, len(ts))
self.assertEqual(5.48, ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
def test_95_percentile(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6])
ts = self._resample(ts, numpy.timedelta64(60, 's'), '95pct')
self.assertEqual(1, len(ts))
self.assertEqual(5.9000000000000004,
ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
def _do_test_aggregation(self, name, v1, v2, v3):
# NOTE(gordc): test data must include a group with an odd number of samples
# to properly exercise the 50pct case.
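# With 60s resampling the groups are: 12:00 -> [3, 5, 2, 3, 5] (5 samples,
# the odd-count group), 12:01 -> [8, 11, 22, 10, 42, 9] and 12:02 -> [4, 2];
# e.g. the medians checked below are 3.0, (10 + 11) / 2 = 10.5 and 3.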
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 10),
datetime64(2014, 1, 1, 12, 0, 20),
datetime64(2014, 1, 1, 12, 0, 30),
datetime64(2014, 1, 1, 12, 0, 40),
datetime64(2014, 1, 1, 12, 1, 0),
datetime64(2014, 1, 1, 12, 1, 10),
datetime64(2014, 1, 1, 12, 1, 20),
datetime64(2014, 1, 1, 12, 1, 30),
datetime64(2014, 1, 1, 12, 1, 40),
datetime64(2014, 1, 1, 12, 1, 50),
datetime64(2014, 1, 1, 12, 2, 0),
datetime64(2014, 1, 1, 12, 2, 10)],
[3, 5, 2, 3, 5, 8, 11, 22, 10, 42, 9, 4, 2])
ts = self._resample(ts, numpy.timedelta64(60, 's'), name)
self.assertEqual(3, len(ts))
self.assertEqual(v1, ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
self.assertEqual(v2, ts[datetime64(2014, 1, 1, 12, 1, 0)][1])
self.assertEqual(v3, ts[datetime64(2014, 1, 1, 12, 2, 0)][1])
def test_aggregation_first(self):
self._do_test_aggregation('first', 3, 8, 4)
def test_aggregation_last(self):
self._do_test_aggregation('last', 5, 9, 2)
def test_aggregation_count(self):
self._do_test_aggregation('count', 5, 6, 2)
def test_aggregation_sum(self):
self._do_test_aggregation('sum', 18, 102, 6)
def test_aggregation_mean(self):
self._do_test_aggregation('mean', 3.6, 17, 3)
def test_aggregation_median(self):
self._do_test_aggregation('median', 3.0, 10.5, 3)
def test_aggregation_50pct(self):
self._do_test_aggregation('50pct', 3.0, 10.5, 3)
def test_aggregation_56pct(self):
self._do_test_aggregation('56pct', 3.4800000000000004,
10.8, 3.120000000000001)
def test_aggregation_min(self):
self._do_test_aggregation('min', 2, 8, 2)
def test_aggregation_max(self):
self._do_test_aggregation('max', 5, 42, 4)
def test_aggregation_std(self):
self._do_test_aggregation('std', 1.3416407864998738,
13.266499161421599, 1.4142135623730951)
def test_aggregation_std_with_unique(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0)], [3])
ts = self._resample(ts, numpy.timedelta64(60, 's'), 'std')
self.assertEqual(0, len(ts), ts.values)
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9),
datetime64(2014, 1, 1, 12, 1, 6)],
[3, 6, 5, 9])
ts = self._resample(ts, numpy.timedelta64(60, 's'), "std")
self.assertEqual(1, len(ts))
self.assertEqual(1.5275252316519465,
ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
def test_different_length_in_timestamps_and_data(self):
self.assertRaises(
ValueError,
carbonara.AggregatedTimeSerie.from_data,
carbonara.Aggregation('mean', numpy.timedelta64(3, 's'), None),
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5])
def test_truncate(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 6])
ts = self._resample(ts, numpy.timedelta64(1, 's'), 'mean')
ts.truncate(datetime64(2014, 1, 1, 12, 0, 0))
self.assertEqual(2, len(ts))
self.assertEqual(5, ts[0][1])
self.assertEqual(6, ts[1][1])
def test_down_sampling(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9)],
[3, 5, 7])
ts = self._resample(ts, numpy.timedelta64(300, 's'), 'mean')
self.assertEqual(1, len(ts))
self.assertEqual(5, ts[datetime64(2014, 1, 1, 12, 0, 0)][1])
def test_down_sampling_and_truncate(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 1, 4),
datetime64(2014, 1, 1, 12, 1, 9),
datetime64(2014, 1, 1, 12, 2, 12)],
[3, 5, 7, 1])
ts = self._resample(ts, numpy.timedelta64(60, 's'), 'mean')
ts.truncate(datetime64(2014, 1, 1, 12, 0, 59))
self.assertEqual(2, len(ts))
self.assertEqual(6, ts[datetime64(2014, 1, 1, 12, 1, 0)][1])
self.assertEqual(1, ts[datetime64(2014, 1, 1, 12, 2, 0)][1])
def test_down_sampling_and_truncate_and_method_max(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 1, 4),
datetime64(2014, 1, 1, 12, 1, 9),
datetime64(2014, 1, 1, 12, 2, 12)],
[3, 5, 70, 1])
ts = self._resample(ts, numpy.timedelta64(60, 's'), 'max')
ts.truncate(datetime64(2014, 1, 1, 12, 0, 59))
self.assertEqual(2, len(ts))
self.assertEqual(70, ts[datetime64(2014, 1, 1, 12, 1, 0)][1])
self.assertEqual(1, ts[datetime64(2014, 1, 1, 12, 2, 0)][1])
@staticmethod
def _resample_and_merge(ts, agg_dict):
"""Helper method that mimics _compute_splits_operations workflow."""
grouped = ts.group_serie(agg_dict['sampling'])
existing = agg_dict.get('return')
agg_dict['return'] = carbonara.AggregatedTimeSerie.from_grouped_serie(
grouped, carbonara.Aggregation(
agg_dict['agg'], agg_dict['sampling'], None))
if existing:
existing.merge(agg_dict['return'])
agg_dict['return'] = existing
def test_fetch(self):
ts = {'sampling': numpy.timedelta64(60, 's'),
'size': 10, 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 11, 46, 4), 4),
(datetime64(2014, 1, 1, 11, 47, 34), 8),
(datetime64(2014, 1, 1, 11, 50, 54), 50),
(datetime64(2014, 1, 1, 11, 54, 45), 4),
(datetime64(2014, 1, 1, 11, 56, 49), 4),
(datetime64(2014, 1, 1, 11, 57, 22), 6),
(datetime64(2014, 1, 1, 11, 58, 22), 5),
(datetime64(2014, 1, 1, 12, 1, 4), 4),
(datetime64(2014, 1, 1, 12, 1, 9), 7),
(datetime64(2014, 1, 1, 12, 2, 1), 15),
(datetime64(2014, 1, 1, 12, 2, 12), 1),
(datetime64(2014, 1, 1, 12, 3, 0), 3),
(datetime64(2014, 1, 1, 12, 4, 9), 7),
(datetime64(2014, 1, 1, 12, 5, 1), 15),
(datetime64(2014, 1, 1, 12, 5, 12), 1),
(datetime64(2014, 1, 1, 12, 6, 0, 2), 3)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 6), 5)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual([
(numpy.datetime64('2014-01-01T11:46:00.000000000'), 4.0),
(numpy.datetime64('2014-01-01T11:47:00.000000000'), 8.0),
(numpy.datetime64('2014-01-01T11:50:00.000000000'), 50.0),
(datetime64(2014, 1, 1, 11, 54), 4.0),
(datetime64(2014, 1, 1, 11, 56), 4.0),
(datetime64(2014, 1, 1, 11, 57), 6.0),
(datetime64(2014, 1, 1, 11, 58), 5.0),
(datetime64(2014, 1, 1, 12, 1), 5.5),
(datetime64(2014, 1, 1, 12, 2), 8.0),
(datetime64(2014, 1, 1, 12, 3), 3.0),
(datetime64(2014, 1, 1, 12, 4), 7.0),
(datetime64(2014, 1, 1, 12, 5), 8.0),
(datetime64(2014, 1, 1, 12, 6), 4.0)
], list(ts['return'].fetch()))
self.assertEqual([
(datetime64(2014, 1, 1, 12, 1), 5.5),
(datetime64(2014, 1, 1, 12, 2), 8.0),
(datetime64(2014, 1, 1, 12, 3), 3.0),
(datetime64(2014, 1, 1, 12, 4), 7.0),
(datetime64(2014, 1, 1, 12, 5), 8.0),
(datetime64(2014, 1, 1, 12, 6), 4.0)
], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))))
def test_fetch_agg_pct(self):
ts = {'sampling': numpy.timedelta64(1, 's'),
'size': 3600 * 24, 'agg': '90pct'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 0), 3),
(datetime64(2014, 1, 1, 12, 0, 0, 123), 4),
(datetime64(2014, 1, 1, 12, 0, 2), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
result = ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))
reference = [
    (datetime64(2014, 1, 1, 12, 0, 0), 3.9),
    (datetime64(2014, 1, 1, 12, 0, 2), 4)
]
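# 90pct of the 12:00:00 second-bucket [3, 4] interpolates to 3 + 0.9 * (4 - 3)
# = 3.9; after the extra 110 sample below, the 12:00:02 bucket becomes
# [4, 110] and 4 + 0.9 * (110 - 4) = 99.4 in the second reference list.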
self.assertEqual(len(reference), len(list(result)))
for ref, res in zip(reference, result):
self.assertEqual(ref[0], res[0])
# Rounding \o/
self.assertAlmostEqual(ref[1], res[1])
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, 0, 2, 113), 110)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
result = ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))
reference = [
    (datetime64(2014, 1, 1, 12, 0, 0), 3.9),
    (datetime64(2014, 1, 1, 12, 0, 2), 99.4)
]
self.assertEqual(len(reference), len(list(result)))
for ref, res in zip(reference, result):
self.assertEqual(ref[0], res[0])
# Rounding \o/
self.assertAlmostEqual(ref[1], res[1])
def test_fetch_nano(self):
ts = {'sampling': numpy.timedelta64(200, 'ms'),
'size': 10, 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 11, 46, 0, 200123), 4),
(datetime64(2014, 1, 1, 11, 46, 0, 340000), 8),
(datetime64(2014, 1, 1, 11, 47, 0, 323154), 50),
(datetime64(2014, 1, 1, 11, 48, 0, 590903), 4),
(datetime64(2014, 1, 1, 11, 48, 0, 903291), 4)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 11, 48, 0, 821312), 5)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
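# With 200 ms buckets, the 11:46:00.200123 and .340000 samples share the
# .200000 bucket (mean (4 + 8) / 2 = 6.0) and the two 11:48:00.8xx/.9xx
# samples share the .800000 bucket (mean (4 + 5) / 2 = 4.5) below.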
self.assertEqual([
(datetime64(2014, 1, 1, 11, 46, 0, 200000), 6.0),
(datetime64(2014, 1, 1, 11, 47, 0, 200000), 50.0),
(datetime64(2014, 1, 1, 11, 48, 0, 400000), 4.0),
(datetime64(2014, 1, 1, 11, 48, 0, 800000), 4.5)
], list(ts['return'].fetch()))
self.assertEqual(numpy.timedelta64(200000000, 'ns'),
ts['return'].aggregation.granularity)
def test_fetch_agg_std(self):
# NOTE (gordc): this is a good test to ensure we drop NaN entries
# 2014-01-01 12:00:00 will appear if we don't dropna()
ts = {'sampling': numpy.timedelta64(60, 's'),
'size': 60, 'agg': 'std'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 0), 3),
(datetime64(2014, 1, 1, 12, 1, 4), 4),
(datetime64(2014, 1, 1, 12, 1, 9), 7),
(datetime64(2014, 1, 1, 12, 2, 1), 15),
(datetime64(2014, 1, 1, 12, 2, 12), 1)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual([
(datetime64(2014, 1, 1, 12, 1, 0), 2.1213203435596424),
(datetime64(2014, 1, 1, 12, 2, 0), 9.8994949366116654),
], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))))
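# 2.1213... is the sample standard deviation of [4, 7] (sqrt(4.5)) and
# 9.8994... that of [15, 1] (sqrt(98)); after the 110 added below, the
# 12:02 bucket becomes [15, 1, 110] with std sqrt(3517) ~= 59.30.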
tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 2, 13), 110)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual([
(datetime64(2014, 1, 1, 12, 1, 0), 2.1213203435596424),
(datetime64(2014, 1, 1, 12, 2, 0), 59.304300012730948),
], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))))
def test_fetch_agg_max(self):
ts = {'sampling': numpy.timedelta64(60, 's'),
'size': 60, 'agg': 'max'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 0, 0), 3),
(datetime64(2014, 1, 1, 12, 1, 4), 4),
(datetime64(2014, 1, 1, 12, 1, 9), 7),
(datetime64(2014, 1, 1, 12, 2, 1), 15),
(datetime64(2014, 1, 1, 12, 2, 12), 1)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual([
(datetime64(2014, 1, 1, 12, 0, 0), 3),
(datetime64(2014, 1, 1, 12, 1, 0), 7),
(datetime64(2014, 1, 1, 12, 2, 0), 15),
], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))))
tsb.set_values(numpy.array([(datetime64(2014, 1, 1, 12, 2, 13), 110)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual([
(datetime64(2014, 1, 1, 12, 0, 0), 3),
(datetime64(2014, 1, 1, 12, 1, 0), 7),
(datetime64(2014, 1, 1, 12, 2, 0), 110),
], list(ts['return'].fetch(datetime64(2014, 1, 1, 12, 0, 0))))
def test_serialize(self):
ts = {'sampling': numpy.timedelta64(500, 'ms'), 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, 0, 0, 1234), 3),
(datetime64(2014, 1, 1, 12, 0, 0, 321), 6),
(datetime64(2014, 1, 1, 12, 1, 4, 234), 5),
(datetime64(2014, 1, 1, 12, 1, 9, 32), 7),
(datetime64(2014, 1, 1, 12, 2, 12, 532), 1)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
key = ts['return'].get_split_key()
o, s = ts['return'].serialize(key)
self.assertEqual(ts['return'],
carbonara.AggregatedTimeSerie.unserialize(
s, key, ts['return'].aggregation))
def test_no_truncation(self):
ts = {'sampling': numpy.timedelta64(60, 's'), 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie()
for i in six.moves.range(1, 11):
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, i, i), float(i))],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, i, i + 1), float(i + 1))],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual(i, len(list(ts['return'].fetch())))
def test_back_window(self):
"""Back window testing.
Test the back window on an archive is not longer than the window we
aggregate on.
"""
ts = {'sampling': numpy.timedelta64(1, 's'), 'size': 60, 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, 0, 1, 2300), 1),
(datetime64(2014, 1, 1, 12, 0, 1, 4600), 2),
(datetime64(2014, 1, 1, 12, 0, 2, 4500), 3),
(datetime64(2014, 1, 1, 12, 0, 2, 7800), 4),
(datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual(
[
(datetime64(2014, 1, 1, 12, 0, 1), 1.5),
(datetime64(2014, 1, 1, 12, 0, 2), 3.5),
(datetime64(2014, 1, 1, 12, 0, 3), 2.5),
],
list(ts['return'].fetch()))
def test_back_window_ignore(self):
"""Back window testing.
Test the back window on an archive is not longer than the window we
aggregate on.
"""
ts = {'sampling': numpy.timedelta64(1, 's'), 'size': 60, 'agg': 'mean'}
tsb = carbonara.BoundTimeSerie(block_size=ts['sampling'])
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, 0, 1, 2300), 1),
(datetime64(2014, 1, 1, 12, 0, 1, 4600), 2),
(datetime64(2014, 1, 1, 12, 0, 2, 4500), 3),
(datetime64(2014, 1, 1, 12, 0, 2, 7800), 4),
(datetime64(2014, 1, 1, 12, 0, 3, 8), 2.5)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual(
[
(datetime64(2014, 1, 1, 12, 0, 1), 1.5),
(datetime64(2014, 1, 1, 12, 0, 2), 3.5),
(datetime64(2014, 1, 1, 12, 0, 3), 2.5),
],
list(ts['return'].fetch()))
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, 0, 2, 99), 9)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
self.assertEqual(
[
(datetime64(2014, 1, 1, 12, 0, 1), 1.5),
(datetime64(2014, 1, 1, 12, 0, 2), 3.5),
(datetime64(2014, 1, 1, 12, 0, 3), 2.5),
],
list(ts['return'].fetch()))
tsb.set_values(numpy.array([
(datetime64(2014, 1, 1, 12, 0, 2, 99), 9),
(datetime64(2014, 1, 1, 12, 0, 3, 9), 4.5)],
dtype=carbonara.TIMESERIES_ARRAY_DTYPE),
before_truncate_callback=functools.partial(
self._resample_and_merge, agg_dict=ts))
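# The replayed 12:00:02 sample is behind the back window (the bound series
# has already moved on to 12:00:03) and is silently dropped, while the 4.5
# at 12:00:03 is merged in: (2.5 + 4.5) / 2 = 3.5 in the assertion below.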
self.assertEqual(
[
(datetime64(2014, 1, 1, 12, 0, 1), 1.5),
(datetime64(2014, 1, 1, 12, 0, 2), 3.5),
(datetime64(2014, 1, 1, 12, 0, 3), 3.5),
],
list(ts['return'].fetch()))
def test_split_key(self):
self.assertEqual(
numpy.datetime64("2014-10-07"),
carbonara.SplitKey.from_timestamp_and_sampling(
numpy.datetime64("2015-01-01T15:03"),
numpy.timedelta64(3600, 's')))
self.assertEqual(
numpy.datetime64("2014-12-31 18:00"),
carbonara.SplitKey.from_timestamp_and_sampling(
numpy.datetime64("2015-01-01 15:03:58"),
numpy.timedelta64(58, 's')))
key = carbonara.SplitKey.from_timestamp_and_sampling(
numpy.datetime64("2015-01-01 15:03"),
numpy.timedelta64(3600, 's'))
self.assertGreater(key, numpy.datetime64("1970"))
self.assertGreaterEqual(key, numpy.datetime64("1970"))
def test_split_key_cmp(self):
dt1 = numpy.datetime64("2015-01-01T15:03")
dt1_1 = numpy.datetime64("2015-01-01T15:03")
dt2 = numpy.datetime64("2015-01-05T15:03")
td = numpy.timedelta64(60, 's')
td2 = numpy.timedelta64(300, 's')
self.assertEqual(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td))
self.assertEqual(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt1_1, td))
self.assertNotEqual(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td))
self.assertNotEqual(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td2))
self.assertLess(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td))
self.assertLessEqual(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td))
self.assertGreater(
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td))
self.assertGreaterEqual(
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td))
def test_split_key_cmp_negative(self):
dt1 = numpy.datetime64("2015-01-01T15:03")
dt1_1 = numpy.datetime64("2015-01-01T15:03")
dt2 = numpy.datetime64("2015-01-05T15:03")
td = numpy.timedelta64(60, 's')
td2 = numpy.timedelta64(300, 's')
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) !=
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) !=
carbonara.SplitKey.from_timestamp_and_sampling(dt1_1, td))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) ==
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) ==
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2))
self.assertRaises(
TypeError,
operator.le,
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2))
self.assertRaises(
TypeError,
operator.ge,
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2))
self.assertRaises(
TypeError,
operator.gt,
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2))
self.assertRaises(
TypeError,
operator.lt,
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td),
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td2))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) >=
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td) >
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td) <=
carbonara.SplitKey.from_timestamp_and_sampling(dt1, td))
self.assertFalse(
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td) <
carbonara.SplitKey.from_timestamp_and_sampling(dt2, td))
def test_split_key_next(self):
self.assertEqual(
numpy.datetime64("2015-03-06"),
next(carbonara.SplitKey.from_timestamp_and_sampling(
numpy.datetime64("2015-01-01 15:03"),
numpy.timedelta64(3600, 's'))))
self.assertEqual(
numpy.datetime64("2015-08-03"),
next(next(carbonara.SplitKey.from_timestamp_and_sampling(
numpy.datetime64("2015-01-01T15:03"),
numpy.timedelta64(3600, 's')))))
def test_split(self):
sampling = numpy.timedelta64(5, 's')
points = 100000
ts = carbonara.TimeSerie.from_data(
timestamps=list(map(datetime.datetime.utcfromtimestamp,
six.moves.range(points))),
values=list(six.moves.range(points)))
agg = self._resample(ts, sampling, 'mean')
grouped_points = list(agg.split())
self.assertEqual(
math.ceil((points / sampling.astype(float))
/ carbonara.SplitKey.POINTS_PER_SPLIT),
len(grouped_points))
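# 100000 one-second points resampled at 5s give 20000 aggregated points, so
# ceil(20000 / POINTS_PER_SPLIT) splits are expected (6 with the 3600-point
# splits checked below).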
self.assertEqual("0.0",
str(carbonara.SplitKey(grouped_points[0][0], 0)))
# 3600 × 5s = 5 hours
self.assertEqual(datetime64(1970, 1, 1, 5),
grouped_points[1][0])
self.assertEqual(carbonara.SplitKey.POINTS_PER_SPLIT,
len(grouped_points[0][1]))
def test_from_timeseries(self):
sampling = numpy.timedelta64(5, 's')
points = 100000
ts = carbonara.TimeSerie.from_data(
timestamps=list(map(datetime.datetime.utcfromtimestamp,
six.moves.range(points))),
values=list(six.moves.range(points)))
agg = self._resample(ts, sampling, 'mean')
split = [t[1] for t in list(agg.split())]
self.assertEqual(agg,
carbonara.AggregatedTimeSerie.from_timeseries(
split, aggregation=agg.aggregation))
def test_resample(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 4),
datetime64(2014, 1, 1, 12, 0, 9),
datetime64(2014, 1, 1, 12, 0, 11),
datetime64(2014, 1, 1, 12, 0, 12)],
[3, 5, 6, 2, 4])
agg_ts = self._resample(ts, numpy.timedelta64(5, 's'), 'mean')
self.assertEqual(3, len(agg_ts))
agg_ts = agg_ts.resample(numpy.timedelta64(10, 's'))
self.assertEqual(2, len(agg_ts))
self.assertEqual(5, agg_ts[0][1])
self.assertEqual(3, agg_ts[1][1])
def test_iter(self):
ts = carbonara.TimeSerie.from_data(
[datetime64(2014, 1, 1, 12, 0, 0),
datetime64(2014, 1, 1, 12, 0, 11),
datetime64(2014, 1, 1, 12, 0, 12)],
[3, 5, 6])
self.assertEqual([
(numpy.datetime64('2014-01-01T12:00:00'), 3.),
(numpy.datetime64('2014-01-01T12:00:11'), 5.),
(numpy.datetime64('2014-01-01T12:00:12'), 6.),
], list(ts))
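# --- Illustrative sketch (not part of the original test module) ---
# A minimal recap of the carbonara workflow exercised above, using only calls
# that appear in these tests (TimeSerie.from_data, group_serie, Aggregation,
# AggregatedTimeSerie.from_grouped_serie); the sample numbers are arbitrary.
def _example_resample_to_minute():
    raw = carbonara.TimeSerie.from_data(
        [datetime64(2014, 1, 1, 12, 0, 0),
         datetime64(2014, 1, 1, 12, 0, 30),
         datetime64(2014, 1, 1, 12, 1, 15)],
        [1, 3, 10])
    aggregation = carbonara.Aggregation(
        "mean", numpy.timedelta64(60, 's'), None)
    agg = carbonara.AggregatedTimeSerie.from_grouped_serie(
        raw.group_serie(numpy.timedelta64(60, 's')), aggregation)
    # Two one-minute buckets: 12:00 -> mean(1, 3) == 2.0, 12:01 -> 10.0.
    return list(agg.fetch())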
|
gnocchixyz/gnocchi
|
gnocchi/tests/test_carbonara.py
|
Python
|
apache-2.0
| 38,695
|
"""Config flow to configure the OVO Energy integration."""
import aiohttp
from ovoenergy.ovoenergy import OVOEnergy
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.config_entries import ConfigFlow
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import DOMAIN # pylint: disable=unused-import
REAUTH_SCHEMA = vol.Schema({vol.Required(CONF_PASSWORD): str})
USER_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
class OVOEnergyFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a OVO Energy config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the flow."""
self.username = None
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
errors = {}
if user_input is not None:
client = OVOEnergy()
try:
authenticated = await client.authenticate(
user_input[CONF_USERNAME], user_input[CONF_PASSWORD]
)
except aiohttp.ClientError:
errors["base"] = "cannot_connect"
else:
if authenticated:
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=client.username,
data={
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
},
)
errors["base"] = "invalid_auth"
return self.async_show_form(
step_id="user", data_schema=USER_SCHEMA, errors=errors
)
async def async_step_reauth(self, user_input):
"""Handle configuration by re-auth."""
errors = {}
if user_input and user_input.get(CONF_USERNAME):
self.username = user_input[CONF_USERNAME]
self.context["title_placeholders"] = {CONF_USERNAME: self.username}
if user_input is not None and user_input.get(CONF_PASSWORD) is not None:
client = OVOEnergy()
try:
authenticated = await client.authenticate(
self.username, user_input[CONF_PASSWORD]
)
except aiohttp.ClientError:
errors["base"] = "connection_error"
else:
if authenticated:
await self.async_set_unique_id(self.username)
for entry in self._async_current_entries():
if entry.unique_id == self.unique_id:
self.hass.config_entries.async_update_entry(
entry,
data={
CONF_USERNAME: self.username,
CONF_PASSWORD: user_input[CONF_PASSWORD],
},
)
return self.async_abort(reason="reauth_successful")
errors["base"] = "authorization_error"
return self.async_show_form(
step_id="reauth", data_schema=REAUTH_SCHEMA, errors=errors
)
|
partofthething/home-assistant
|
homeassistant/components/ovo_energy/config_flow.py
|
Python
|
apache-2.0
| 3,440
|
"""Tests for OwnTracks config flow."""
from unittest.mock import patch
from homeassistant.setup import async_setup_component
from tests.common import mock_coro
async def test_config_flow_import(hass):
"""Test that we automatically create a config flow."""
assert not hass.config_entries.async_entries('owntracks')
assert await async_setup_component(hass, 'owntracks', {
'owntracks': {
}
})
await hass.async_block_till_done()
assert hass.config_entries.async_entries('owntracks')
async def test_config_flow_unload(hass):
"""Test unloading a config flow."""
with patch('homeassistant.config_entries.ConfigEntries'
'.async_forward_entry_setup') as mock_forward:
result = await hass.config_entries.flow.async_init(
'owntracks', context={'source': 'import'},
data={}
)
assert len(mock_forward.mock_calls) == 1
entry = result['result']
assert mock_forward.mock_calls[0][1][0] is entry
assert mock_forward.mock_calls[0][1][1] == 'device_tracker'
assert entry.data['webhook_id'] in hass.data['webhook']
with patch('homeassistant.config_entries.ConfigEntries'
'.async_forward_entry_unload', return_value=mock_coro()
) as mock_unload:
assert await hass.config_entries.async_unload(entry.entry_id)
assert len(mock_unload.mock_calls) == 1
assert mock_forward.mock_calls[0][1][0] is entry
assert mock_forward.mock_calls[0][1][1] == 'device_tracker'
assert entry.data['webhook_id'] not in hass.data['webhook']
async def test_with_cloud_sub(hass):
"""Test creating a config flow while subscribed."""
with patch('homeassistant.components.cloud.async_active_subscription',
return_value=True), \
patch('homeassistant.components.cloud.async_create_cloudhook',
return_value=mock_coro('https://hooks.nabu.casa/ABCD')):
result = await hass.config_entries.flow.async_init(
'owntracks', context={'source': 'user'},
data={}
)
entry = result['result']
assert entry.data['cloudhook']
assert result['description_placeholders']['webhook_url'] == \
'https://hooks.nabu.casa/ABCD'
|
aequitas/home-assistant
|
tests/components/owntracks/test_config_flow.py
|
Python
|
apache-2.0
| 2,257
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
versions = ["v1beta1", "v1beta2", "v1p1beta1", "v1p2beta1", "v1"]
# ----------------------------------------------------------------------------
# Generate videointelligence GAPIC layer
# ----------------------------------------------------------------------------
for version in versions:
library = gapic.py_library(
"videointelligence", version, artman_output_name=f"video-intelligence-{version}"
)
# TODO: stop excluding tests and nox.py (excluded as we lack system tests)
s.move(
library,
excludes=[
"setup.py",
"nox*.py",
"README.rst",
"docs/index.rst",
f"tests/system/gapic/{version}/"
f"test_system_video_intelligence_service_{version}.py",
# f'tests/unit/gapic/{version}/'
# f'test_video_intelligence_service_client_{version}.py',
],
)
s.replace(
"**/*/video_intelligence_service_client.py",
"'google-cloud-video-intelligence', \).version",
"'google-cloud-videointelligence', ).version",
)
s.replace(
"tests/unit/gapic/**/test_video_intelligence_service_client_*.py",
"^(\s+)expected_request = video_intelligence_pb2.AnnotateVideoRequest\(\)",
"\g<1>expected_request = video_intelligence_pb2.AnnotateVideoRequest(\n"
"\g<1> input_uri=input_uri, features=features)",
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(unit_cov_level=97, cov_level=100)
s.move(templated_files)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
dhermes/gcloud-python
|
videointelligence/synth.py
|
Python
|
apache-2.0
| 2,452
|
#!/usr/bin/env python
from optparse import OptionParser
import getpass
import os
import sys
import yaml
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir,
'anvil',
'__init__.py')):
sys.path.insert(0, possible_topdir)
from anvil import log as logging
from anvil import importer
from anvil import passwords
from anvil.components.helpers import keystone
from anvil import utils
def get_token():
pw_storage = passwords.KeyringProxy(path='/etc/anvil/passwords.cfg')
lookup_name = "service_token"
prompt = "Please enter the password for %s: " % ('/etc/anvil/passwords.cfg')
(exists, token) = pw_storage.read(lookup_name, prompt)
if not exists:
pw_storage.save(lookup_name, token)
return token
def replace_services_endpoints(token, options):
client = importer.construct_entry_point("keystoneclient.v2_0.client:Client",
token=token, endpoint=options.keystone_uri)
current_endpoints = client.endpoints.list()
current_services = client.services.list()
def filter_resource(r):
raw = dict(r.__dict__)  # Can't access the raw attrs, argh...
raw_cleaned = {}
for k, v in raw.items():
if k == 'manager' or k.startswith('_'):
continue
raw_cleaned[k] = v
return raw_cleaned
for e in current_endpoints:
print("Deleting endpoint: ")
print(utils.prettify_yaml(filter_resource(e)))
client.endpoints.delete(e.id)
for s in current_services:
print("Deleting service: ")
print(utils.prettify_yaml(filter_resource(s)))
client.services.delete(s.id)
if options.file:
with(open(options.file, 'r')) as fh:
contents = yaml.safe_load(fh)
set_contents = {
'services': contents.get('services', []),
'endpoints': contents.get('endpoints', []),
}
print("Regenerating with:")
print(utils.prettify_yaml(set_contents))
set_contents['users'] = []
set_contents['roles'] = []
set_contents['tenants'] = []
initer = keystone.Initializer(token, options.keystone_uri)
initer.initialize(**set_contents)
def main():
parser = OptionParser()
parser.add_option("-k", '--keystone', dest='keystone_uri',
help='keystone endpoint uri to authenticate with', metavar='KEYSTONE')
parser.add_option("-f", '--file', dest='file',
help='service and endpoint creation file', metavar='FILE')
(options, args) = parser.parse_args()
if not options.keystone_uri or not options.file:
parser.error("options are missing, please try -h")
logging.setupLogging(logging.DEBUG)
replace_services_endpoints(get_token(), options)
if __name__ == "__main__":
sys.exit(main())
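# --- Illustrative usage note (not part of the original script) ---
# A hypothetical invocation; the keystone URI and file name are made up:
#   python tools/endpoint-service-replace.py \
#       -k http://127.0.0.1:35357/v2.0 -f services.yaml
# where the YAML file is expected to provide at least the 'services' and
# 'endpoints' keys consumed by replace_services_endpoints() above.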
|
toby82/anvil
|
tools/endpoint-service-replace.py
|
Python
|
apache-2.0
| 3,065
|
from typing import Any, Dict, List, Union
from unittest import mock
import orjson
from zerver.lib.actions import (
do_remove_realm_custom_profile_field,
do_update_user_custom_profile_data_if_changed,
try_add_realm_custom_profile_field,
try_reorder_realm_custom_profile_fields,
)
from zerver.lib.external_accounts import DEFAULT_EXTERNAL_ACCOUNTS
from zerver.lib.markdown import markdown_convert
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_helpers import queries_captured
from zerver.models import (
CustomProfileField,
CustomProfileFieldValue,
custom_profile_fields_for_realm,
get_realm,
)
class CustomProfileFieldTestCase(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
self.realm = get_realm("zulip")
self.original_count = len(custom_profile_fields_for_realm(self.realm.id))
def custom_field_exists_in_realm(self, field_id: int) -> bool:
fields = custom_profile_fields_for_realm(self.realm.id)
field_ids = [field.id for field in fields]
return field_id in field_ids
class CreateCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_create(self) -> None:
self.login("iago")
realm = get_realm("zulip")
data: Dict[str, Any] = {"name": "Phone", "field_type": "text id"}
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'Argument "field_type" is not valid JSON.')
data["name"] = ""
data["field_type"] = 100
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Label cannot be blank.")
data["name"] = "*" * 41
data["field_type"] = 100
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "name is too long (limit: 40 characters)")
data["name"] = "Phone"
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid field type.")
data["name"] = "Phone"
data["hint"] = "*" * 81
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
msg = "hint is too long (limit: 80 characters)"
self.assert_json_error(result, msg)
data["name"] = "Phone"
data["hint"] = "Contact number"
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
field = CustomProfileField.objects.get(name="Phone", realm=realm)
self.assertEqual(field.id, field.order)
data["name"] = "Name "
data["hint"] = "Some name"
data["field_type"] = CustomProfileField.SHORT_TEXT
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
field = CustomProfileField.objects.get(name="Name", realm=realm)
self.assertEqual(field.id, field.order)
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "A field with that label already exists.")
def test_create_select_field(self) -> None:
self.login("iago")
data: Dict[str, Union[str, int]] = {}
data["name"] = "Favorite programming language"
data["field_type"] = CustomProfileField.SELECT
data["field_data"] = "invalid"
result = self.client_post("/json/realm/profile_fields", info=data)
error_msg = "Bad value for 'field_data': invalid"
self.assert_json_error(result, error_msg)
data["field_data"] = orjson.dumps(
{
"python": ["1"],
"java": ["2"],
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "field_data is not a dict")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python"},
"java": {"text": "Java"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "order key is missing from field_data")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": ""},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["order"] cannot be blank.')
data["field_data"] = orjson.dumps(
{
"": {"text": "Python", "order": "1"},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "'value' cannot be blank.")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": 1},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["order"] is not a string')
data["field_data"] = orjson.dumps({}).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Field must have at least one choice.")
data["field_data"] = orjson.dumps(
{
"python": {"text": "Python", "order": "1"},
"java": {"text": "Java", "order": "2"},
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
def test_create_default_external_account_field(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field_type: int = CustomProfileField.EXTERNAL_ACCOUNT
field_data: str = orjson.dumps(
{
"subtype": "twitter",
}
).decode()
invalid_field_name: str = "Not required field name"
invalid_field_hint: str = "Not required field hint"
result = self.client_post(
"/json/realm/profile_fields",
info=dict(
field_type=field_type,
field_data=field_data,
hint=invalid_field_hint,
name=invalid_field_name,
),
)
self.assert_json_success(result)
# Silently overwrite name and hint with values set in default fields dict
# for default custom external account fields.
with self.assertRaises(CustomProfileField.DoesNotExist):
field = CustomProfileField.objects.get(name=invalid_field_name, realm=realm)
# The field is created with 'Twitter' name as per values in default fields dict
field = CustomProfileField.objects.get(name="Twitter")
self.assertEqual(field.name, DEFAULT_EXTERNAL_ACCOUNTS["twitter"]["name"])
self.assertEqual(field.hint, DEFAULT_EXTERNAL_ACCOUNTS["twitter"]["hint"])
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
# Should also work without name or hint and only external field type and subtype data
result = self.client_post(
"/json/realm/profile_fields", info=dict(field_type=field_type, field_data=field_data)
)
self.assert_json_success(result)
# Default external account field data cannot be updated
field = CustomProfileField.objects.get(name="Twitter", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Twitter username", "field_type": CustomProfileField.EXTERNAL_ACCOUNT},
)
self.assert_json_error(result, "Default custom field cannot be updated.")
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
def test_create_external_account_field(self) -> None:
self.login("iago")
realm = get_realm("zulip")
data: Dict[str, Union[str, int, Dict[str, str]]] = {}
data["name"] = "Twitter"
data["field_type"] = CustomProfileField.EXTERNAL_ACCOUNT
data["field_data"] = "invalid"
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Bad value for 'field_data': invalid")
data["field_data"] = orjson.dumps({}).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "subtype key is missing from field_data")
data["field_data"] = orjson.dumps(
{
"subtype": "",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["subtype"] cannot be blank.')
data["field_data"] = orjson.dumps(
{
"subtype": "123",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid external account type")
non_default_external_account = "linkedin"
data["field_data"] = orjson.dumps(
{
"subtype": non_default_external_account,
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Invalid external account type")
data["field_data"] = orjson.dumps(
{
"subtype": "twitter",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
twitter_field = CustomProfileField.objects.get(name="Twitter", realm=realm)
self.assertEqual(twitter_field.field_type, CustomProfileField.EXTERNAL_ACCOUNT)
self.assertEqual(twitter_field.name, "Twitter")
self.assertEqual(orjson.loads(twitter_field.field_data)["subtype"], "twitter")
data["name"] = "Reddit"
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Custom external account must define URL pattern")
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": 123,
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["url_pattern"] is not a string')
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "invalid",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Malformed URL pattern.")
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "https://www.reddit.com/%(username)s/user/%(username)s",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "Malformed URL pattern.")
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "reddit.com/%(username)s",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, 'field_data["url_pattern"] is not a URL')
data["field_data"] = orjson.dumps(
{
"subtype": "custom",
"url_pattern": "https://www.reddit.com/user/%(username)s",
}
).decode()
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
custom_field = CustomProfileField.objects.get(name="Reddit", realm=realm)
self.assertEqual(custom_field.field_type, CustomProfileField.EXTERNAL_ACCOUNT)
self.assertEqual(custom_field.name, "Reddit")
field_data = orjson.loads(custom_field.field_data)
self.assertEqual(field_data["subtype"], "custom")
self.assertEqual(field_data["url_pattern"], "https://www.reddit.com/user/%(username)s")
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_error(result, "A field with that label already exists.")
def test_create_field_of_type_user(self) -> None:
self.login("iago")
data = {
"name": "Your mentor",
"field_type": CustomProfileField.USER,
}
result = self.client_post("/json/realm/profile_fields", info=data)
self.assert_json_success(result)
def test_not_realm_admin(self) -> None:
self.login("hamlet")
result = self.client_post("/json/realm/profile_fields")
self.assert_json_error(result, "Must be an organization administrator")
result = self.client_delete("/json/realm/profile_fields/1")
self.assert_json_error(result, "Must be an organization administrator")
class DeleteCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_delete(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field = CustomProfileField.objects.get(name="Phone number", realm=realm)
result = self.client_delete("/json/realm/profile_fields/100")
self.assert_json_error(result, "Field id 100 not found.")
self.assertTrue(self.custom_field_exists_in_realm(field.id))
result = self.client_delete(f"/json/realm/profile_fields/{field.id}")
self.assert_json_success(result)
self.assertFalse(self.custom_field_exists_in_realm(field.id))
def test_delete_field_value(self) -> None:
iago = self.example_user("iago")
self.login_user(iago)
realm = get_realm("zulip")
invalid_field_id = 1234
result = self.client_delete(
"/json/users/me/profile_data",
{
"data": orjson.dumps([invalid_field_id]).decode(),
},
)
self.assert_json_error(result, f"Field id {invalid_field_id} not found.")
field = CustomProfileField.objects.get(name="Mentor", realm=realm)
data: List[Dict[str, Union[int, str, List[int]]]] = [
{"id": field.id, "value": [self.example_user("aaron").id]},
]
do_update_user_custom_profile_data_if_changed(iago, data)
iago_value = CustomProfileFieldValue.objects.get(user_profile=iago, field=field)
converter = field.FIELD_CONVERTERS[field.field_type]
self.assertEqual([self.example_user("aaron").id], converter(iago_value.value))
result = self.client_delete(
"/json/users/me/profile_data",
{
"data": orjson.dumps([field.id]).decode(),
},
)
self.assert_json_success(result)
# Don't throw an exception here
result = self.client_delete(
"/json/users/me/profile_data",
{
"data": orjson.dumps([field.id]).decode(),
},
)
self.assert_json_success(result)
def test_delete_internals(self) -> None:
user_profile = self.example_user("iago")
realm = user_profile.realm
field = CustomProfileField.objects.get(name="Phone number", realm=realm)
data: List[Dict[str, Union[int, str, List[int]]]] = [
{"id": field.id, "value": "123456"},
]
do_update_user_custom_profile_data_if_changed(user_profile, data)
self.assertTrue(self.custom_field_exists_in_realm(field.id))
self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count)
do_remove_realm_custom_profile_field(realm, field)
self.assertFalse(self.custom_field_exists_in_realm(field.id))
self.assertEqual(user_profile.customprofilefieldvalue_set.count(), self.original_count - 1)
class UpdateCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_update(self) -> None:
self.login("iago")
realm = get_realm("zulip")
result = self.client_patch(
"/json/realm/profile_fields/100",
info={"name": "Phone number", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_error(result, "Field id 100 not found.")
field = CustomProfileField.objects.get(name="Phone number", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_error(result, "Label cannot be blank.")
self.assertEqual(CustomProfileField.objects.count(), self.original_count)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "New phone number", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_success(result)
field = CustomProfileField.objects.get(id=field.id, realm=realm)
self.assertEqual(CustomProfileField.objects.count(), self.original_count)
self.assertEqual(field.name, "New phone number")
self.assertIs(field.hint, "")
self.assertEqual(field.field_type, CustomProfileField.SHORT_TEXT)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "*" * 41, "field_type": CustomProfileField.SHORT_TEXT},
)
msg = "name is too long (limit: 40 characters)"
self.assert_json_error(result, msg)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={
"name": "New phone number",
"hint": "*" * 81,
"field_type": CustomProfileField.SHORT_TEXT,
},
)
msg = "hint is too long (limit: 80 characters)"
self.assert_json_error(result, msg)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={
"name": "New phone number",
"hint": "New contact number",
"field_type": CustomProfileField.SHORT_TEXT,
},
)
self.assert_json_success(result)
field = CustomProfileField.objects.get(id=field.id, realm=realm)
self.assertEqual(CustomProfileField.objects.count(), self.original_count)
self.assertEqual(field.name, "New phone number")
self.assertEqual(field.hint, "New contact number")
self.assertEqual(field.field_type, CustomProfileField.SHORT_TEXT)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Name ", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_success(result)
field.refresh_from_db()
self.assertEqual(field.name, "Name")
field = CustomProfileField.objects.get(name="Favorite editor", realm=realm)
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Favorite editor", "field_data": "invalid"},
)
self.assert_json_error(result, "Bad value for 'field_data': invalid")
field_data = orjson.dumps(
{
"vim": "Vim",
"emacs": {"order": "2", "text": "Emacs"},
}
).decode()
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Favorite editor", "field_data": field_data},
)
self.assert_json_error(result, "field_data is not a dict")
field_data = orjson.dumps(
{
"vim": {"order": "1", "text": "Vim"},
"emacs": {"order": "2", "text": "Emacs"},
"notepad": {"order": "3", "text": "Notepad"},
}
).decode()
result = self.client_patch(
f"/json/realm/profile_fields/{field.id}",
info={"name": "Favorite editor", "field_data": field_data},
)
self.assert_json_success(result)
def test_update_is_aware_of_uniqueness(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field_1 = try_add_realm_custom_profile_field(realm, "Phone", CustomProfileField.SHORT_TEXT)
field_2 = try_add_realm_custom_profile_field(
realm, "Phone 1", CustomProfileField.SHORT_TEXT
)
self.assertTrue(self.custom_field_exists_in_realm(field_1.id))
self.assertTrue(self.custom_field_exists_in_realm(field_2.id))
result = self.client_patch(
f"/json/realm/profile_fields/{field_2.id}",
info={"name": "Phone", "field_type": CustomProfileField.SHORT_TEXT},
)
self.assert_json_error(result, "A field with that label already exists.")
def assert_error_update_invalid_value(
self, field_name: str, new_value: object, error_msg: str
) -> None:
self.login("iago")
realm = get_realm("zulip")
field = CustomProfileField.objects.get(name=field_name, realm=realm)
# Update value of field
result = self.client_patch(
"/json/users/me/profile_data",
{"data": orjson.dumps([{"id": field.id, "value": new_value}]).decode()},
)
self.assert_json_error(result, error_msg)
def test_update_invalid_field(self) -> None:
self.login("iago")
data = [{"id": 1234, "value": "12"}]
result = self.client_patch(
"/json/users/me/profile_data",
{
"data": orjson.dumps(data).decode(),
},
)
self.assert_json_error(result, "Field id 1234 not found.")
def test_update_invalid_short_text(self) -> None:
field_name = "Phone number"
self.assert_error_update_invalid_value(
field_name, "t" * 201, f"{field_name} is too long (limit: 50 characters)"
)
def test_update_invalid_date(self) -> None:
field_name = "Birthday"
self.assert_error_update_invalid_value(field_name, "a-b-c", f"{field_name} is not a date")
self.assert_error_update_invalid_value(
field_name, "1909-3-5", f"{field_name} is not a date"
)
self.assert_error_update_invalid_value(field_name, 123, f"{field_name} is not a string")
def test_update_invalid_url(self) -> None:
field_name = "Favorite website"
self.assert_error_update_invalid_value(field_name, "not URL", f"{field_name} is not a URL")
def test_update_invalid_user_field(self) -> None:
field_name = "Mentor"
invalid_user_id = 1000
self.assert_error_update_invalid_value(
field_name, [invalid_user_id], f"Invalid user ID: {invalid_user_id}"
)
def test_update_profile_data_successfully(self) -> None:
self.login("iago")
realm = get_realm("zulip")
fields = [
("Phone number", "*short* text data"),
("Biography", "~~short~~ **long** text data"),
("Favorite food", "long short text data"),
("Favorite editor", "vim"),
("Birthday", "1909-03-05"),
("Favorite website", "https://zulip.com"),
("Mentor", [self.example_user("cordelia").id]),
("GitHub", "zulip-mobile"),
]
data = []
for name, value in fields:
field = CustomProfileField.objects.get(name=name, realm=realm)
data.append(
{
"id": field.id,
"value": value,
"field": field,
}
)
# Update value of field
result = self.client_patch(
"/json/users/me/profile_data",
{"data": orjson.dumps([{"id": f["id"], "value": f["value"]} for f in data]).decode()},
)
self.assert_json_success(result)
iago = self.example_user("iago")
expected_value = {f["id"]: f["value"] for f in data}
expected_rendered_value: Dict[Union[int, float, str, None], Union[str, None]] = {}
for f in data:
if f["field"].is_renderable():
expected_rendered_value[f["id"]] = markdown_convert(f["value"]).rendered_content
else:
expected_rendered_value[f["id"]] = None
for field_dict in iago.profile_data:
self.assertEqual(field_dict["value"], expected_value[field_dict["id"]])
self.assertEqual(
field_dict["rendered_value"], expected_rendered_value[field_dict["id"]]
)
for k in ["id", "type", "name", "field_data"]:
self.assertIn(k, field_dict)
# Update value of one field.
field = CustomProfileField.objects.get(name="Biography", realm=realm)
data = [
{
"id": field.id,
"value": "foobar",
}
]
result = self.client_patch(
"/json/users/me/profile_data", {"data": orjson.dumps(data).decode()}
)
self.assert_json_success(result)
for field_dict in iago.profile_data:
if field_dict["id"] == field.id:
self.assertEqual(field_dict["value"], "foobar")
def test_update_invalid_select_field(self) -> None:
field_name = "Favorite editor"
self.assert_error_update_invalid_value(
field_name, "foobar", f"'foobar' is not a valid choice for '{field_name}'."
)
def test_update_select_field_successfully(self) -> None:
self.login("iago")
realm = get_realm("zulip")
field = CustomProfileField.objects.get(name="Favorite editor", realm=realm)
data = [
{
"id": field.id,
"value": "emacs",
}
]
result = self.client_patch(
"/json/users/me/profile_data", {"data": orjson.dumps(data).decode()}
)
self.assert_json_success(result)
def test_null_value_and_rendered_value(self) -> None:
self.login("iago")
realm = get_realm("zulip")
quote = try_add_realm_custom_profile_field(
realm=realm,
name="Quote",
hint="Saying or phrase which you known for.",
field_type=CustomProfileField.SHORT_TEXT,
)
iago = self.example_user("iago")
iago_profile_quote = iago.profile_data[-1]
value = iago_profile_quote["value"]
rendered_value = iago_profile_quote["rendered_value"]
self.assertIsNone(value)
self.assertIsNone(rendered_value)
update_dict: Dict[str, Union[int, str, List[int]]] = {
"id": quote.id,
"value": "***beware*** of jealousy...",
}
do_update_user_custom_profile_data_if_changed(iago, [update_dict])
iago_profile_quote = self.example_user("iago").profile_data[-1]
value = iago_profile_quote["value"]
rendered_value = iago_profile_quote["rendered_value"]
self.assertIsNotNone(value)
self.assertIsNotNone(rendered_value)
self.assertEqual("<p><strong><em>beware</em></strong> of jealousy...</p>", rendered_value)
def test_do_update_value_not_changed(self) -> None:
iago = self.example_user("iago")
self.login_user(iago)
realm = get_realm("zulip")
# Set field value:
field = CustomProfileField.objects.get(name="Mentor", realm=realm)
data: List[Dict[str, Union[int, str, List[int]]]] = [
{"id": field.id, "value": [self.example_user("aaron").id]},
]
do_update_user_custom_profile_data_if_changed(iago, data)
with mock.patch("zerver.lib.actions.notify_user_update_custom_profile_data") as mock_notify:
# Attempting to "update" the field value, when it wouldn't actually change,
# shouldn't trigger notify.
do_update_user_custom_profile_data_if_changed(iago, data)
mock_notify.assert_not_called()
class ListCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_list(self) -> None:
self.login("iago")
result = self.client_get("/json/realm/profile_fields")
self.assert_json_success(result)
self.assertEqual(200, result.status_code)
content = result.json()
self.assert_length(content["custom_fields"], self.original_count)
def test_list_order(self) -> None:
self.login("iago")
realm = get_realm("zulip")
order = (
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
try_reorder_realm_custom_profile_fields(realm, order)
result = self.client_get("/json/realm/profile_fields")
content = result.json()
self.assertListEqual(
content["custom_fields"], sorted(content["custom_fields"], key=lambda x: -x["id"])
)
def test_get_custom_profile_fields_from_api(self) -> None:
iago = self.example_user("iago")
test_bot = self.create_test_bot("foo-bot", iago)
self.login_user(iago)
assert test_bot
with queries_captured() as queries:
response = self.client_get(
"/json/users", {"client_gravatar": "false", "include_custom_profile_fields": "true"}
)
self.assert_length(queries, 4)
self.assertEqual(response.status_code, 200)
raw_users_data = response.json()["members"]
iago_raw_data = None
test_bot_raw_data = None
for user_dict in raw_users_data:
if user_dict["user_id"] == iago.id:
iago_raw_data = user_dict
continue
if user_dict["user_id"] == test_bot.id:
test_bot_raw_data = user_dict
continue
if (not iago_raw_data) or (not test_bot_raw_data):
raise AssertionError("Could not find required data from the response.")
expected_keys_for_iago = {
"delivery_email",
"email",
"user_id",
"avatar_url",
"avatar_version",
"is_admin",
"is_guest",
"is_billing_admin",
"is_bot",
"is_owner",
"role",
"full_name",
"timezone",
"is_active",
"date_joined",
"profile_data",
}
self.assertEqual(set(iago_raw_data.keys()), expected_keys_for_iago)
self.assertNotEqual(iago_raw_data["profile_data"], {})
expected_keys_for_test_bot = {
"delivery_email",
"email",
"user_id",
"avatar_url",
"avatar_version",
"is_admin",
"is_guest",
"is_bot",
"is_owner",
"is_billing_admin",
"role",
"full_name",
"timezone",
"is_active",
"date_joined",
"bot_type",
"bot_owner_id",
}
self.assertEqual(set(test_bot_raw_data.keys()), expected_keys_for_test_bot)
self.assertEqual(test_bot_raw_data["bot_type"], 1)
self.assertEqual(test_bot_raw_data["bot_owner_id"], iago_raw_data["user_id"])
response = self.client_get("/json/users", {"client_gravatar": "false"})
self.assertEqual(response.status_code, 200)
raw_users_data = response.json()["members"]
for user_dict in raw_users_data:
with self.assertRaises(KeyError):
user_dict["profile_data"]
def test_get_custom_profile_fields_from_api_for_single_user(self) -> None:
self.login("iago")
expected_keys = {
"result",
"msg",
"max_message_id",
"user_id",
"avatar_url",
"full_name",
"email",
"is_bot",
"is_admin",
"is_owner",
"is_billing_admin",
"role",
"profile_data",
"avatar_version",
"timezone",
"delivery_email",
"is_active",
"is_guest",
"date_joined",
}
url = "/json/users/me"
response = self.client_get(url)
self.assertEqual(response.status_code, 200)
raw_user_data = response.json()
self.assertEqual(set(raw_user_data.keys()), expected_keys)
class ReorderCustomProfileFieldTest(CustomProfileFieldTestCase):
def test_reorder(self) -> None:
self.login("iago")
realm = get_realm("zulip")
order = list(
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_success(result)
fields = CustomProfileField.objects.filter(realm=realm).order_by("order")
for field in fields:
self.assertEqual(field.id, order[field.order])
def test_reorder_duplicates(self) -> None:
self.login("iago")
realm = get_realm("zulip")
order = list(
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
order.append(4)
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_success(result)
fields = CustomProfileField.objects.filter(realm=realm).order_by("order")
for field in fields:
self.assertEqual(field.id, order[field.order])
def test_reorder_unauthorized(self) -> None:
self.login("hamlet")
realm = get_realm("zulip")
order = list(
CustomProfileField.objects.filter(realm=realm)
.order_by("-order")
.values_list("order", flat=True)
)
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_error(result, "Must be an organization administrator")
def test_reorder_invalid(self) -> None:
self.login("iago")
order = [100, 200, 300]
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_error(result, "Invalid order mapping.")
order = [1, 2]
result = self.client_patch(
"/json/realm/profile_fields", info={"order": orjson.dumps(order).decode()}
)
self.assert_json_error(result, "Invalid order mapping.")
|
punchagan/zulip
|
zerver/tests/test_custom_profile_data.py
|
Python
|
apache-2.0
| 35,581
|
# -*- coding: utf-8 -*-
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Accesses the google.devtools.containeranalysis.v1 ContainerAnalysis API."""
import pkg_resources
import warnings
from google.oauth2 import service_account
import google.api_core.client_options
import google.api_core.gapic_v1.client_info
import google.api_core.gapic_v1.config
import google.api_core.gapic_v1.method
import google.api_core.gapic_v1.routing_header
import google.api_core.grpc_helpers
import google.api_core.path_template
import grpc
from google.cloud.devtools.containeranalysis_v1.gapic import (
container_analysis_client_config,
)
from google.cloud.devtools.containeranalysis_v1.gapic.transports import (
container_analysis_grpc_transport,
)
from google.cloud.devtools.containeranalysis_v1.proto import containeranalysis_pb2_grpc
from google.iam.v1 import iam_policy_pb2
from google.iam.v1 import options_pb2
from google.iam.v1 import policy_pb2
from grafeas import grafeas_v1
from grafeas.grafeas_v1.gapic.transports import grafeas_grpc_transport
_GAPIC_LIBRARY_VERSION = pkg_resources.get_distribution(
"google-cloud-containeranalysis"
).version
class ContainerAnalysisClient(object):
"""
Retrieves analysis results of Cloud components such as Docker container
images. The Container Analysis API is an implementation of the
`Grafeas <https://grafeas.io>`__ API.
Analysis results are stored as a series of occurrences. An
``Occurrence`` contains information about a specific analysis instance
on a resource. An occurrence refers to a ``Note``. A note contains
details describing the analysis and is generally stored in a separate
project, called a ``Provider``. Multiple occurrences can refer to the
same note.
For example, an SSL vulnerability could affect multiple images. In this
case, there would be one note for the vulnerability and an occurrence
for each image with the vulnerability referring to that note.
"""
SERVICE_ADDRESS = "containeranalysis.googleapis.com:443"
"""The default address of the service."""
# The name of the interface for this client. This is the key used to
# find the method configuration in the client_config dictionary.
_INTERFACE_NAME = "google.devtools.containeranalysis.v1.ContainerAnalysis"
@classmethod
def from_service_account_file(cls, filename, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ContainerAnalysisClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@classmethod
def note_path(cls, project, note):
"""Return a fully-qualified note string."""
return google.api_core.path_template.expand(
"projects/{project}/notes/{note}", project=project, note=note
)
@classmethod
def occurrence_path(cls, project, occurrence):
"""Return a fully-qualified occurrence string."""
return google.api_core.path_template.expand(
"projects/{project}/occurrences/{occurrence}",
project=project,
occurrence=occurrence,
)
def __init__(
self,
transport=None,
channel=None,
credentials=None,
client_config=None,
client_info=None,
client_options=None,
):
"""Constructor.
Args:
transport (Union[~.ContainerAnalysisGrpcTransport,
Callable[[~.Credentials, type], ~.ContainerAnalysisGrpcTransport]): A transport
instance, responsible for actually making the API calls.
The default transport uses the gRPC protocol.
This argument may also be a callable which returns a
transport instance. Callables will be sent the credentials
as the first argument and the default transport class as
the second argument.
channel (grpc.Channel): DEPRECATED. A ``Channel`` instance
through which to make calls. This argument is mutually exclusive
with ``credentials``; providing both will raise an exception.
credentials (google.auth.credentials.Credentials): The
authorization credentials to attach to requests. These
credentials identify this application to the service. If none
are specified, the client will attempt to ascertain the
credentials from the environment.
This argument is mutually exclusive with providing a
transport instance to ``transport``; doing so will raise
an exception.
client_config (dict): DEPRECATED. A dictionary of call options for
each method. If not specified, the default configuration is used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
client_options (Union[dict, google.api_core.client_options.ClientOptions]):
Client options used to set user options on the client. API Endpoint
should be set through client_options.
"""
# Raise deprecation warnings for things we want to go away.
if client_config is not None:
warnings.warn(
"The `client_config` argument is deprecated.",
PendingDeprecationWarning,
stacklevel=2,
)
else:
client_config = container_analysis_client_config.config
if channel:
warnings.warn(
"The `channel` argument is deprecated; use " "`transport` instead.",
PendingDeprecationWarning,
stacklevel=2,
)
api_endpoint = self.SERVICE_ADDRESS
if client_options:
if type(client_options) == dict:
client_options = google.api_core.client_options.from_dict(
client_options
)
if client_options.api_endpoint:
api_endpoint = client_options.api_endpoint
# Instantiate the transport.
# The transport is responsible for handling serialization and
# deserialization and actually sending data to the service.
if transport:
if callable(transport):
self.transport = transport(
credentials=credentials,
default_class=container_analysis_grpc_transport.ContainerAnalysisGrpcTransport,
address=api_endpoint,
)
else:
if credentials:
raise ValueError(
"Received both a transport instance and "
"credentials; these are mutually exclusive."
)
self.transport = transport
else:
self.transport = container_analysis_grpc_transport.ContainerAnalysisGrpcTransport(
address=api_endpoint, channel=channel, credentials=credentials
)
if client_info is None:
client_info = google.api_core.gapic_v1.client_info.ClientInfo(
gapic_version=_GAPIC_LIBRARY_VERSION
)
else:
client_info.gapic_version = _GAPIC_LIBRARY_VERSION
self._client_info = client_info
# Parse out the default settings for retry and timeout for each RPC
# from the client configuration.
# (Ordinarily, these are the defaults specified in the `*_config.py`
# file next to this one.)
self._method_configs = google.api_core.gapic_v1.config.parse_method_configs(
client_config["interfaces"][self._INTERFACE_NAME]
)
# Save a dictionary of cached API call functions.
# These are the actual callables which invoke the proper
# transport methods, wrapped with `wrap_method` to add retry,
# timeout, and the like.
self._inner_api_calls = {}
def get_grafeas_client(self):
"""Returns an equivalent grafeas client.
Returns:
A :class:`~grafeas.grafeas_v1.GrafeasClient` instance.
"""
grafeas_transport = grafeas_grpc_transport.GrafeasGrpcTransport(
self.SERVICE_ADDRESS, self.transport._OAUTH_SCOPES
)
return grafeas_v1.GrafeasClient(grafeas_transport)
# Service calls
def set_iam_policy(
self,
resource,
policy,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Sets the access control policy on the specified note or occurrence.
Requires ``containeranalysis.notes.setIamPolicy`` or
``containeranalysis.occurrences.setIamPolicy`` permission if the
resource is a note or an occurrence, respectively.
The resource takes the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]``
for notes and ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for
occurrences.
Example:
>>> from google.cloud.devtools import containeranalysis_v1
>>>
>>> client = containeranalysis_v1.ContainerAnalysisClient()
>>>
>>> resource = client.note_path('[PROJECT]', '[NOTE]')
>>>
>>> # TODO: Initialize `policy`:
>>> policy = {}
>>>
>>> response = client.set_iam_policy(resource, policy)
Args:
resource (str): REQUIRED: The resource for which the policy is being specified.
See the operation documentation for the appropriate value for this field.
policy (Union[dict, ~google.cloud.devtools.containeranalysis_v1.types.Policy]): REQUIRED: The complete policy to be applied to the ``resource``. The
size of the policy is limited to a few 10s of KB. An empty policy is a
valid policy but certain Cloud Platform services (such as Projects)
might reject them.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "set_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"set_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.set_iam_policy,
default_retry=self._method_configs["SetIamPolicy"].retry,
default_timeout=self._method_configs["SetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.SetIamPolicyRequest(resource=resource, policy=policy)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["set_iam_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def get_iam_policy(
self,
resource,
options_=None,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the access control policy for a note or an occurrence resource.
Requires ``containeranalysis.notes.setIamPolicy`` or
``containeranalysis.occurrences.setIamPolicy`` permission if the
resource is a note or occurrence, respectively.
The resource takes the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]``
for notes and ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for
occurrences.
Example:
>>> from google.cloud.devtools import containeranalysis_v1
>>>
>>> client = containeranalysis_v1.ContainerAnalysisClient()
>>>
>>> resource = client.note_path('[PROJECT]', '[NOTE]')
>>>
>>> response = client.get_iam_policy(resource)
Args:
resource (str): REQUIRED: The resource for which the policy is being requested.
See the operation documentation for the appropriate value for this field.
options_ (Union[dict, ~google.cloud.devtools.containeranalysis_v1.types.GetPolicyOptions]): OPTIONAL: A ``GetPolicyOptions`` object for specifying options to
``GetIamPolicy``. This field is only used by Cloud IAM.
If a dict is provided, it must be of the same form as the protobuf
message :class:`~google.cloud.devtools.containeranalysis_v1.types.GetPolicyOptions`
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.devtools.containeranalysis_v1.types.Policy` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_iam_policy" not in self._inner_api_calls:
self._inner_api_calls[
"get_iam_policy"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_iam_policy,
default_retry=self._method_configs["GetIamPolicy"].retry,
default_timeout=self._method_configs["GetIamPolicy"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.GetIamPolicyRequest(
resource=resource, options=options_
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["get_iam_policy"](
request, retry=retry, timeout=timeout, metadata=metadata
)
def test_iam_permissions(
self,
resource,
permissions,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Returns the permissions that a caller has on the specified note or
occurrence. Requires list permission on the project (for example,
``containeranalysis.notes.list``).
The resource takes the format ``projects/[PROJECT_ID]/notes/[NOTE_ID]``
for notes and ``projects/[PROJECT_ID]/occurrences/[OCCURRENCE_ID]`` for
occurrences.
Example:
>>> from google.cloud.devtools import containeranalysis_v1
>>>
>>> client = containeranalysis_v1.ContainerAnalysisClient()
>>>
>>> resource = client.note_path('[PROJECT]', '[NOTE]')
>>>
>>> # TODO: Initialize `permissions`:
>>> permissions = []
>>>
>>> response = client.test_iam_permissions(resource, permissions)
Args:
resource (str): REQUIRED: The resource for which the policy detail is being requested.
See the operation documentation for the appropriate value for this field.
permissions (list[str]): The set of permissions to check for the ``resource``. Permissions with
wildcards (such as '*' or 'storage.*') are not allowed. For more
information see `IAM
Overview <https://cloud.google.com/iam/docs/overview#permissions>`__.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will
be retried using a default configuration.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.devtools.containeranalysis_v1.types.TestIamPermissionsResponse` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "test_iam_permissions" not in self._inner_api_calls:
self._inner_api_calls[
"test_iam_permissions"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.test_iam_permissions,
default_retry=self._method_configs["TestIamPermissions"].retry,
default_timeout=self._method_configs["TestIamPermissions"].timeout,
client_info=self._client_info,
)
request = iam_policy_pb2.TestIamPermissionsRequest(
resource=resource, permissions=permissions
)
if metadata is None:
metadata = []
metadata = list(metadata)
try:
routing_header = [("resource", resource)]
except AttributeError:
pass
else:
routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
routing_header
)
metadata.append(routing_metadata)
return self._inner_api_calls["test_iam_permissions"](
request, retry=retry, timeout=timeout, metadata=metadata
)
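# --- Editorial usage sketch (hedged; not part of the generated client) ---
if __name__ == "__main__":  # pragma: no cover
    # Assumes Application Default Credentials are available in the
    # environment; the project and note IDs below are hypothetical.
    client = ContainerAnalysisClient()
    resource = client.note_path("my-project", "my-note")
    # Exercise the read-only IAM calls defined above.
    print(client.test_iam_permissions(resource, ["containeranalysis.notes.get"]))
    print(client.get_iam_policy(resource))
    # The same credentials also drive the Grafeas surface of the API.
    grafeas = client.get_grafeas_client()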
|
tseaver/google-cloud-python
|
containeranalysis/google/cloud/devtools/containeranalysis_v1/gapic/container_analysis_client.py
|
Python
|
apache-2.0
| 21,328
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function
def load_pem_traditional_openssl_private_key(data, password, backend):
return backend.load_traditional_openssl_pem_private_key(
data, password
)
def load_pem_pkcs8_private_key(data, password, backend):
return backend.load_pkcs8_pem_private_key(
data, password
)
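# Editorial usage sketch (hedged): with the cryptography 0.5-era API that these
# wrappers target, a PKCS#8 PEM private key would typically be loaded like so;
# "key.pem" and `pem_data` are hypothetical.
#
#     from cryptography.hazmat.backends import default_backend
#
#     with open("key.pem", "rb") as f:
#         pem_data = f.read()
#     key = load_pem_pkcs8_private_key(pem_data, password=None,
#                                      backend=default_backend())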
|
fkolacek/FIT-VUT
|
bp-revok/python/lib/python2.7/site-packages/cryptography-0.5.2-py2.7-linux-x86_64.egg/cryptography/hazmat/primitives/serialization.py
|
Python
|
apache-2.0
| 909
|
#!/usr/bin/python
"""
Copyright 2014 The Trustees of Princeton University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import base64
from Crypto.PublicKey import RSA as CryptoKey
import logging
from logging import Logger
logging.basicConfig( format='[%(levelname)s] [%(module)s:%(lineno)d] %(message)s' )
logger = logging.getLogger()
logger.setLevel( logging.INFO )
import syndicate.syndicate as c_syndicate
#-------------------------------
def encrypt_slice_secret( observer_pkey_pem, slice_secret ):
"""
Encrypt and serialize the slice secret with the Observer private key
"""
# get the public key
try:
observer_pubkey_pem = CryptoKey.importKey( observer_pkey_pem ).publickey().exportKey()
except Exception, e:
logger.exception(e)
logger.error("Failed to derive public key from private key")
return None
# encrypt the data
rc, sealed_slice_secret = c_syndicate.encrypt_data( observer_pkey_pem, observer_pubkey_pem, slice_secret )
if rc != 0:
logger.error("Failed to encrypt slice secret")
return None
sealed_slice_secret_b64 = base64.b64encode( sealed_slice_secret )
return sealed_slice_secret_b64
#-------------------------------
def decrypt_slice_secret( observer_pkey_pem, sealed_slice_secret_b64 ):
"""
Unserialize and decrypt a slice secret
"""
# get the public key
try:
observer_pubkey_pem = CryptoKey.importKey( observer_pkey_pem ).publickey().exportKey()
except Exception, e:
logger.exception(e)
logger.error("Failed to derive public key from private key")
return None
sealed_slice_secret = base64.b64decode( sealed_slice_secret_b64 )
# decrypt it
rc, slice_secret = c_syndicate.decrypt_data( observer_pubkey_pem, observer_pkey_pem, sealed_slice_secret )
if rc != 0:
logger.error("Failed to decrypt '%s', rc = %d" % (sealed_slice_secret_b64, rc))
return None
return slice_secret
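# Editorial usage sketch (hedged): the two helpers above are meant to
# round-trip a slice secret through the Observer key pair, roughly:
#
#     observer_pkey_pem = CryptoKey.generate( 4096 ).exportKey()  # hypothetical key
#     sealed_b64 = encrypt_slice_secret( observer_pkey_pem, "s3cret" )
#     assert decrypt_slice_secret( observer_pkey_pem, sealed_b64 ) == "s3cret"
#
# Both helpers return None on failure, so callers are expected to check for it.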
|
jcnelson/syndicate
|
python/syndicate/observer/storage/common.py
|
Python
|
apache-2.0
| 2,548
|
#!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from resource_management.core.logger import Logger
def setup_ranger_hbase(upgrade_type=None, service_name="hbase-master"):
import params
if params.enable_ranger_hbase:
stack_version = None
if upgrade_type is not None:
stack_version = params.version
if params.retryAble:
Logger.info("HBase: Setup ranger: command retry enables thus retrying if ranger admin is down !")
else:
Logger.info("HBase: Setup ranger: command retry not enabled thus skipping if ranger admin is down !")
if params.xml_configurations_supported and params.enable_ranger_hbase and params.xa_audit_hdfs_is_enabled and service_name == 'hbase-master' :
params.HdfsResource("/ranger/audit",
type="directory",
action="create_on_execute",
owner=params.hdfs_user,
group=params.hdfs_user,
mode=0755,
recursive_chmod=True
)
params.HdfsResource("/ranger/audit/hbaseMaster",
type="directory",
action="create_on_execute",
owner=params.hbase_user,
group=params.hbase_user,
mode=0700,
recursive_chmod=True
)
params.HdfsResource("/ranger/audit/hbaseRegional",
type="directory",
action="create_on_execute",
owner=params.hbase_user,
group=params.hbase_user,
mode=0700,
recursive_chmod=True
)
params.HdfsResource(None, action="execute")
if params.xml_configurations_supported:
api_version=None
if params.stack_supports_ranger_kerberos:
api_version='v2'
from resource_management.libraries.functions.adh_setup_ranger_plugin_xml import setup_ranger_plugin
setup_ranger_plugin('hbase', 'hbase', params.previous_jdbc_jar, params.downloaded_custom_connector,
params.driver_curl_source, params.driver_curl_target, params.java64_home,
params.repo_name, params.hbase_ranger_plugin_repo,
params.ranger_env, params.ranger_plugin_properties,
params.policy_user, params.policymgr_mgr_url,
params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
component_list=['hbase'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble, api_version=api_version,
is_security_enabled = params.security_enabled,
is_stack_supports_ranger_kerberos = params.stack_supports_ranger_kerberos if params.security_enabled else None,
component_user_principal=params.ranger_hbase_principal if params.security_enabled else None,
component_user_keytab=params.ranger_hbase_keytab if params.security_enabled else None)
else:
from resource_management.libraries.functions.adh_setup_ranger_plugin import setup_ranger_plugin
setup_ranger_plugin('hbase', 'hbase', params.previous_jdbc_jar,
params.downloaded_custom_connector, params.driver_curl_source,
params.driver_curl_target, params.java64_home,
params.repo_name, params.hbase_ranger_plugin_repo,
params.ranger_env, params.ranger_plugin_properties,
params.policy_user, params.policymgr_mgr_url,
params.enable_ranger_hbase, conf_dict=params.hbase_conf_dir,
component_user=params.hbase_user, component_group=params.user_group, cache_service_list=['hbaseMaster', 'hbaseRegional'],
plugin_audit_properties=params.config['configurations']['ranger-hbase-audit'], plugin_audit_attributes=params.config['configuration_attributes']['ranger-hbase-audit'],
plugin_security_properties=params.config['configurations']['ranger-hbase-security'], plugin_security_attributes=params.config['configuration_attributes']['ranger-hbase-security'],
plugin_policymgr_ssl_properties=params.config['configurations']['ranger-hbase-policymgr-ssl'], plugin_policymgr_ssl_attributes=params.config['configuration_attributes']['ranger-hbase-policymgr-ssl'],
component_list=['hbase'], audit_db_is_enabled=params.xa_audit_db_is_enabled,
credential_file=params.credential_file, xa_audit_db_password=params.xa_audit_db_password,
ssl_truststore_password=params.ssl_truststore_password, ssl_keystore_password=params.ssl_keystore_password,
stack_version_override = stack_version, skip_if_rangeradmin_down= not params.retryAble)
else:
Logger.info('Ranger HBase plugin is not enabled')
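# Editorial note (hedged): within an Ambari stack this helper is normally
# invoked from the HBase component scripts once configuration has been
# rendered; a (hypothetical) call site would look roughly like:
#
#     from setup_ranger_hbase import setup_ranger_hbase
#     setup_ranger_hbase(upgrade_type=upgrade_type, service_name="hbase-master")
#
# where `upgrade_type` comes from the surrounding Script lifecycle hook.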
|
arenadata/ambari
|
ambari-server/src/main/resources/stacks/ADH/1.6/services/HBASE/package/scripts/setup_ranger_hbase.py
|
Python
|
apache-2.0
| 6,925
|
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Fake RPC implementation which calls proxy methods directly with no
queues. Casts will block, but this is very useful for tests.
"""
import inspect
# NOTE(russellb): We specifically want to use json, not our own jsonutils.
# jsonutils has some extra logic to automatically convert objects to primitive
# types so that they can be serialized. We want to catch all cases where
# non-primitive types make it into this code and treat it as an error.
import json
import time
import eventlet
from ceilometer.openstack.common.rpc import common as rpc_common
CONSUMERS = {}
class RpcContext(rpc_common.CommonRpcContext):
def __init__(self, **kwargs):
super(RpcContext, self).__init__(**kwargs)
self._response = []
self._done = False
def deepcopy(self):
values = self.to_dict()
new_inst = self.__class__(**values)
new_inst._response = self._response
new_inst._done = self._done
return new_inst
def reply(self, reply=None, failure=None, ending=False):
if ending:
self._done = True
if not self._done:
self._response.append((reply, failure))
class Consumer(object):
def __init__(self, topic, proxy):
self.topic = topic
self.proxy = proxy
def call(self, context, version, method, args, timeout):
done = eventlet.event.Event()
def _inner():
ctxt = RpcContext.from_dict(context.to_dict())
try:
rval = self.proxy.dispatch(context, version, method, **args)
res = []
# Caller might have called ctxt.reply() manually
for (reply, failure) in ctxt._response:
if failure:
raise failure[0], failure[1], failure[2]
res.append(reply)
# if ending not 'sent'...we might have more data to
# return from the function itself
if not ctxt._done:
if inspect.isgenerator(rval):
for val in rval:
res.append(val)
else:
res.append(rval)
done.send(res)
except rpc_common.ClientException as e:
done.send_exception(e._exc_info[1])
except Exception as e:
done.send_exception(e)
thread = eventlet.greenthread.spawn(_inner)
if timeout:
start_time = time.time()
while not done.ready():
eventlet.greenthread.sleep(1)
cur_time = time.time()
if (cur_time - start_time) > timeout:
thread.kill()
raise rpc_common.Timeout()
return done.wait()
class Connection(object):
"""Connection object."""
def __init__(self):
self.consumers = []
def create_consumer(self, topic, proxy, fanout=False):
consumer = Consumer(topic, proxy)
self.consumers.append(consumer)
if topic not in CONSUMERS:
CONSUMERS[topic] = []
CONSUMERS[topic].append(consumer)
def close(self):
for consumer in self.consumers:
CONSUMERS[consumer.topic].remove(consumer)
self.consumers = []
def consume_in_thread(self):
pass
def create_connection(conf, new=True):
"""Create a connection"""
return Connection()
def check_serialize(msg):
"""Make sure a message intended for rpc can be serialized."""
json.dumps(msg)
def multicall(conf, context, topic, msg, timeout=None):
"""Make a call that returns multiple times."""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
try:
consumer = CONSUMERS[topic][0]
except (KeyError, IndexError):
return iter([None])
else:
return consumer.call(context, version, method, args, timeout)
def call(conf, context, topic, msg, timeout=None):
"""Sends a message on a topic and wait for a response."""
rv = multicall(conf, context, topic, msg, timeout)
# NOTE(vish): return the last result from the multicall
rv = list(rv)
if not rv:
return
return rv[-1]
def cast(conf, context, topic, msg):
check_serialize(msg)
try:
call(conf, context, topic, msg)
except Exception:
pass
def notify(conf, context, topic, msg, envelope):
check_serialize(msg)
def cleanup():
pass
def fanout_cast(conf, context, topic, msg):
"""Cast to all consumers of a topic"""
check_serialize(msg)
method = msg.get('method')
if not method:
return
args = msg.get('args', {})
version = msg.get('version', None)
for consumer in CONSUMERS.get(topic, []):
try:
consumer.call(context, version, method, args, None)
except Exception:
pass
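# --- Editorial usage sketch (hedged; not part of the upstream driver) ---
if __name__ == "__main__":  # pragma: no cover
    # Minimal proxy exposing the dispatch() signature that Consumer.call()
    # expects; `EchoProxy` and the 'ping' method are hypothetical.
    class EchoProxy(object):
        def dispatch(self, context, version, method, **kwargs):
            return {'method': method, 'args': kwargs}

    conn = create_connection(conf=None)
    conn.create_consumer('demo-topic', EchoProxy())
    # call() blocks on the in-process consumer and returns its last reply.
    print(call(None, RpcContext(), 'demo-topic',
               {'method': 'ping', 'args': {'answer': 42}}))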
|
dreamhost/ceilometer
|
ceilometer/openstack/common/rpc/impl_fake.py
|
Python
|
apache-2.0
| 5,641
|
# -*- coding: utf-8 -*-
"""
pygments.lexers.web
~~~~~~~~~~~~~~~~~~~
Lexers for web-related languages and markup.
:copyright: Copyright 2006-2010 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import RegexLexer, bygroups, using, include, this
from pygments.token import \
Text, Comment, Operator, Keyword, Name, String, Number, Other, Punctuation
from pygments.util import get_bool_opt, get_list_opt, looks_like_xml, \
html_doctype_matches
__all__ = ['HtmlLexer', 'XmlLexer', 'JavascriptLexer', 'CssLexer',
'PhpLexer', 'ActionScriptLexer', 'XsltLexer', 'ActionScript3Lexer',
'MxmlLexer']
class JavascriptLexer(RegexLexer):
"""
For JavaScript source code.
"""
name = 'JavaScript'
aliases = ['js', 'javascript']
filenames = ['*.js']
mimetypes = ['application/x-javascript', 'text/x-javascript', 'text/javascript']
flags = re.DOTALL
tokens = {
'commentsandwhitespace': [
(r'\s+', Text),
(r'<!--', Comment),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline)
],
'slashstartsregex': [
include('commentsandwhitespace'),
(r'/(\\.|[^[/\\\n]|\[(\\.|[^\]\\\n])*])+/'
r'([gim]+\b|\B)', String.Regex, '#pop'),
(r'(?=/)', Text, ('#pop', 'badregex')),
(r'', Text, '#pop')
],
'badregex': [
('\n', Text, '#pop')
],
'root': [
(r'^(?=\s|/|<!--)', Text, 'slashstartsregex'),
include('commentsandwhitespace'),
(r'\+\+|--|~|&&|\?|:|\|\||\\(?=\n)|'
r'(<<|>>>?|==?|!=?|[-<>+*%&\|\^/])=?', Operator, 'slashstartsregex'),
(r'[{(\[;,]', Punctuation, 'slashstartsregex'),
(r'[})\].]', Punctuation),
(r'(for|in|while|do|break|return|continue|switch|case|default|if|else|'
r'throw|try|catch|finally|new|delete|typeof|instanceof|void|'
r'this)\b', Keyword, 'slashstartsregex'),
(r'(var|with|function)\b', Keyword.Declaration, 'slashstartsregex'),
(r'(abstract|boolean|byte|char|class|const|debugger|double|enum|export|'
r'extends|final|float|goto|implements|import|int|interface|long|native|'
r'package|private|protected|public|short|static|super|synchronized|throws|'
r'transient|volatile)\b', Keyword.Reserved),
(r'(true|false|null|NaN|Infinity|undefined)\b', Keyword.Constant),
(r'(Array|Boolean|Date|Error|Function|Math|netscape|'
r'Number|Object|Packages|RegExp|String|sun|decodeURI|'
r'decodeURIComponent|encodeURI|encodeURIComponent|'
r'Error|eval|isFinite|isNaN|parseFloat|parseInt|document|this|'
r'window)\b', Name.Builtin),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-fA-F]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
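# Editorial usage sketch (hedged): lexers in this module are normally driven
# through pygments.highlight(); for example, with the JavascriptLexer defined
# above and the stock HtmlFormatter:
#
#     from pygments import highlight
#     from pygments.formatters import HtmlFormatter
#
#     print(highlight('var answer = 42;', JavascriptLexer(), HtmlFormatter()))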
class ActionScriptLexer(RegexLexer):
"""
For ActionScript source code.
*New in Pygments 0.9.*
"""
name = 'ActionScript'
aliases = ['as', 'actionscript']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
flags = re.DOTALL
tokens = {
'root': [
(r'\s+', Text),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^/\n])*/[gim]*', String.Regex),
(r'[~\^\*!%&<>\|+=:;,/?\\-]+', Operator),
(r'[{}\[\]();.]+', Punctuation),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|var|with|new|typeof|arguments|instanceof|this|'
r'switch)\b', Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|Void)\b',
Keyword.Constant),
(r'(Accessibility|AccessibilityProperties|ActionScriptVersion|'
r'ActivityEvent|AntiAliasType|ApplicationDomain|AsBroadcaster|Array|'
r'AsyncErrorEvent|AVM1Movie|BevelFilter|Bitmap|BitmapData|'
r'BitmapDataChannel|BitmapFilter|BitmapFilterQuality|BitmapFilterType|'
r'BlendMode|BlurFilter|Boolean|ByteArray|Camera|Capabilities|CapsStyle|'
r'Class|Color|ColorMatrixFilter|ColorTransform|ContextMenu|'
r'ContextMenuBuiltInItems|ContextMenuEvent|ContextMenuItem|'
r'ConvolutionFilter|CSMSettings|DataEvent|Date|DefinitionError|'
r'DeleteObjectSample|Dictionary|DisplacementMapFilter|DisplayObject|'
r'DisplacementMapFilterMode|DisplayObjectContainer|DropShadowFilter|'
r'Endian|EOFError|Error|ErrorEvent|EvalError|Event|EventDispatcher|'
r'EventPhase|ExternalInterface|FileFilter|FileReference|'
r'FileReferenceList|FocusDirection|FocusEvent|Font|FontStyle|FontType|'
r'FrameLabel|FullScreenEvent|Function|GlowFilter|GradientBevelFilter|'
r'GradientGlowFilter|GradientType|Graphics|GridFitType|HTTPStatusEvent|'
r'IBitmapDrawable|ID3Info|IDataInput|IDataOutput|IDynamicPropertyOutput|'
r'IDynamicPropertyWriter|IEventDispatcher|IExternalizable|'
r'IllegalOperationError|IME|IMEConversionMode|IMEEvent|int|'
r'InteractiveObject|InterpolationMethod|InvalidSWFError|InvokeEvent|'
r'IOError|IOErrorEvent|JointStyle|Key|Keyboard|KeyboardEvent|KeyLocation|'
r'LineScaleMode|Loader|LoaderContext|LoaderInfo|LoadVars|LocalConnection|'
r'Locale|Math|Matrix|MemoryError|Microphone|MorphShape|Mouse|MouseEvent|'
r'MovieClip|MovieClipLoader|Namespace|NetConnection|NetStatusEvent|'
r'NetStream|NewObjectSample|Number|Object|ObjectEncoding|PixelSnapping|'
r'Point|PrintJob|PrintJobOptions|PrintJobOrientation|ProgressEvent|Proxy|'
r'QName|RangeError|Rectangle|ReferenceError|RegExp|Responder|Sample|Scene|'
r'ScriptTimeoutError|Security|SecurityDomain|SecurityError|'
r'SecurityErrorEvent|SecurityPanel|Selection|Shape|SharedObject|'
r'SharedObjectFlushStatus|SimpleButton|Socket|Sound|SoundChannel|'
r'SoundLoaderContext|SoundMixer|SoundTransform|SpreadMethod|Sprite|'
r'StackFrame|StackOverflowError|Stage|StageAlign|StageDisplayState|'
r'StageQuality|StageScaleMode|StaticText|StatusEvent|String|StyleSheet|'
r'SWFVersion|SyncEvent|SyntaxError|System|TextColorType|TextField|'
r'TextFieldAutoSize|TextFieldType|TextFormat|TextFormatAlign|'
r'TextLineMetrics|TextRenderer|TextSnapshot|Timer|TimerEvent|Transform|'
r'TypeError|uint|URIError|URLLoader|URLLoaderDataFormat|URLRequest|'
r'URLRequestHeader|URLRequestMethod|URLStream|URLVariables|VerifyError|'
r'Video|XML|XMLDocument|XMLList|XMLNode|XMLNodeType|XMLSocket|XMLUI)\b',
Name.Builtin),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b',Name.Function),
(r'[$a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
]
}
def analyse_text(text):
return 0.05
class ActionScript3Lexer(RegexLexer):
"""
For ActionScript 3 source code.
*New in Pygments 0.11.*
"""
name = 'ActionScript 3'
aliases = ['as3', 'actionscript3']
filenames = ['*.as']
mimetypes = ['application/x-actionscript', 'text/x-actionscript',
'text/actionscript']
identifier = r'[$a-zA-Z_][a-zA-Z0-9_]*'
flags = re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'\s+', Text),
(r'(function\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword.Declaration, Name.Function, Text, Operator),
'funcparams'),
(r'(var|const)(\s+)(' + identifier + r')(\s*)(:)(\s*)(' + identifier + r')',
bygroups(Keyword.Declaration, Text, Name, Text, Punctuation, Text,
Keyword.Type)),
(r'(import|package)(\s+)((?:' + identifier + r'|\.)+)(\s*)',
bygroups(Keyword, Text, Name.Namespace, Text)),
(r'(new)(\s+)(' + identifier + r')(\s*)(\()',
bygroups(Keyword, Text, Keyword.Type, Text, Operator)),
(r'//.*?\n', Comment.Single),
(r'/\*.*?\*/', Comment.Multiline),
(r'/(\\\\|\\/|[^\n])*/[gisx]*', String.Regex),
(r'(\.)(' + identifier + r')', bygroups(Operator, Name.Attribute)),
(r'(case|default|for|each|in|while|do|break|return|continue|if|else|'
r'throw|try|catch|with|new|typeof|arguments|instanceof|this|'
r'switch|import|include|as|is)\b',
Keyword),
(r'(class|public|final|internal|native|override|private|protected|'
r'static|import|extends|implements|interface|intrinsic|return|super|'
r'dynamic|function|const|get|namespace|package|set)\b',
Keyword.Declaration),
(r'(true|false|null|NaN|Infinity|-Infinity|undefined|void)\b',
Keyword.Constant),
(r'(decodeURI|decodeURIComponent|encodeURI|escape|eval|isFinite|isNaN|'
r'isXMLName|clearInterval|fscommand|getTimer|getURL|getVersion|'
r'isFinite|parseFloat|parseInt|setInterval|trace|updateAfterEvent|'
r'unescape)\b', Name.Function),
(identifier, Name),
(r'[0-9][0-9]*\.[0-9]+([eE][0-9]+)?[fd]?', Number.Float),
(r'0x[0-9a-f]+', Number.Hex),
(r'[0-9]+', Number.Integer),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[~\^\*!%&<>\|+=:;,/?\\{}\[\]();.-]+', Operator),
],
'funcparams': [
(r'\s+', Text),
(r'(\s*)(\.\.\.)?(' + identifier + r')(\s*)(:)(\s*)(' +
identifier + r'|\*)(\s*)',
bygroups(Text, Punctuation, Name, Text, Operator, Text,
Keyword.Type, Text), 'defval'),
(r'\)', Operator, 'type')
],
'type': [
(r'(\s*)(:)(\s*)(' + identifier + r'|\*)',
bygroups(Text, Operator, Text, Keyword.Type), '#pop:2'),
(r'\s*', Text, '#pop:2')
],
'defval': [
(r'(=)(\s*)([^(),]+)(\s*)(,?)',
bygroups(Operator, Text, using(this), Text, Operator), '#pop'),
(r',?', Operator, '#pop')
]
}
def analyse_text(text):
if re.match(r'\w+\s*:\s*\w', text): return 0.3
return 0.1
class CssLexer(RegexLexer):
"""
For CSS (Cascading Style Sheets).
"""
name = 'CSS'
aliases = ['css']
filenames = ['*.css']
mimetypes = ['text/css']
tokens = {
'root': [
include('basics'),
],
'basics': [
(r'\s+', Text),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'{', Punctuation, 'content'),
(r'\:[a-zA-Z0-9_-]+', Name.Decorator),
(r'\.[a-zA-Z0-9_-]+', Name.Class),
(r'\#[a-zA-Z0-9_-]+', Name.Function),
(r'@[a-zA-Z0-9_-]+', Keyword, 'atrule'),
(r'[a-zA-Z0-9_-]+', Name.Tag),
(r'[~\^\*!%&\[\]\(\)<>\|+=@:;,./?-]', Operator),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single)
],
'atrule': [
(r'{', Punctuation, 'atcontent'),
(r';', Punctuation, '#pop'),
include('basics'),
],
'atcontent': [
include('basics'),
(r'}', Punctuation, '#pop:2'),
],
'content': [
(r'\s+', Text),
(r'}', Punctuation, '#pop'),
(r'url\(.*?\)', String.Other),
(r'^@.*?$', Comment.Preproc),
(r'(azimuth|background-attachment|background-color|'
r'background-image|background-position|background-repeat|'
r'background|border-bottom-color|border-bottom-style|'
r'border-bottom-width|border-left-color|border-left-style|'
r'border-left-width|border-right|border-right-color|'
r'border-right-style|border-right-width|border-top-color|'
r'border-top-style|border-top-width|border-bottom|'
r'border-collapse|border-left|border-width|border-color|'
r'border-spacing|border-style|border-top|border|caption-side|'
r'clear|clip|color|content|counter-increment|counter-reset|'
r'cue-after|cue-before|cue|cursor|direction|display|'
r'elevation|empty-cells|float|font-family|font-size|'
r'font-size-adjust|font-stretch|font-style|font-variant|'
r'font-weight|font|height|letter-spacing|line-height|'
r'list-style-type|list-style-image|list-style-position|'
r'list-style|margin-bottom|margin-left|margin-right|'
r'margin-top|margin|marker-offset|marks|max-height|max-width|'
r'min-height|min-width|opacity|orphans|outline|outline-color|'
r'outline-style|outline-width|overflow|padding-bottom|'
r'padding-left|padding-right|padding-top|padding|page|'
r'page-break-after|page-break-before|page-break-inside|'
r'pause-after|pause-before|pause|pitch|pitch-range|'
r'play-during|position|quotes|richness|right|size|'
r'speak-header|speak-numeral|speak-punctuation|speak|'
r'speech-rate|stress|table-layout|text-align|text-decoration|'
r'text-indent|text-shadow|text-transform|top|unicode-bidi|'
r'vertical-align|visibility|voice-family|volume|white-space|'
r'widows|width|word-spacing|z-index|bottom|left|'
r'above|absolute|always|armenian|aural|auto|avoid|baseline|'
r'behind|below|bidi-override|blink|block|bold|bolder|both|'
r'capitalize|center-left|center-right|center|circle|'
r'cjk-ideographic|close-quote|collapse|condensed|continuous|'
r'crop|crosshair|cross|cursive|dashed|decimal-leading-zero|'
r'decimal|default|digits|disc|dotted|double|e-resize|embed|'
r'extra-condensed|extra-expanded|expanded|fantasy|far-left|'
r'far-right|faster|fast|fixed|georgian|groove|hebrew|help|'
r'hidden|hide|higher|high|hiragana-iroha|hiragana|icon|'
r'inherit|inline-table|inline|inset|inside|invert|italic|'
r'justify|katakana-iroha|katakana|landscape|larger|large|'
r'left-side|leftwards|level|lighter|line-through|list-item|'
r'loud|lower-alpha|lower-greek|lower-roman|lowercase|ltr|'
r'lower|low|medium|message-box|middle|mix|monospace|'
r'n-resize|narrower|ne-resize|no-close-quote|no-open-quote|'
r'no-repeat|none|normal|nowrap|nw-resize|oblique|once|'
r'open-quote|outset|outside|overline|pointer|portrait|px|'
r'relative|repeat-x|repeat-y|repeat|rgb|ridge|right-side|'
r'rightwards|s-resize|sans-serif|scroll|se-resize|'
r'semi-condensed|semi-expanded|separate|serif|show|silent|'
r'slow|slower|small-caps|small-caption|smaller|soft|solid|'
r'spell-out|square|static|status-bar|super|sw-resize|'
r'table-caption|table-cell|table-column|table-column-group|'
r'table-footer-group|table-header-group|table-row|'
r'table-row-group|text|text-bottom|text-top|thick|thin|'
r'transparent|ultra-condensed|ultra-expanded|underline|'
r'upper-alpha|upper-latin|upper-roman|uppercase|url|'
r'visible|w-resize|wait|wider|x-fast|x-high|x-large|x-loud|'
r'x-low|x-small|x-soft|xx-large|xx-small|yes)\b', Keyword),
(r'(indigo|gold|firebrick|indianred|yellow|darkolivegreen|'
r'darkseagreen|mediumvioletred|mediumorchid|chartreuse|'
r'mediumslateblue|black|springgreen|crimson|lightsalmon|brown|'
r'turquoise|olivedrab|cyan|silver|skyblue|gray|darkturquoise|'
r'goldenrod|darkgreen|darkviolet|darkgray|lightpink|teal|'
r'darkmagenta|lightgoldenrodyellow|lavender|yellowgreen|thistle|'
r'violet|navy|orchid|blue|ghostwhite|honeydew|cornflowerblue|'
r'darkblue|darkkhaki|mediumpurple|cornsilk|red|bisque|slategray|'
r'darkcyan|khaki|wheat|deepskyblue|darkred|steelblue|aliceblue|'
r'gainsboro|mediumturquoise|floralwhite|coral|purple|lightgrey|'
r'lightcyan|darksalmon|beige|azure|lightsteelblue|oldlace|'
r'greenyellow|royalblue|lightseagreen|mistyrose|sienna|'
r'lightcoral|orangered|navajowhite|lime|palegreen|burlywood|'
r'seashell|mediumspringgreen|fuchsia|papayawhip|blanchedalmond|'
r'peru|aquamarine|white|darkslategray|ivory|dodgerblue|'
r'lemonchiffon|chocolate|orange|forestgreen|slateblue|olive|'
r'mintcream|antiquewhite|darkorange|cadetblue|moccasin|'
r'limegreen|saddlebrown|darkslateblue|lightskyblue|deeppink|'
r'plum|aqua|darkgoldenrod|maroon|sandybrown|magenta|tan|'
r'rosybrown|pink|lightblue|palevioletred|mediumseagreen|'
r'dimgray|powderblue|seagreen|snow|mediumblue|midnightblue|'
r'paleturquoise|palegoldenrod|whitesmoke|darkorchid|salmon|'
r'lightslategray|lawngreen|lightgreen|tomato|hotpink|'
r'lightyellow|lavenderblush|linen|mediumaquamarine|green|'
r'blueviolet|peachpuff)\b', Name.Builtin),
(r'\!important', Comment.Preproc),
(r'/\*(?:.|\n)*?\*/', Comment),
(r'\#[a-zA-Z0-9]{1,6}', Number),
(r'[\.-]?[0-9]*[\.]?[0-9]+(em|px|\%|pt|pc|in|mm|cm|ex)', Number),
(r'-?[0-9]+', Number),
(r'[~\^\*!%&<>\|+=@:,./?-]+', Operator),
(r'[\[\]();]+', Punctuation),
(r'"(\\\\|\\"|[^"])*"', String.Double),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r'[a-zA-Z][a-zA-Z0-9]+', Name)
]
}
class HtmlLexer(RegexLexer):
"""
For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
by the appropriate lexer.
"""
name = 'HTML'
aliases = ['html']
filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
mimetypes = ['text/html', 'application/xhtml+xml']
flags = re.IGNORECASE | re.DOTALL
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*script\s*', Name.Tag, ('script-content', 'tag')),
(r'<\s*style\s*', Name.Tag, ('style-content', 'tag')),
(r'<\s*[a-zA-Z0-9:]+', Name.Tag, 'tag'),
(r'<\s*/\s*[a-zA-Z0-9:]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_:-]+\s*=', Name.Attribute, 'attr'),
(r'[a-zA-Z0-9_:-]+', Name.Attribute),
(r'/?\s*>', Name.Tag, '#pop'),
],
'script-content': [
(r'<\s*/\s*script\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
],
'style-content': [
(r'<\s*/\s*style\s*>', Name.Tag, '#pop'),
(r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
],
'attr': [
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if html_doctype_matches(text):
return 0.5
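# Usage sketch (editor illustration, not part of the upstream lexer module):
# running an HTML fragment through HtmlLexer hands the <script> body to the
# JavaScript lexer defined earlier in this file. Assumes Pygments itself is
# importable as `pygments`.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    _sample = '<p>hi</p><script>var x = 1;</script>'
    print(highlight(_sample, HtmlLexer(), HtmlFormatter()))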
class PhpLexer(RegexLexer):
"""
For `PHP <http://www.php.net/>`_ source code.
For PHP embedded in HTML, use the `HtmlPhpLexer`.
Additional options accepted:
`startinline`
If given and ``True`` the lexer starts highlighting with
php code (i.e.: no starting ``<?php`` required). The default
is ``False``.
`funcnamehighlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabledmodules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted
except the special ``'unknown'`` module that includes functions
that are known to php but are undocumented.
To get a list of allowed modules have a look into the
`_phpbuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._phpbuiltins import MODULES
>>> MODULES.keys()
['PHP Options/Info', 'Zip', 'dba', ...]
In fact the names of those modules match the module names from
the php documentation.
"""
name = 'PHP'
aliases = ['php', 'php3', 'php4', 'php5']
filenames = ['*.php', '*.php[345]']
mimetypes = ['text/x-php']
flags = re.IGNORECASE | re.DOTALL | re.MULTILINE
tokens = {
'root': [
(r'<\?(php)?', Comment.Preproc, 'php'),
(r'[^<]+', Other),
(r'<', Other)
],
'php': [
(r'\?>', Comment.Preproc, '#pop'),
(r'<<<([a-zA-Z_][a-zA-Z0-9_]*)\n.*?\n\1\;?\n', String),
(r'\s+', Text),
(r'#.*?\n', Comment.Single),
(r'//.*?\n', Comment.Single),
# put the empty comment here, it is otherwise seen as
# the start of a docstring
(r'/\*\*/', Comment.Multiline),
(r'/\*\*.*?\*/', String.Doc),
(r'/\*.*?\*/', Comment.Multiline),
(r'(->|::)(\s*)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Operator, Text, Name.Attribute)),
(r'[~!%^&*+=|:.<>/?@-]+', Operator),
(r'[\[\]{}();,]+', Punctuation),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'(function)(\s+)(&?)(\s*)',
bygroups(Keyword, Text, Operator, Text), 'functionname'),
(r'(const)(\s+)([a-zA-Z_][a-zA-Z0-9_]*)',
bygroups(Keyword, Text, Name.Constant)),
(r'(and|E_PARSE|old_function|E_ERROR|or|as|E_WARNING|parent|'
r'eval|PHP_OS|break|exit|case|extends|PHP_VERSION|cfunction|'
r'FALSE|print|for|require|continue|foreach|require_once|'
r'declare|return|default|static|do|switch|die|stdClass|'
r'echo|else|TRUE|elseif|var|empty|if|xor|enddeclare|include|'
r'virtual|endfor|include_once|while|endforeach|global|__FILE__|'
r'endif|list|__LINE__|endswitch|new|__sleep|endwhile|not|'
r'array|__wakeup|E_ALL|NULL|final|php_user_filter|interface|'
r'implements|public|private|protected|abstract|clone|try|'
r'catch|throw|this)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
(r'\$\{\$+[a-zA-Z_][a-zA-Z0-9_]*\}', Name.Variable),
(r'\$+[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable),
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Other),
(r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
r"0[xX][0-9a-fA-F]+[Ll]?", Number),
(r"'([^'\\]*(?:\\.[^'\\]*)*)'", String.Single),
(r'`([^`\\]*(?:\\.[^`\\]*)*)`', String.Backtick),
(r'"', String.Double, 'string'),
],
'classname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'functionname': [
(r'[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'string': [
(r'"', String.Double, '#pop'),
(r'[^{$"\\]+', String.Double),
(r'\\([nrt\"$]|[0-7]{1,3}|x[0-9A-Fa-f]{1,2})', String.Escape),
(r'\$[a-zA-Z_][a-zA-Z0-9_]*(\[\S+\]|->[a-zA-Z_][a-zA-Z0-9_]*)?',
String.Interpol),
(r'(\{\$\{)(.*?)(\}\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\{)(\$.*?)(\})',
bygroups(String.Interpol, using(this, _startinline=True),
String.Interpol)),
(r'(\$\{)(\S+)(\})',
bygroups(String.Interpol, Name.Variable, String.Interpol)),
(r'[${\\]+', String.Double)
],
}
def __init__(self, **options):
self.funcnamehighlighting = get_bool_opt(
options, 'funcnamehighlighting', True)
self.disabledmodules = get_list_opt(
options, 'disabledmodules', ['unknown'])
self.startinline = get_bool_opt(options, 'startinline', False)
# private option argument for the lexer itself
if '_startinline' in options:
self.startinline = options.pop('_startinline')
# collect activated functions in a set
self._functions = set()
if self.funcnamehighlighting:
from pygments.lexers._phpbuiltins import MODULES
for key, value in MODULES.iteritems():
if key not in self.disabledmodules:
self._functions.update(value)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
stack = ['root']
if self.startinline:
stack.append('php')
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Other:
if value in self._functions:
yield index, Name.Builtin, value
continue
yield index, token, value
def analyse_text(text):
rv = 0.0
if re.search(r'<\?(?!xml)', text):
rv += 0.3
if '?>' in text:
rv += 0.1
return rv
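# Usage sketch (editor illustration): the `startinline` option documented in
# the docstring above lets PhpLexer highlight a bare snippet with no leading
# <?php marker. Assumes Pygments is importable.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter
    _snippet = "echo strtoupper($greeting);"
    print(highlight(_snippet, PhpLexer(startinline=True), TerminalFormatter()))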
class XmlLexer(RegexLexer):
"""
Generic lexer for XML (eXtensible Markup Language).
"""
flags = re.MULTILINE | re.DOTALL
name = 'XML'
aliases = ['xml']
filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd', '*.wsdl']
mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
'application/rss+xml', 'application/atom+xml',
'application/xsl+xml', 'application/xslt+xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
def analyse_text(text):
if looks_like_xml(text):
return 0.5
class XsltLexer(XmlLexer):
'''
A lexer for XSLT.
*New in Pygments 0.10.*
'''
name = 'XSLT'
aliases = ['xslt']
filenames = ['*.xsl', '*.xslt']
EXTRA_KEYWORDS = set([
'apply-imports', 'apply-templates', 'attribute',
'attribute-set', 'call-template', 'choose', 'comment',
'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
'for-each', 'if', 'import', 'include', 'key', 'message',
'namespace-alias', 'number', 'otherwise', 'output', 'param',
'preserve-space', 'processing-instruction', 'sort',
'strip-space', 'stylesheet', 'template', 'text', 'transform',
'value-of', 'variable', 'when', 'with-param'
])
def get_tokens_unprocessed(self, text):
for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
m = re.match('</?xsl:([^>]*)/?>?', value)
if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
yield index, Keyword, value
else:
yield index, token, value
def analyse_text(text):
if looks_like_xml(text) and '<xsl' in text:
return 0.8
class MxmlLexer(RegexLexer):
"""
For MXML markup.
Nested AS3 in <script> tags is highlighted by the appropriate lexer.
"""
flags = re.MULTILINE | re.DOTALL
name = 'MXML'
aliases = ['mxml']
filenames = ['*.mxml']
    mimetypes = ['text/xml', 'application/xml']
tokens = {
'root': [
('[^<&]+', Text),
(r'&\S*?;', Name.Entity),
(r'(\<\!\[CDATA\[)(.*?)(\]\]\>)',
bygroups(String, using(ActionScript3Lexer), String)),
('<!--', Comment, 'comment'),
(r'<\?.*?\?>', Comment.Preproc),
('<![^>]*>', Comment.Preproc),
(r'<\s*[a-zA-Z0-9:._-]+', Name.Tag, 'tag'),
(r'<\s*/\s*[a-zA-Z0-9:._-]+\s*>', Name.Tag),
],
'comment': [
('[^-]+', Comment),
('-->', Comment, '#pop'),
('-', Comment),
],
'tag': [
(r'\s+', Text),
(r'[a-zA-Z0-9_.:-]+\s*=', Name.Attribute, 'attr'),
(r'/?\s*>', Name.Tag, '#pop'),
],
'attr': [
('\s+', Text),
('".*?"', String, '#pop'),
("'.*?'", String, '#pop'),
(r'[^\s>]+', String, '#pop'),
],
}
| artdent/jgments | lib/pygments-1.2.2-patched/pygments/lexers/web.py | Python | bsd-2-clause | 31,156 |
from oct2py import octave
from cvxopt import matrix, solvers
from cvxpy import *
import numpy as np
def svm_classifier(X, y,mode):
[num_sample,d]=X.shape
mode_control=np.ones((num_sample,1))
for i in range(num_sample):
if(mode==1):
if(y[i]==-1):
mode_control[i]=0;
if(mode==2):
if(y[i]==1):
mode_control[i]=0;
if(mode==4):
mode_control[i]=0;
#[G,beta]= octave.PWL_feature(X, M, beta_type);
A=np.array([[]])
for i in range(num_sample):
s=np.zeros((1,num_sample));
s[0,i]=1;
temp_1=y[i]*np.concatenate((np.array([[1]]),[X[i,:]]),axis=1)
temp_a=-np.concatenate((temp_1,s),axis=1)
if(i==0):
A=temp_a
else:
A=np.concatenate((A,temp_a),axis=0)
dum_concat=-np.concatenate((np.zeros((num_sample,d+1)),np.eye(num_sample)),axis=1)
A=np.concatenate((A,dum_concat),axis=0);
beq=np.zeros((1+d+num_sample,1));
Aeq=np.concatenate((np.zeros((d+1,1)),np.ones((num_sample,1))-mode_control),axis=0);
Aeq=np.diag(Aeq[:,0]);
b=np.concatenate((-np.ones((num_sample,1)), np.zeros((num_sample,1))),axis=0);
gamma=1;
x=Variable(d+num_sample+1,1)
constraints=[A*x<=b, Aeq*x==0]
obj=Minimize(( 0.5*(norm(x[0:d+1:1,0])**2) )+100*(sum_entries(x[d+1:d+1+num_sample:1,0])) )
prob = Problem(obj, constraints)
prob.solve()
x_val=x.value
ypredicted = x_val[0,0]+(X*x_val[1:1+d,0]);
ypredicted=np.sign(ypredicted);
error=np.zeros(y.shape[0]);
for i in range(y.shape[0]):
error[i]=(y[i]-ypredicted[i,0])/2
return (error,x.value[0:d+1],ypredicted)
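# Toy usage sketch (editor illustration; the data below is made up): train on a
# separable 2-D problem. `mode` picks which samples have their slack variable
# pinned to zero: 1 pins the y == -1 samples, 2 pins the y == +1 samples,
# 4 pins every sample (hard margin); any other value (e.g. 0) leaves all slacks
# free, i.e. the usual soft-margin problem with the penalty hard-coded to 100.
if __name__ == "__main__":
    np.random.seed(0)
    X_demo = np.vstack((np.random.randn(10, 2) + 2.0,
                        np.random.randn(10, 2) - 2.0))
    y_demo = np.array([1] * 10 + [-1] * 10)
    _err, _coef, _pred = svm_classifier(X_demo, y_demo, 0)
    print("misclassified samples:", int(np.sum(_err != 0)))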
| dathath/IJCAI_2017_SD | proteus/svm_classifier.py | Python | bsd-2-clause | 1,509 |
from warpnet_framework.warpnet_client import *
from warpnet_framework.warpnet_common_params import *
from warpnet_experiment_structs import *
from twisted.internet import reactor, stdio
from datetime import *
from numpy import log10, linspace
import time
import sys
mods = [[2,2,2100,78-1]]
pktLens = [1412]; #range(1412, 91, -240) #[1440:-120:120]-28
time_on = 5*60
time_off = 0
numItrs = 1
fileName_offset = 50
#cfo = 2**20
cfo = 2**17
txGain = 55
minChanMag_D = 20
class ScriptMaster:
def startup(self):
stderr_log = open("exp_err.log", "a")
stderr_log.write("\r\n####################################################################\r\n")
stderr_log.write("%s started at %s\r\n" % (sys.argv[0], datetime.now()))
stderr_log.write("####################################################################\r\n\r\n")
stderr_log.flush()
sys.stderr = stderr_log
er_log = MyDataLogger('results/twoNode_realCFO_v%d_logging.txt' % (fileName_offset))
er_log.log("%s" % (datetime.now()) )
er_log.log("CFO: %d, Time on: %d, time off: %d, numIttrs: %d, fn_offset: %d\r\n" % (cfo, time_on, time_off, numItrs, fileName_offset))
er_log.log("Continuous test of actual CFO on emulator kits\r\n")
registerWithServer()
nodes = dict()
#WARP Nodes
createNode(nodes, Node(0, NODE_PCAP))
createNode(nodes, Node(2, NODE_PCAP))
#BER processor "node"
createNode(nodes, Node(98, NODE_PCAP)) #PHY logger
connectToServer(nodes)
controlStruct = ControlStruct()
nodes[0].addStruct('controlStruct', controlStruct)
nodes[2].addStruct('controlStruct', controlStruct)
phyCtrl0 = PHYctrlStruct()
phyCtrl1 = PHYctrlStruct()
nodes[0].addStruct('phyCtrlStruct', phyCtrl0)
nodes[2].addStruct('phyCtrlStruct', phyCtrl1)
cmdStructStart = CommandStruct(COMMANDID_STARTTRIAL, 0)
nodes[0].addStruct('cmdStructStart', cmdStructStart)
cmdStructStop = CommandStruct(COMMANDID_STOPTRIAL, 0)
nodes[0].addStruct('cmdStructStop', cmdStructStop)
cmdStructResetPER = CommandStruct(COMMANDID_RESET_PER, 0)
nodes[0].addStruct('cmdStructResetPER', cmdStructResetPER)
nodes[2].addStruct('cmdStructResetPER', cmdStructResetPER)
perStruct0 = ObservePERStruct()
perStruct1 = ObservePERStruct()
nodes[0].addStruct('perStruct', perStruct0)
nodes[2].addStruct('perStruct', perStruct1)
logParams = LogParams()
nodes[98].addStruct('logParams', logParams)
sendRegistrations(nodes)
controlStruct.packetGeneratorPeriod = mods[0][2]
controlStruct.packetGeneratorLength = pktLens[0]
controlStruct.channel = 9
controlStruct.txPower = txGain
controlStruct.modOrderHeader = mods[0][0]
controlStruct.modOrderPayload = mods[0][1]
#PHYCtrol params:
#param0: txStartOut delay
#param1: artificial txCFO
#param2: minPilotChanMag
#param3:
# [0-0x01]: PHYCTRL_BER_EN: enable BER reporting
# [1-0x02]: PHYCTRL_CFO_EN: enable CFO reporting
# [2-0x04]: PHYCTRL_PHYDUMP_EN: enable Rx PHY dumping
        # [3-0x08]: PHYCTRL_EXTPKTDET_EN: use only ext pkt det
# [4-0x10]: PHYCTRL_COOP_EN: 0=nonCoop, 1=coopMode
# [5-0x20]: PHYCTRL_CFO_CORR_EN: 0=bypass CFO correction, 1=enable CFO correction
# [6-0x40]: PHYCTRL_SWAP_ANT: 0=AntA, 1=AntA_Swapped
#param4:
# [ 7:0]: src re-Tx delay
# [ 7:0]: relay AF Tx delay (only used when in COOP_TESTING)
# [15:8]: relay DF Tx delay (only used when in COOP_TESTING)
#param5: (0 ignores)
# [17: 0]: AGC IIR coef FB
#param6: (0 ignores)
# [31:16]: H_BA minEstMag (UFix16_15)
# [15: 0]: H_AA minEstMag (UFix16_15)
#param7: (0 ignores)
# [27:16]: AF blank stop
# [11: 0]: AF blank start
#param8: (0 ignores)
# [17: 0]: AGC IIR coef Gain
#param9: (Tx pkt types)
# [31: 0]: OR'd combination of PHYCTRL_TX_*
phyCtrl0.param0 = 32+12
phyCtrl0.param1 = cfo #(2**19 ~ 1.2e-4)
phyCtrl0.param2 = 0xFFF
# phyCtrl0.param3 = (PHYCTRL_COOP_EN | PHYCTRL_BER_EN)
phyCtrl0.param3 = (0) #PHYCTRL_COOP_EN)
# phyCtrl0.param4 = (251-2) #v21 timing; #######reTxDly/FFToffset: 251/12, 249/10
phyCtrl0.param4 = 255 #v22 timing
phyCtrl0.param5 = 0
phyCtrl0.param6 = 0
phyCtrl0.param7 = 0
phyCtrl0.param8 = 0
# phyCtrl0.param9 = (PHYCTRL_TX_NC | PHYCTRL_TX_DF | PHYCTRL_TX_AF | PHYCTRL_TX_AFGH | PHYCTRL_TX_DFGH | PHYCTRL_TX_NCMHOP)
phyCtrl0.param9 = (PHYCTRL_TX_NC)
phyCtrl1.param0 = 0
phyCtrl1.param1 = 0
phyCtrl1.param2 = minChanMag_D
# phyCtrl1.param3 = (PHYCTRL_CFO_CORR_EN | PHYCTRL_PHYDUMP_EN)
phyCtrl1.param3 = (PHYCTRL_PHYDUMP_EN)
phyCtrl1.param4 = 0
phyCtrl1.param5 = 0x20000
phyCtrl1.param6 = 1000 | (1000<<16)
phyCtrl1.param7 = 0
phyCtrl1.param8 = 0x20000
phyCtrl1.param9 = 0
nodes[0].sendToNode('phyCtrlStruct')
nodes[2].sendToNode('phyCtrlStruct')
nodes[0].sendToNode('controlStruct')
nodes[2].sendToNode('controlStruct')
nodes[0].sendToNode('cmdStructResetPER')
nodes[2].sendToNode('cmdStructResetPER')
trialInd = -1 #Increment before first trial, which should be trialNum=0
pktLen = pktLens[0];
#Experiment Loops
for ittr in range(1,numItrs+1):
print("Starting iteration %d of %d at %s" % (ittr, numItrs, datetime.now().strftime("%H:%M:%S")))
trialInd += 1
#Stop any traffic that might be running
nodes[0].sendToNode('cmdStructStop')
logParams.fileSuffix = fileName_offset+trialInd
logParams.param0 = ittr
logParams.param1 = 0
logParams.param2 = 0
logParams.param3 = 0
nodes[98].sendToNode('logParams')
#Reset the PER counters at all nodes
nodes[0].sendToNode('cmdStructResetPER')
nodes[2].sendToNode('cmdStructResetPER')
#Start the trial
nodes[0].sendToNode('cmdStructStart')
#Run until minTime elapses
time.sleep(time_on)
nodes[0].sendToNode('cmdStructStop')
time.sleep(time_off)
if not reactor.running:
return
print("############################################")
print("############# Experiment Done! #############")
print("############################################")
reactor.callFromThread(reactor.stop)
sm = ScriptMaster()
stdio.StandardIO(CmdReader()) #if interactive shell is needed
factory = WARPnetClient(sm.startup);
reactor.connectTCP('localhost', 10101, factory)
reactor.run()
| shailcoolboy/Warp-Trinity | ResearchApps/Measurement/warpnet_coprocessors/phy_logger/examples/twoNode_cfoLogging.py | Python | bsd-2-clause | 6,164 |
import asyncio
import warnings
import psycopg2
from .log import logger
class Cursor:
def __init__(self, conn, impl, timeout, echo):
self._conn = conn
self._impl = impl
self._timeout = timeout
self._echo = echo
@property
def echo(self):
"""Return echo mode status."""
return self._echo
@property
def description(self):
"""This read-only attribute is a sequence of 7-item sequences.
Each of these sequences is a collections.namedtuple containing
information describing one result column:
0. name: the name of the column returned.
1. type_code: the PostgreSQL OID of the column.
2. display_size: the actual length of the column in bytes.
3. internal_size: the size in bytes of the column associated to
this column on the server.
4. precision: total number of significant digits in columns of
type NUMERIC. None for other types.
5. scale: count of decimal digits in the fractional part in
columns of type NUMERIC. None for other types.
6. null_ok: always None as not easy to retrieve from the libpq.
This attribute will be None for operations that do not
return rows or if the cursor has not had an operation invoked
via the execute() method yet.
"""
return self._impl.description
def close(self):
"""Close the cursor now."""
self._impl.close()
@property
def closed(self):
"""Read-only boolean attribute: specifies if the cursor is closed."""
return self._impl.closed
@property
def connection(self):
"""Read-only attribute returning a reference to the `Connection`."""
return self._conn
@property
def raw(self):
"""Underlying psycopg cursor object, readonly"""
return self._impl
@property
def name(self):
# Not supported
return self._impl.name
@property
def scrollable(self):
# Not supported
return self._impl.scrollable
@scrollable.setter
def scrollable(self, val):
# Not supported
self._impl.scrollable = val
@property
def withhold(self):
# Not supported
return self._impl.withhold
@withhold.setter
def withhold(self, val):
# Not supported
self._impl.withhold = val
@asyncio.coroutine
def execute(self, operation, parameters=None, *, timeout=None):
"""Prepare and execute a database operation (query or command).
Parameters may be provided as sequence or mapping and will be
bound to variables in the operation. Variables are specified
either with positional %s or named %({name})s placeholders.
"""
if timeout is None:
timeout = self._timeout
waiter = self._conn._create_waiter('cursor.execute')
if self._echo:
logger.info(operation)
logger.info("%r", parameters)
try:
self._impl.execute(operation, parameters)
except:
self._conn._waiter = None
raise
else:
yield from self._conn._poll(waiter, timeout)
@asyncio.coroutine
def executemany(self, operation, seq_of_parameters):
# Not supported
raise psycopg2.ProgrammingError(
"executemany cannot be used in asynchronous mode")
@asyncio.coroutine
def callproc(self, procname, parameters=None, *, timeout=None):
"""Call a stored database procedure with the given name.
The sequence of parameters must contain one entry for each
argument that the procedure expects. The result of the call is
returned as modified copy of the input sequence. Input
parameters are left untouched, output and input/output
parameters replaced with possibly new values.
"""
if timeout is None:
timeout = self._timeout
waiter = self._conn._create_waiter('cursor.callproc')
if self._echo:
logger.info("CALL %s", procname)
logger.info("%r", parameters)
try:
self._impl.callproc(procname, parameters)
except:
self._conn._waiter = None
raise
else:
yield from self._conn._poll(waiter, timeout)
@asyncio.coroutine
def mogrify(self, operation, parameters=None):
"""Return a query string after arguments binding.
The string returned is exactly the one that would be sent to
the database running the .execute() method or similar.
"""
ret = self._impl.mogrify(operation, parameters)
assert not self._conn._isexecuting(), ("Don't support server side "
"mogrify")
return ret
@asyncio.coroutine
def setinputsizes(self, sizes):
"""This method is exposed in compliance with the DBAPI.
It currently does nothing but it is safe to call it.
"""
self._impl.setinputsizes(sizes)
@asyncio.coroutine
def fetchone(self):
"""Fetch the next row of a query result set.
Returns a single tuple, or None when no more data is
available.
"""
ret = self._impl.fetchone()
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@asyncio.coroutine
def fetchmany(self, size=None):
"""Fetch the next set of rows of a query result.
Returns a list of tuples. An empty list is returned when no
more rows are available.
The number of rows to fetch per call is specified by the
parameter. If it is not given, the cursor's .arraysize
determines the number of rows to be fetched. The method should
try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number
of rows not being available, fewer rows may be returned.
"""
if size is None:
size = self._impl.arraysize
ret = self._impl.fetchmany(size)
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@asyncio.coroutine
def fetchall(self):
"""Fetch all (remaining) rows of a query result.
Returns them as a list of tuples. An empty list is returned
if there is no more record to fetch.
"""
ret = self._impl.fetchall()
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@asyncio.coroutine
def scroll(self, value, mode="relative"):
"""Scroll to a new position according to mode.
If mode is relative (default), value is taken as offset
to the current position in the result set, if set to
absolute, value states an absolute target position.
"""
ret = self._impl.scroll(value, mode)
assert not self._conn._isexecuting(), ("Don't support server side "
"cursors yet")
return ret
@property
def arraysize(self):
"""How many rows will be returned by fetchmany() call.
This read/write attribute specifies the number of rows to
fetch at a time with fetchmany(). It defaults to
1 meaning to fetch a single row at a time.
"""
return self._impl.arraysize
@arraysize.setter
def arraysize(self, val):
"""How many rows will be returned by fetchmany() call.
This read/write attribute specifies the number of rows to
fetch at a time with fetchmany(). It defaults to
1 meaning to fetch a single row at a time.
"""
self._impl.arraysize = val
@property
def itersize(self):
# Not supported
return self._impl.itersize
@itersize.setter
def itersize(self, val):
# Not supported
self._impl.itersize = val
@property
def rowcount(self):
"""Returns the number of rows that has been produced of affected.
This read-only attribute specifies the number of rows that the
last :meth:`execute` produced (for Data Query Language
statements like SELECT) or affected (for Data Manipulation
Language statements like UPDATE or INSERT).
The attribute is -1 in case no .execute() has been performed
on the cursor or the row count of the last operation if it
can't be determined by the interface.
"""
return self._impl.rowcount
@property
def rownumber(self):
"""Row index.
This read-only attribute provides the current 0-based index of the
cursor in the result set or ``None`` if the index cannot be
determined."""
return self._impl.rownumber
@property
def lastrowid(self):
"""OID of the last inserted row.
This read-only attribute provides the OID of the last row
inserted by the cursor. If the table wasn't created with OID
support or the last operation is not a single record insert,
the attribute is set to None.
"""
return self._impl.lastrowid
@property
def query(self):
"""The last executed query string.
Read-only attribute containing the body of the last query sent
to the backend (including bound arguments) as bytes
string. None if no query has been executed yet.
"""
return self._impl.query
@property
def statusmessage(self):
"""the message returned by the last command."""
return self._impl.statusmessage
# @asyncio.coroutine
# def cast(self, old, s):
# ...
@property
def tzinfo_factory(self):
"""The time zone factory used to handle data types such as
`TIMESTAMP WITH TIME ZONE`.
"""
return self._impl.tzinfo_factory
@tzinfo_factory.setter
def tzinfo_factory(self, val):
"""The time zone factory used to handle data types such as
`TIMESTAMP WITH TIME ZONE`.
"""
self._impl.tzinfo_factory = val
@asyncio.coroutine
def nextset(self):
# Not supported
self._impl.nextset() # raises psycopg2.NotSupportedError
@asyncio.coroutine
def setoutputsize(self, size, column=None):
# Does nothing
self._impl.setoutputsize(size, column)
@asyncio.coroutine
def copy_from(self, file, table, sep='\t', null='\\N', size=8192,
columns=None):
raise psycopg2.ProgrammingError(
"copy_from cannot be used in asynchronous mode")
@asyncio.coroutine
def copy_to(self, file, table, sep='\t', null='\\N', columns=None):
raise psycopg2.ProgrammingError(
"copy_to cannot be used in asynchronous mode")
@asyncio.coroutine
def copy_expert(self, sql, file, size=8192):
raise psycopg2.ProgrammingError(
"copy_expert cannot be used in asynchronous mode")
@property
def timeout(self):
"""Return default timeout for cursor operations."""
return self._timeout
def __iter__(self):
warnings.warn("Iteration over cursor is deprecated",
DeprecationWarning,
stacklevel=2)
while True:
row = yield from self.fetchone()
if row is None:
                return
else:
yield row
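# Usage sketch (editor illustration, written in the same coroutine style as
# this module): obtain a Cursor from an aiopg connection and run a
# parametrised query with %s placeholders as described in execute() above.
# The DSN below is a placeholder assumption, not part of this module.
if __name__ == '__main__':
    import aiopg

    @asyncio.coroutine
    def _demo():
        conn = yield from aiopg.connect('dbname=test user=postgres')
        cur = yield from conn.cursor()
        yield from cur.execute('SELECT %s + %s', (20, 22))
        print((yield from cur.fetchone()))
        cur.close()

    asyncio.get_event_loop().run_until_complete(_demo())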
| nerandell/aiopg | aiopg/cursor.py | Python | bsd-2-clause | 11,747 |
# -*- coding: utf-8 -*-
from behave.formatter.base import Formatter
class PlainFormatter(Formatter):
"""
Provides a simple plain formatter without coloring/formatting.
In addition, multi-line text and tables are not shown in output (SAD).
"""
name = 'plain'
description = 'Very basic formatter with maximum compatibility'
def __init__(self, stream, config):
super(PlainFormatter, self).__init__(stream, config)
self.steps = []
self.show_timings = config.show_timings
def reset_steps(self):
self.steps = []
def feature(self, feature):
self.reset_steps()
self.stream.write(u'%s: %s\n' % (feature.keyword, feature.name))
def background(self, background):
self.stream.write(u'%s: %s\n' % (background.keyword, background.name))
def scenario(self, scenario):
self.reset_steps()
self.stream.write(u'%11s: %s\n' % (scenario.keyword, scenario.name))
def scenario_outline(self, outline):
self.reset_steps()
self.stream.write(u' %s: %s\n' % (outline.keyword, outline.name))
def step(self, step):
self.steps.append(step)
def result(self, result):
step = self.steps.pop(0)
# TODO right-align the keyword to maximum keyword width?
self.stream.write(u'%12s %s ... ' % (step.keyword, step.name))
status = result.status
if self.show_timings:
status += " in %0.2fs" % step.duration
if result.error_message:
self.stream.write(u'%s\n%s\n' % (status, result.error_message))
else:
self.stream.write(u'%s\n' % status)
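# Usage sketch (editor note): behave selects this formatter by its `name`
# attribute, e.g. `behave -f plain features/`; whether timings are appended to
# each step line follows `config.show_timings`, read in __init__ above.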
| tokunbo/behave-parallel | behave/formatter/plain.py | Python | bsd-2-clause | 1,652 |
import datetime
from django.core.exceptions import ObjectDoesNotExist
from django.db import models
from django.db.models import Sum
from django.contrib.contenttypes.models import ContentType
class TrendingManager(models.Manager):
def trending(self, model, days=30, kind=""):
views = self.filter(
viewed_content_type=ContentType.objects.get_for_model(model),
views_on__gte=datetime.date.today() - datetime.timedelta(days=days),
kind=kind
).values(
"viewed_content_type",
"viewed_object_id",
"kind"
).annotate(
num_views=Sum("count")
).order_by("-num_views")
for d in views:
try:
d["object"] = ContentType.objects.get_for_id(
d["viewed_content_type"]
).get_object_for_this_type(
pk=d["viewed_object_id"]
)
except ObjectDoesNotExist:
d["object"] = None
return views
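# Usage sketch (editor illustration; the model names are assumptions, not part
# of this app): with TrendingManager attached to the app's view-count model,
#
#     trending = ViewLog.objects.trending(Article, days=7, kind="daily")
#
# yields dicts carrying "viewed_content_type", "viewed_object_id", "num_views"
# and a resolved "object" (None when the target row has been deleted).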
| eldarion/django-trending | trending/managers.py | Python | bsd-3-clause | 1,033 |
#!/usr/bin/env python2.7
#! -*- encoding: utf-8 -*-
import simplejson as json
import threading
from sqlobject import SQLObject, connectionForURI
from sqlobject.sqlbuilder import Insert, Select, Delete
from octopus.dispatcher.model.node import FolderNode, TaskNode
from octopus.dispatcher.model.task import Task, TaskGroup
from octopus.dispatcher.model.command import Command
from octopus.dispatcher.model.rendernode import RenderNode
from octopus.dispatcher.model.pool import Pool, PoolShare
from octopus.dispatcher.db.pulidb import FolderNodes, TaskNodes, Dependencies, TaskGroups, Rules, Tasks, Commands, Pools, PoolShares, RenderNodes  # RenderNodes is referenced by duplicateRenderNodesIntoStatDB below
BUFFER_SIZE = 1000
def deleteElementFromMainDB(table, elementId):
mainConn.query(mainConn.sqlrepr(Delete(table.q, where=(table.q.id==elementId))))
def insertElementIntoStatDB(table, values):
statConn.query(statConn.sqlrepr(Insert(table.q, values=values)))
def archiveTaskNodesDependencies(taskNodeId):
Dependencies._connection = mainConn
dependencies = Dependencies.select(Dependencies.q.taskNodes == taskNodeId)
for dependency in dependencies:
duplicateDependencyIntoStatDB(dependency)
deleteElementFromMainDB(Dependencies, dependency.id)
def archiveFolderNodesDependencies(folderNodeId):
Dependencies._connection = mainConn
dependencies = Dependencies.select(Dependencies.q.folderNodes ==folderNodeId)
for dependency in dependencies:
duplicateDependencyIntoStatDB(dependency)
deleteElementFromMainDB(Dependencies, dependency.id)
def archiveTaskNodesRules(taskNodeId):
Rules._connection = mainConn
rules = Rules.select(Rules.q.taskNodeId == taskNodeId )
for rule in rules:
duplicateRuleIntoStatDB(rule)
deleteElementFromMainDB(Rules, rule.id)
def archiveFolderNodesRules(folderNodeId):
Rules._connection = mainConn
rules = Rules.select(Rules.q.folderNodeId == folderNodeId )
for rule in rules:
duplicateRuleIntoStatDB(rule)
deleteElementFromMainDB(Rules, rule.id)
def archivePoolShares():
PoolShares._connection = mainConn
print "Starting to archive PoolShares"
poolSharestoArchive = PoolShares.select(PoolShares.q.archived==True)
processedItems = 0
totalItems = poolSharestoArchive.count()
print "Found " + str(totalItems) + " PoolShares to archive"
while totalItems > processedItems:
for poolShare in poolSharestoArchive.limit(BUFFER_SIZE):
duplicatePoolSharesIntoStatDB(poolShare)
deleteElementFromMainDB(PoolShares, poolShare.id)
processedItems+=1
print str(totalItems - processedItems) + " PoolShares remaining"
print "Finished to archive PoolShares"
def archivePools():
Pools._connection = mainConn
print "Starting to archive Pools"
poolstoArchive = Pools.select(Pools.q.archived==True)
processedItems = 0
totalItems = poolstoArchive.count()
print "Found " + str(totalItems) + " Pools to archive"
while totalItems > processedItems:
for pool in poolstoArchive.limit(BUFFER_SIZE):
duplicatePoolsIntoStatDB(pool)
deleteElementFromMainDB(Pools, pool.id)
processedItems+=1
print str(totalItems - processedItems) + " Pools remaining"
print "Finished to archive Pools"
def archiveFolderNodes():
FolderNodes._connection = mainConn
print "Starting to archive FolderNodes"
folderNodestoArchive = FolderNodes.select(FolderNodes.q.archived==True)
processedItems = 0
totalItems = folderNodestoArchive.count()
print "Found " + str(totalItems) + " FolderNodes to archive"
while totalItems > processedItems:
for node in folderNodestoArchive.limit(BUFFER_SIZE):
manageFolderNode(node)
processedItems+=1
print str(totalItems - processedItems) + " FolderNodes remaining"
print "Finished to archive FolderNodes"
def manageFolderNode(node):
duplicateFolderNodesIntoStatDB(node)
deleteElementFromMainDB(FolderNodes, node.id)
archiveFolderNodesDependencies(node.id)
archiveFolderNodesRules(node.id)
def archiveTaskNodes():
TaskNodes._connection = mainConn
print "Starting to archive TaskNodes"
taskNodestoArchive = TaskNodes.select(TaskNodes.q.archived==True)
processedItems = 0
totalItems = taskNodestoArchive.count()
print "Found " + str(totalItems) + " TaskNodes to archive"
while totalItems > processedItems:
for node in taskNodestoArchive.limit(BUFFER_SIZE):
manageTaskNode(node)
processedItems+=1
print str(totalItems - processedItems) + " TaskNodes remaining"
print "Finished to archive TaskNodes"
def manageTaskNode(node):
duplicateTaskNodesIntoStatDB(node)
deleteElementFromMainDB(TaskNodes, node.id)
archiveTaskNodesDependencies(node.id)
archiveTaskNodesRules(node.id)
def archiveCommands():
Commands._connection = mainConn
print "Starting to archive Commands"
commandsToArchive = Commands.select(Commands.q.archived==True)
processedItems = 0
totalItems = commandsToArchive.count()
print "Found " + str(totalItems) + " Commands to archive"
while totalItems > processedItems:
for commands in commandsToArchive.limit(BUFFER_SIZE):
duplicateCommandIntoStatDB(commands)
deleteElementFromMainDB(Commands, commands.id)
processedItems+=1
print str(totalItems - processedItems) + " Commands remaining"
print "Finished to archive Commands"
def archiveTaskGroups():
TaskGroups._connection = mainConn
print "Starting to archive taskGroups"
tasksGroupsToArchive = TaskGroups.select(TaskGroups.q.archived==True)
processedItems = 0
totalItems = tasksGroupsToArchive.count()
print "Found " + str(totalItems) + " taskGroups to archive"
while totalItems > processedItems:
for taskGroup in tasksGroupsToArchive.limit(BUFFER_SIZE):
duplicateTaskGroupIntoStatDB(taskGroup)
deleteElementFromMainDB(TaskGroups, taskGroup.id)
processedItems+=1
print str(totalItems - processedItems) + " taskGroups remaining"
print "Finished to archive taskGroups"
def archiveTasks():
Tasks._connection = mainConn
print "Starting to archive tasks"
tasksToArchive = Tasks.select(Tasks.q.archived==True)
processedItems = 0
totalItems = tasksToArchive.count()
print "Found " + str(totalItems) + " tasks to archive"
while totalItems > processedItems:
for task in tasksToArchive.limit(BUFFER_SIZE):
duplicateTaskIntoStatDB(task)
deleteElementFromMainDB(Tasks, task.id)
processedItems+=1
print str(totalItems - processedItems) + " tasks remaining"
print "Finished to archive tasks"
def duplicateRuleIntoStatDB(rule):
fields = {Rules.q.id.fieldName: rule.id,
Rules.q.name.fieldName: rule.name,
Rules.q.taskNodeId.fieldName: rule.taskNodeId,
Rules.q.folderNodeId.fieldName: rule.folderNodeId}
insertElementIntoStatDB(Rules, fields)
def duplicateDependencyIntoStatDB(element):
fields = {Dependencies.q.toNodeId.fieldName: element.toNodeId,
Dependencies.q.statusList.fieldName: element.statusList,
Dependencies.q.taskNodes.fieldName: element.taskNodesID,
Dependencies.q.folderNodes.fieldName: element.folderNodesID,
Dependencies.q.archived.fieldName: False}
insertElementIntoStatDB(Dependencies,fields)
def duplicateRenderNodesIntoStatDB(element):
fields = {RenderNodes.q.id.fieldName: element.id,
RenderNodes.q.name.fieldName: element.name,
RenderNodes.q.coresNumber.fieldName: element.coresNumber,
RenderNodes.q.speed.fieldName: element.speed,
RenderNodes.q.ip.fieldName: element.ip,
RenderNodes.q.port.fieldName: element.port,
RenderNodes.q.ramSize.fieldName: element.ramSize,
RenderNodes.q.caracteristics.fieldName: json.dumps(element.caracteristics),
RenderNodes.q.performance.fieldName: element.performance}
insertElementIntoStatDB(RenderNodes,fields)
def duplicatePoolSharesIntoStatDB(element):
fields = {PoolShares.q.id.fieldName: element.id,
PoolShares.q.poolId.fieldName: element.poolId,
PoolShares.q.nodeId.fieldName: element.nodeId,
PoolShares.q.maxRN.fieldName: element.maxRN,
PoolShares.q.archived.fieldName: True}
insertElementIntoStatDB(PoolShares,fields)
def duplicatePoolsIntoStatDB(element):
fields = {Pools.q.id.fieldName: element.id,
Pools.q.name.fieldName: element.name,
Pools.q.archived.fieldName: True}
insertElementIntoStatDB(Pools,fields)
def duplicateFolderNodesIntoStatDB(element):
fields = {FolderNodes.q.id.fieldName: element.id,
FolderNodes.q.name.fieldName: element.name,
FolderNodes.q.parentId.fieldName: element.parentId,
FolderNodes.q.user.fieldName: element.user,
FolderNodes.q.priority.fieldName: element.priority,
FolderNodes.q.dispatchKey.fieldName: element.dispatchKey,
FolderNodes.q.maxRN.fieldName: element.maxRN,
FolderNodes.q.taskGroupId.fieldName: element.taskGroupId,
FolderNodes.q.strategy.fieldName: element.strategy,
FolderNodes.q.creationTime.fieldName: element.creationTime,
FolderNodes.q.startTime.fieldName: element.startTime,
FolderNodes.q.updateTime.fieldName: element.updateTime,
FolderNodes.q.endTime.fieldName: element.endTime,
FolderNodes.q.archived.fieldName: True}
insertElementIntoStatDB(FolderNodes,fields)
def duplicateTaskNodesIntoStatDB(element):
fields = {TaskNodes.q.id.fieldName: element.id,
TaskNodes.q.name.fieldName: element.name,
TaskNodes.q.parentId.fieldName: element.parentId,
TaskNodes.q.user.fieldName: element.user,
TaskNodes.q.priority.fieldName: element.priority,
TaskNodes.q.dispatchKey.fieldName: element.dispatchKey,
TaskNodes.q.maxRN.fieldName: element.maxRN,
TaskNodes.q.taskId.fieldName: element.taskId,
TaskNodes.q.creationTime.fieldName: element.creationTime,
TaskNodes.q.startTime.fieldName: element.startTime,
TaskNodes.q.updateTime.fieldName: element.updateTime,
TaskNodes.q.endTime.fieldName: element.endTime,
TaskNodes.q.maxAttempt.fieldName: element.maxAttempt,
TaskNodes.q.archived.fieldName: True}
insertElementIntoStatDB(TaskNodes,fields)
def duplicateCommandIntoStatDB(element):
fields = {Commands.q.id.fieldName: element.id,
Commands.q.description.fieldName: element.description,
Commands.q.taskId.fieldName: element.taskId,
Commands.q.status.fieldName: element.status,
Commands.q.completion.fieldName: element.completion,
Commands.q.creationTime.fieldName: element.creationTime,
Commands.q.startTime.fieldName: element.startTime,
Commands.q.updateTime.fieldName: element.updateTime,
Commands.q.endTime.fieldName: element.endTime,
Commands.q.message.fieldName: element.message,
Commands.q.stats.fieldName: str(element.stats),
Commands.q.archived.fieldName: True,
Commands.q.args.fieldName: str(element.args),
Commands.q.attempt.fieldName: str(element.attempt),
Commands.q.runnerPackages.fieldName: json.dumps(element.runnerPackages),
Commands.q.watcherPackages.fieldName: json.dumps(element.watcherPackages)}
insertElementIntoStatDB(Commands,fields)
def duplicateTaskGroupIntoStatDB(element):
fields = {TaskGroups.q.id.fieldName: element.id,
TaskGroups.q.name.fieldName: element.name,
TaskGroups.q.parentId.fieldName: element.parentId,
TaskGroups.q.user.fieldName: element.user,
TaskGroups.q.priority.fieldName: element.priority,
TaskGroups.q.dispatchKey.fieldName: element.dispatchKey,
TaskGroups.q.maxRN.fieldName: element.maxRN,
TaskGroups.q.environment.fieldName: json.dumps(element.environment),
TaskGroups.q.requirements.fieldName: json.dumps(element.requirements),
TaskGroups.q.tags.fieldName: json.dumps(element.tags),
TaskGroups.q.strategy.fieldName: element.strategy,
TaskGroups.q.archived.fieldName: True,
TaskGroups.q.args.fieldName: str(element.args)}
insertElementIntoStatDB(TaskGroups,fields)
def duplicateTaskIntoStatDB(element):
fields = {Tasks.q.id.fieldName: element.id,
Tasks.q.name.fieldName: element.name,
Tasks.q.parentId.fieldName: element.parentId,
Tasks.q.user.fieldName: element.user,
Tasks.q.priority.fieldName: element.priority,
Tasks.q.dispatchKey.fieldName: element.dispatchKey,
Tasks.q.maxRN.fieldName: element.maxRN,
Tasks.q.runner.fieldName: element.runner,
Tasks.q.environment.fieldName: json.dumps(element.environment),
Tasks.q.requirements.fieldName: json.dumps(element.requirements),
Tasks.q.minNbCores.fieldName: element.minNbCores,
Tasks.q.maxNbCores.fieldName: element.maxNbCores,
Tasks.q.ramUse.fieldName: element.ramUse,
Tasks.q.licence.fieldName: element.licence,
Tasks.q.tags.fieldName: json.dumps(element.tags),
Tasks.q.validationExpression.fieldName: element.validationExpression,
Tasks.q.archived.fieldName: True,
Tasks.q.args.fieldName: str(element.args),
Tasks.q.maxAttempt.fieldName: element.maxAttempt,
Tasks.q.runnerPackages.fieldName: json.dumps(element.runnerPackages),
Tasks.q.watcherPackages.fieldName: json.dumps(element.watcherPackages)}
insertElementIntoStatDB(Tasks,fields)
def groupForThread1():
archivePoolShares()
archivePools()
archiveTaskNodes()
def groupForThread2():
archiveTasks()
archiveFolderNodes()
def groupForThread3():
archiveTaskGroups()
archiveCommands()
DB_URL = "mysql://root@127.0.0.1/pulidb"
STAT_DB_URL = "mysql://root@127.0.0.1/pulistatdb"
mainConn = connectionForURI(DB_URL)
statConn = connectionForURI(STAT_DB_URL)
threading.Thread(target=groupForThread1).start()
threading.Thread(target=groupForThread2).start()
threading.Thread(target=groupForThread3).start()
| mikrosimage/OpenRenderManagement | scripts/archive_migration.py | Python | bsd-3-clause | 13,337 |
# -*- coding: utf-8 -*-
__version__ = "who knows"
#
'''
I/O for VTK <https://www.vtk.org/wp-content/uploads/2015/04/file-formats.pdf>.
.. moduleauthor:: Nico Schlömer <nico.schloemer@gmail.com>
NOTE: Stolen from https://github.com/nschloe/meshio/blob/master/meshio/vtk_io.py
NOTE: which is distributed under the MIT license:
The MIT License (MIT)
Copyright (c) 2015-2018 Nico Schlömer
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import logging
import numpy
# https://www.vtk.org/doc/nightly/html/vtkCellType_8h_source.html
vtk_to_meshio_type = {
0: 'empty',
1: 'vertex',
# 2: 'poly_vertex',
3: 'line',
# 4: 'poly_line',
5: 'triangle',
# 6: 'triangle_strip',
# 7: 'polygon',
# 8: 'pixel',
9: 'quad',
10: 'tetra',
# 11: 'voxel',
12: 'hexahedron',
13: 'wedge',
14: 'pyramid',
15: 'penta_prism',
16: 'hexa_prism',
21: 'line3',
22: 'triangle6',
23: 'quad8',
24: 'tetra10',
25: 'hexahedron20',
26: 'wedge15',
27: 'pyramid13',
28: 'quad9',
29: 'hexahedron27',
30: 'quad6',
31: 'wedge12',
32: 'wedge18',
33: 'hexahedron24',
34: 'triangle7',
35: 'line4',
#
# 60: VTK_HIGHER_ORDER_EDGE,
# 61: VTK_HIGHER_ORDER_TRIANGLE,
# 62: VTK_HIGHER_ORDER_QUAD,
# 63: VTK_HIGHER_ORDER_POLYGON,
# 64: VTK_HIGHER_ORDER_TETRAHEDRON,
# 65: VTK_HIGHER_ORDER_WEDGE,
# 66: VTK_HIGHER_ORDER_PYRAMID,
# 67: VTK_HIGHER_ORDER_HEXAHEDRON,
}
meshio_to_vtk_type = {v: k for k, v in vtk_to_meshio_type.items()}
# These are all VTK data types. One sometimes finds 'vtktypeint64', but
# this is ill-formed.
vtk_to_numpy_dtype_name = {
'bit': 'bool',
'unsigned_char': 'uint8',
'char': 'int8',
'unsigned_short': 'uint16',
'short': 'int16',
'unsigned_int': 'uint32',
    'int': 'int32',
'unsigned_long': 'int64',
'long': 'int64',
'float': 'float32',
'double': 'float64',
}
numpy_to_vtk_dtype = {v: k for k, v in vtk_to_numpy_dtype_name.items()}
def read(filename):
    '''Reads a legacy VTK unstructured-grid file.
'''
with open(filename, 'rb') as f:
out = read_buffer(f)
return out
def read_buffer(f):
# initialize output data
points = None
field_data = {}
cell_data_raw = {}
point_data = {}
# skip header and title
f.readline()
f.readline()
data_type = f.readline().decode('utf-8').strip()
assert data_type in ['ASCII', 'BINARY'], \
'Unknown VTK data type \'{}\'.'.format(data_type)
is_ascii = data_type == 'ASCII'
c = None
offsets = None
ct = None
    # One of the problems in reading VTK files is the POINT_DATA and CELL_DATA
    # fields. They can contain a number of SCALARS+LOOKUP_TABLE tables, without
    # giving an indication of how many there are. Hence, SCALARS must be
# treated like a first-class section. To associate it with POINT/CELL_DATA,
# we store the `active` section in this variable.
active = None
while True:
line = f.readline().decode('utf-8')
if not line:
# EOF
break
line = line.strip()
# pylint: disable=len-as-condition
if len(line) == 0:
continue
split = line.split()
section = split[0]
if section == 'DATASET':
dataset_type = split[1]
assert dataset_type == 'UNSTRUCTURED_GRID', \
'Only VTK UNSTRUCTURED_GRID supported.'
elif section == 'POINTS':
active = 'POINTS'
num_points = int(split[1])
data_type = split[2]
dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
if is_ascii:
points = numpy.fromfile(
f, count=num_points*3, sep=' ',
dtype=dtype
)
else:
# binary
num_bytes = numpy.dtype(dtype).itemsize
total_num_bytes = num_points * (3 * num_bytes)
# Binary data is big endian, see
# <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
dtype = dtype.newbyteorder('>')
points = \
numpy.fromstring(f.read(total_num_bytes), dtype=dtype)
line = f.readline().decode('utf-8')
assert line == '\n'
points = points.reshape((num_points, 3))
elif section == 'CELLS':
active = 'CELLS'
num_items = int(split[2])
if is_ascii:
c = numpy.fromfile(f, count=num_items, sep=' ', dtype=int)
else:
# binary
num_bytes = 4
total_num_bytes = num_items * num_bytes
c = numpy.fromstring(f.read(total_num_bytes), dtype='>i4')
line = f.readline().decode('utf-8')
assert line == '\n'
offsets = []
if len(c) > 0:
offsets.append(0)
while offsets[-1] + c[offsets[-1]] + 1 < len(c):
offsets.append(offsets[-1] + c[offsets[-1]] + 1)
offsets = numpy.array(offsets)
elif section == 'CELL_TYPES':
active = 'CELL_TYPES'
num_items = int(split[1])
if is_ascii:
ct = \
numpy.fromfile(f, count=int(num_items), sep=' ', dtype=int)
else:
# binary
num_bytes = 4
total_num_bytes = num_items * num_bytes
ct = numpy.fromstring(f.read(total_num_bytes), dtype='>i4')
line = f.readline().decode('utf-8')
assert line == '\n'
elif section == 'POINT_DATA':
active = 'POINT_DATA'
num_items = int(split[1])
elif section == 'CELL_DATA':
active = 'CELL_DATA'
num_items = int(split[1])
elif section == 'SCALARS':
if active == 'POINT_DATA':
d = point_data
else:
assert active == 'CELL_DATA', \
'Illegal SCALARS in section \'{}\'.'.format(active)
d = cell_data_raw
d.update(_read_scalar_field(f, num_items, split))
elif section == 'VECTORS':
if active == 'POINT_DATA':
d = point_data
else:
assert active == 'CELL_DATA', \
                'Illegal VECTORS in section \'{}\'.'.format(active)
d = cell_data_raw
d.update(_read_vector_field(f, num_items, split))
elif section == 'TENSORS':
if active == 'POINT_DATA':
d = point_data
else:
assert active == 'CELL_DATA', \
                'Illegal TENSORS in section \'{}\'.'.format(active)
d = cell_data_raw
d.update(_read_tensor_field(f, num_items, split))
else:
assert section == 'FIELD', \
'Unknown section \'{}\'.'.format(section)
if active == 'POINT_DATA':
d = point_data
else:
assert active == 'CELL_DATA', \
'Illegal FIELD in section \'{}\'.'.format(active)
d = cell_data_raw
d.update(_read_fields(f, int(split[2]), is_ascii))
assert c is not None, \
'Required section CELLS not found.'
assert ct is not None, \
'Required section CELL_TYPES not found.'
cells, cell_data = translate_cells(c, offsets, ct, cell_data_raw)
return points, cells, point_data, cell_data, field_data
def _read_scalar_field(f, num_data, split):
data_name = split[1]
data_type = split[2]
try:
num_comp = int(split[3])
except IndexError:
num_comp = 1
# The standard says:
# > The parameter numComp must range between (1,4) inclusive; [...]
assert 0 < num_comp < 5
dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
lt, _ = f.readline().decode('utf-8').split()
assert lt == 'LOOKUP_TABLE'
data = numpy.fromfile(f, count=num_data, sep=' ', dtype=dtype)
return {data_name: data}
def _read_vector_field(f, num_data, split):
data_name = split[1]
data_type = split[2]
dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
data = numpy.fromfile(
f, count=3*num_data, sep=' ', dtype=dtype
).reshape(-1, 3)
return {data_name: data}
def _read_tensor_field(f, num_data, split):
data_name = split[1]
data_type = split[2]
dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
data = numpy.fromfile(
f, count=9*num_data, sep=' ', dtype=dtype
).reshape(-1, 3, 3)
return {data_name: data}
def _read_fields(f, num_fields, is_ascii):
data = {}
for _ in range(num_fields):
name, shape0, shape1, data_type = \
f.readline().decode('utf-8').split()
shape0 = int(shape0)
shape1 = int(shape1)
dtype = numpy.dtype(vtk_to_numpy_dtype_name[data_type])
if is_ascii:
dat = numpy.fromfile(
f, count=shape0 * shape1, sep=' ', dtype=dtype
)
else:
# binary
num_bytes = numpy.dtype(dtype).itemsize
total_num_bytes = shape0 * shape1 * num_bytes
# Binary data is big endian, see
# <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
dtype = dtype.newbyteorder('>')
dat = numpy.fromstring(f.read(total_num_bytes), dtype=dtype)
line = f.readline().decode('utf-8')
assert line == '\n'
if shape0 != 1:
dat = dat.reshape((shape1, shape0))
data[name] = dat
return data
def raw_from_cell_data(cell_data):
# merge cell data
cell_data_raw = {}
for d in cell_data.values():
for name, values in d.items():
if name in cell_data_raw:
cell_data_raw[name].append(values)
else:
cell_data_raw[name] = [values]
for name in cell_data_raw:
cell_data_raw[name] = numpy.concatenate(cell_data_raw[name])
return cell_data_raw
def translate_cells(data, offsets, types, cell_data_raw):
# Translate it into the cells dictionary.
# `data` is a one-dimensional vector with
# (num_points0, p0, p1, ... ,pk, numpoints1, p10, p11, ..., p1k, ...
# Collect types into bins.
# See <https://stackoverflow.com/q/47310359/353337> for better
# alternatives.
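    # Worked example (editor note): for one triangle [0, 1, 2] followed by one
    # line [3, 4], `data` is [3, 0, 1, 2, 2, 3, 4], `offsets` is [0, 4] and
    # `types` is [5, 3] (VTK_TRIANGLE, VTK_LINE).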
uniques = numpy.unique(types)
bins = {u: numpy.where(types == u)[0] for u in uniques}
cells = {}
cell_data = {}
for tpe, b in bins.items():
meshio_type = vtk_to_meshio_type[tpe]
n = data[offsets[b[0]]]
assert (data[offsets[b]] == n).all()
indices = numpy.array([
numpy.arange(1, n+1) + o for o in offsets[b]
])
cells[meshio_type] = data[indices]
cell_data[meshio_type] = \
{key: value[b] for key, value in cell_data_raw.items()}
return cells, cell_data
def write(filename,
points,
cells,
point_data=None,
cell_data=None,
field_data=None,
write_binary=True):
if not write_binary:
logging.warning('VTK ASCII files are only meant for debugging.')
with open(filename, 'wb') as f:
f.write('# vtk DataFile Version 4.2\n'.encode('utf-8'))
f.write('written by meshio v{}\n'.format(__version__).encode('utf-8'))
f.write(('BINARY\n' if write_binary else 'ASCII\n').encode('utf-8'))
f.write('DATASET UNSTRUCTURED_GRID\n'.encode('utf-8'))
# write points and cells
_write_points(f, points, write_binary)
_write_cells(f, cells, write_binary)
# write point data
if point_data is not None:
num_points = len(points)
f.write('POINT_DATA {}\n'.format(num_points).encode('utf-8'))
_write_field_data(f, point_data, write_binary)
# write cell data
if cell_data is not None:
total_num_cells = sum([len(c) for c in cells.values()])
cell_data_raw = raw_from_cell_data(cell_data)
f.write('CELL_DATA {}\n'.format(total_num_cells).encode('utf-8'))
_write_field_data(f, cell_data_raw, write_binary)
return
def _write_points(f, points, write_binary):
f.write(
'POINTS {} {}\n'.format(
len(points), numpy_to_vtk_dtype[points.dtype.name]
).encode('utf-8'))
if write_binary:
# Binary data must be big endian, see
# <https://www.vtk.org/Wiki/VTK/Writing_VTK_files_using_python#.22legacy.22>.
points.astype(points.dtype.newbyteorder('>')).tofile(f, sep='')
else:
# ascii
points.tofile(f, sep=' ')
f.write('\n'.encode('utf-8'))
return
def _write_cells(f, cells, write_binary):
total_num_cells = sum([len(c) for c in cells.values()])
total_num_idx = sum([numpy.prod(c.shape) for c in cells.values()])
# For each cell, the number of nodes is stored
total_num_idx += total_num_cells
f.write(
'CELLS {} {}\n'.format(total_num_cells, total_num_idx)
.encode('utf-8'))
if write_binary:
for key in cells:
n = cells[key].shape[1]
d = numpy.column_stack([
numpy.full(len(cells[key]), n), cells[key]
]).astype(numpy.dtype('>i4'))
f.write(d.tostring())
if write_binary:
f.write('\n'.encode('utf-8'))
else:
# ascii
for key in cells:
n = cells[key].shape[1]
for cell in cells[key]:
f.write((' '.join([
'{}'.format(idx)
for idx in numpy.concatenate([[n], cell])
]) + '\n').encode('utf-8'))
# write cell types
f.write('CELL_TYPES {}\n'.format(total_num_cells).encode('utf-8'))
if write_binary:
for key in cells:
d = numpy.full(
len(cells[key]), meshio_to_vtk_type[key]
).astype(numpy.dtype('>i4'))
f.write(d.tostring())
f.write('\n'.encode('utf-8'))
else:
# ascii
for key in cells:
for _ in range(len(cells[key])):
f.write(
'{}\n'.format(meshio_to_vtk_type[key]).encode('utf-8')
)
return
def _write_field_data(f, data, write_binary):
f.write((
'FIELD FieldData {}\n'.format(len(data))
).encode('utf-8'))
for name, values in data.items():
if len(values.shape) == 1:
num_tuples = values.shape[0]
num_components = 1
else:
assert len(values.shape) == 2, \
'Only one and two-dimensional field data supported.'
num_tuples = values.shape[0]
num_components = values.shape[1]
f.write(('{} {} {} {}\n'.format(
name, num_components, num_tuples,
numpy_to_vtk_dtype[values.dtype.name]
)).encode('utf-8'))
if write_binary:
values.astype(values.dtype.newbyteorder('>')).tofile(f, sep='')
else:
# ascii
values.tofile(f, sep=' ')
# numpy.savetxt(f, points)
f.write('\n'.encode('utf-8'))
return
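if __name__ == '__main__':
    # Round-trip sketch (editor illustration): write a single triangle as an
    # ASCII legacy-VTK unstructured grid with the helpers above, then read it
    # back. 'demo.vtk' is a throwaway file name chosen for the example.
    _pts = numpy.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
    _cells = {'triangle': numpy.array([[0, 1, 2]])}
    write('demo.vtk', _pts, _cells, write_binary=False)
    _p, _c, _pd, _cd, _fd = read('demo.vtk')
    print(_p.shape, _c['triangle'])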
| amanzi/ats-dev | tools/meshing_ats/vtk_io.py | Python | bsd-3-clause | 16,481 |
from __future__ import absolute_import, unicode_literals
import operator
import sys
from collections import OrderedDict
from functools import reduce
from django import forms
from django.contrib.admin import FieldListFilter, widgets
from django.contrib.admin.exceptions import DisallowedModelAdminLookup
from django.contrib.admin.options import IncorrectLookupParameters
from django.contrib.admin.utils import (
get_fields_from_path, lookup_needs_distinct, prepare_lookup_value, quote, unquote)
from django.contrib.auth.decorators import login_required
from django.core.exceptions import ImproperlyConfigured, PermissionDenied, SuspiciousOperation
from django.core.paginator import InvalidPage, Paginator
from django.db import models
from django.db.models.constants import LOOKUP_SEP
from django.db.models.fields import FieldDoesNotExist
from django.db.models.fields.related import ForeignObjectRel
from django.db.models.sql.constants import QUERY_TERMS
from django.shortcuts import get_object_or_404, redirect, render
from django.template.defaultfilters import filesizeformat
from django.utils import six
from django.utils.decorators import method_decorator
from django.utils.encoding import force_text
from django.utils.functional import cached_property
from django.utils.http import urlencode
from django.utils.safestring import mark_safe
from django.utils.text import capfirst
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
from django.views.generic.edit import FormView
from wagtail.wagtailadmin import messages
from wagtail.wagtailadmin.edit_handlers import (
ObjectList, extract_panel_definitions_from_model_class)
from wagtail.wagtaildocs.models import get_document_model
from wagtail.wagtailimages.models import Filter, get_image_model
from .forms import ParentChooserForm
class WMABaseView(TemplateView):
"""
Groups together common functionality for all app views.
"""
model_admin = None
meta_title = ''
page_title = ''
page_subtitle = ''
def __init__(self, model_admin):
self.model_admin = model_admin
self.model = model_admin.model
self.opts = self.model._meta
self.app_label = force_text(self.opts.app_label)
self.model_name = force_text(self.opts.model_name)
self.verbose_name = force_text(self.opts.verbose_name)
self.verbose_name_plural = force_text(self.opts.verbose_name_plural)
self.pk_attname = self.opts.pk.attname
self.is_pagemodel = model_admin.is_pagemodel
self.permission_helper = model_admin.permission_helper
self.url_helper = model_admin.url_helper
def check_action_permitted(self, user):
return True
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted(request.user):
raise PermissionDenied
button_helper_class = self.model_admin.get_button_helper_class()
self.button_helper = button_helper_class(self, request)
return super(WMABaseView, self).dispatch(request, *args, **kwargs)
@cached_property
def menu_icon(self):
return self.model_admin.get_menu_icon()
@cached_property
def header_icon(self):
return self.menu_icon
def get_page_title(self):
return self.page_title or capfirst(self.opts.verbose_name_plural)
def get_meta_title(self):
return self.meta_title or self.get_page_title()
@cached_property
def index_url(self):
return self.url_helper.index_url
@cached_property
def create_url(self):
return self.url_helper.create_url
def get_base_queryset(self, request=None):
return self.model_admin.get_queryset(request or self.request)
class ModelFormView(WMABaseView, FormView):
def get_edit_handler_class(self):
if hasattr(self.model, 'edit_handler'):
edit_handler = self.model.edit_handler
else:
panels = extract_panel_definitions_from_model_class(self.model)
edit_handler = ObjectList(panels)
return edit_handler.bind_to_model(self.model)
def get_form_class(self):
return self.get_edit_handler_class().get_form_class(self.model)
def get_success_url(self):
return self.index_url
def get_instance(self):
return getattr(self, 'instance', None) or self.model()
def get_form_kwargs(self):
kwargs = FormView.get_form_kwargs(self)
kwargs.update({'instance': self.get_instance()})
return kwargs
@property
def media(self):
return forms.Media(
css={'all': self.model_admin.get_form_view_extra_css()},
js=self.model_admin.get_form_view_extra_js()
)
def get_context_data(self, **kwargs):
context = super(ModelFormView, self).get_context_data(**kwargs)
instance = self.get_instance()
edit_handler_class = self.get_edit_handler_class()
form = self.get_form()
context.update({
'view': self,
'model_admin': self.model_admin,
'is_multipart': form.is_multipart(),
'edit_handler': edit_handler_class(instance=instance, form=form),
'form': form,
})
return context
def get_success_message(self, instance):
return _("{model_name} '{instance}' created.").format(
model_name=capfirst(self.opts.verbose_name), instance=instance)
def get_success_message_buttons(self, instance):
button_url = self.url_helper.get_action_url('edit', quote(instance.pk))
return [
messages.button(button_url, _('Edit'))
]
def get_error_message(self):
model_name = self.verbose_name
return _("The %s could not be created due to errors.") % model_name
def form_valid(self, form):
instance = form.save()
messages.success(
self.request, self.get_success_message(instance),
buttons=self.get_success_message_buttons(instance)
)
return redirect(self.get_success_url())
def form_invalid(self, form):
messages.error(self.request, self.get_error_message())
return self.render_to_response(self.get_context_data())
class InstanceSpecificView(WMABaseView):
instance_pk = None
pk_quoted = None
instance = None
def __init__(self, model_admin, instance_pk):
super(InstanceSpecificView, self).__init__(model_admin)
self.instance_pk = unquote(instance_pk)
self.pk_quoted = quote(self.instance_pk)
filter_kwargs = {}
filter_kwargs[self.pk_attname] = self.instance_pk
object_qs = model_admin.model._default_manager.get_queryset().filter(
**filter_kwargs)
self.instance = get_object_or_404(object_qs)
def get_page_subtitle(self):
return self.instance
@cached_property
def edit_url(self):
return self.url_helper.get_action_url('edit', self.pk_quoted)
@cached_property
def delete_url(self):
return self.url_helper.get_action_url('delete', self.pk_quoted)
class IndexView(WMABaseView):
# IndexView settings
ORDER_VAR = 'o'
ORDER_TYPE_VAR = 'ot'
PAGE_VAR = 'p'
SEARCH_VAR = 'q'
ERROR_FLAG = 'e'
IGNORED_PARAMS = (ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
# Only continue if logged in user has list permission
if not self.permission_helper.user_can_list(request.user):
raise PermissionDenied
self.list_display = self.model_admin.get_list_display(request)
self.list_filter = self.model_admin.get_list_filter(request)
self.search_fields = self.model_admin.get_search_fields(request)
self.items_per_page = self.model_admin.list_per_page
self.select_related = self.model_admin.list_select_related
# Get search parameters from the query string.
try:
self.page_num = int(request.GET.get(self.PAGE_VAR, 0))
except ValueError:
self.page_num = 0
self.params = dict(request.GET.items())
if self.PAGE_VAR in self.params:
del self.params[self.PAGE_VAR]
if self.ERROR_FLAG in self.params:
del self.params[self.ERROR_FLAG]
self.query = request.GET.get(self.SEARCH_VAR, '')
self.queryset = self.get_queryset(request)
return super(IndexView, self).dispatch(request, *args, **kwargs)
@property
def media(self):
return forms.Media(
css={'all': self.model_admin.get_index_view_extra_css()},
js=self.model_admin.get_index_view_extra_js()
)
def get_buttons_for_obj(self, obj):
return self.button_helper.get_buttons_for_obj(
obj, classnames_add=['button-small', 'button-secondary'])
def get_search_results(self, request, queryset, search_term):
"""
Returns a tuple containing a queryset to implement the search,
and a boolean indicating if the results may contain duplicates.
"""
use_distinct = False
if self.search_fields and search_term:
orm_lookups = ['%s__icontains' % str(search_field)
for search_field in self.search_fields]
for bit in search_term.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
return queryset, use_distinct
def lookup_allowed(self, lookup, value):
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in self.model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
if len(parts) > 1 and parts[-1] in QUERY_TERMS:
parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
# if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = self.model._meta.get_field(part)
except FieldDoesNotExist:
# Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'rel'):
if field.rel is None:
# This property or relation doesn't exist, but it's allowed
# since it's ignored in ChangeList.get_filters().
return True
model = field.rel.to
rel_name = field.rel.get_related_field().name
elif isinstance(field, ForeignObjectRel):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
def get_filters_params(self, params=None):
"""
Returns all params except IGNORED_PARAMS
"""
if not params:
params = self.params
lookup_params = params.copy() # a dictionary of the query string
# Remove all the parameters that are globally and systematically
# ignored.
for ignored in self.IGNORED_PARAMS:
if ignored in lookup_params:
del lookup_params[ignored]
return lookup_params
def get_filters(self, request):
lookup_params = self.get_filters_params()
use_distinct = False
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise DisallowedModelAdminLookup(
"Filtering by %s not allowed" % key)
filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(
request,
lookup_params,
self.model,
self.model_admin)
else:
field_path = None
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given
# field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field = list_filter
field_list_filter_class = FieldListFilter.create
if not isinstance(field, models.Field):
field_path = field
field = get_fields_from_path(self.model,
field_path)[-1]
spec = field_list_filter_class(
field,
request,
lookup_params,
self.model,
self.model_admin,
field_path=field_path)
# Check if we need to use distinct()
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts,
field_path))
if spec and spec.has_output():
filter_specs.append(spec)
# At this point, all the parameters used by the various ListFilters
# have been removed from lookup_params, which now only contains other
# parameters passed via the query string. We now loop through the
# remaining parameters both to ensure that all the parameters are valid
# fields and to determine if at least one of them needs distinct(). If
# the lookup parameters aren't real fields, then bail out.
try:
for key, value in lookup_params.items():
lookup_params[key] = prepare_lookup_value(key, value)
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts, key))
return (
filter_specs, bool(filter_specs), lookup_params, use_distinct
)
except FieldDoesNotExist as e:
six.reraise(
IncorrectLookupParameters,
IncorrectLookupParameters(e),
sys.exc_info()[2])
def get_query_string(self, new_params=None, remove=None):
if new_params is None:
new_params = {}
if remove is None:
remove = []
p = self.params.copy()
for r in remove:
for k in list(p):
if k.startswith(r):
del p[k]
for k, v in new_params.items():
if v is None:
if k in p:
del p[k]
else:
p[k] = v
return '?%s' % urlencode(sorted(p.items()))
def _get_default_ordering(self):
ordering = []
if self.model_admin.ordering:
ordering = self.model_admin.ordering
elif self.opts.ordering:
ordering = self.opts.ordering
return ordering
def get_default_ordering(self, request):
if self.model_admin.get_ordering(request):
return self.model_admin.get_ordering(request)
if self.opts.ordering:
return self.opts.ordering
return ()
def get_ordering_field(self, field_name):
"""
Returns the proper model field name corresponding to the given
field_name to use for ordering. field_name may either be the name of a
proper model field or the name of a method (on the admin or model) or a
callable with the 'admin_order_field' attribute. Returns None if no
proper model field name can be matched.
"""
try:
field = self.opts.get_field(field_name)
return field.name
except FieldDoesNotExist:
# See whether field_name is a name of a non-field
# that allows sorting.
if callable(field_name):
attr = field_name
elif hasattr(self.model_admin, field_name):
attr = getattr(self.model_admin, field_name)
else:
attr = getattr(self.model, field_name)
return getattr(attr, 'admin_order_field', None)
def get_ordering(self, request, queryset):
"""
Returns the list of ordering fields for the change list.
First we check the get_ordering() method in model admin, then we check
the object's default ordering. Then, any manually-specified ordering
from the query string overrides anything. Finally, a deterministic
order is guaranteed by ensuring the primary key is used as the last
ordering field.
"""
params = self.params
ordering = list(self.get_default_ordering(request))
if self.ORDER_VAR in params:
# Clear ordering and used params
ordering = []
order_params = params[self.ORDER_VAR].split('.')
for p in order_params:
try:
none, pfx, idx = p.rpartition('-')
field_name = self.list_display[int(idx)]
order_field = self.get_ordering_field(field_name)
if not order_field:
continue # No 'admin_order_field', skip it
                    # reverse order if order_field already has "-" as a prefix
if order_field.startswith('-') and pfx == "-":
ordering.append(order_field[1:])
else:
ordering.append(pfx + order_field)
except (IndexError, ValueError):
continue # Invalid ordering specified, skip it.
# Add the given query's ordering fields, if any.
ordering.extend(queryset.query.order_by)
# Ensure that the primary key is systematically present in the list of
# ordering fields so we can guarantee a deterministic order across all
# database backends.
pk_name = self.opts.pk.name
if not (set(ordering) & {'pk', '-pk', pk_name, '-' + pk_name}):
# The two sets do not intersect, meaning the pk isn't present. So
# we add it.
ordering.append('-pk')
return ordering
def get_ordering_field_columns(self):
"""
Returns an OrderedDict of ordering field column numbers and asc/desc
"""
# We must cope with more than one column having the same underlying
# sort field, so we base things on column numbers.
ordering = self._get_default_ordering()
ordering_fields = OrderedDict()
if self.ORDER_VAR not in self.params:
# for ordering specified on model_admin or model Meta, we don't
# know the right column numbers absolutely, because there might be
            # more than one column associated with that ordering, so we guess.
for field in ordering:
if field.startswith('-'):
field = field[1:]
order_type = 'desc'
else:
order_type = 'asc'
for index, attr in enumerate(self.list_display):
if self.get_ordering_field(attr) == field:
ordering_fields[index] = order_type
break
else:
for p in self.params[self.ORDER_VAR].split('.'):
none, pfx, idx = p.rpartition('-')
try:
idx = int(idx)
except ValueError:
continue # skip it
ordering_fields[idx] = 'desc' if pfx == '-' else 'asc'
return ordering_fields
def get_queryset(self, request=None):
request = request or self.request
# First, we collect all the declared list filters.
(self.filter_specs, self.has_filters, remaining_lookup_params,
filters_use_distinct) = self.get_filters(request)
# Then, we let every list filter modify the queryset to its liking.
qs = self.get_base_queryset(request)
for filter_spec in self.filter_specs:
new_qs = filter_spec.queryset(request, qs)
if new_qs is not None:
qs = new_qs
try:
# Finally, we apply the remaining lookup parameters from the query
# string (i.e. those that haven't already been processed by the
# filters).
qs = qs.filter(**remaining_lookup_params)
except (SuspiciousOperation, ImproperlyConfigured):
# Allow certain types of errors to be re-raised as-is so that the
# caller can treat them in a special way.
raise
except Exception as e:
            # Every other error is caught with a broad `except Exception`,
            # because we don't have any other way of validating lookup
            # parameters. They might be invalid if the keyword arguments are
            # incorrect, or if the values are not of the correct type, so we
            # might get FieldError, ValueError, ValidationError, or some other
            # unexpected exception.
raise IncorrectLookupParameters(e)
if not qs.query.select_related:
qs = self.apply_select_related(qs)
# Set ordering.
ordering = self.get_ordering(request, qs)
qs = qs.order_by(*ordering)
# Apply search results
qs, search_use_distinct = self.get_search_results(
request, qs, self.query)
# Remove duplicates from results, if necessary
if filters_use_distinct | search_use_distinct:
return qs.distinct()
else:
return qs
def apply_select_related(self, qs):
if self.select_related is True:
return qs.select_related()
if self.select_related is False:
if self.has_related_field_in_list_display():
return qs.select_related()
if self.select_related:
return qs.select_related(*self.select_related)
return qs
def has_related_field_in_list_display(self):
for field_name in self.list_display:
try:
field = self.opts.get_field(field_name)
except FieldDoesNotExist:
pass
else:
if isinstance(field, models.ManyToOneRel):
return True
return False
def get_context_data(self, *args, **kwargs):
user = self.request.user
all_count = self.get_base_queryset().count()
queryset = self.get_queryset()
result_count = queryset.count()
paginator = Paginator(queryset, self.items_per_page)
try:
page_obj = paginator.page(self.page_num + 1)
except InvalidPage:
page_obj = paginator.page(1)
context = {
'view': self,
'all_count': all_count,
'result_count': result_count,
'paginator': paginator,
'page_obj': page_obj,
'object_list': page_obj.object_list,
'user_can_create': self.permission_helper.user_can_create(user)
}
if self.is_pagemodel:
            parent_models = self.model.allowed_parent_page_models()
            allowed_parent_types = [m._meta.verbose_name for m in parent_models]
valid_parents = self.permission_helper.get_valid_parent_pages(user)
valid_parent_count = valid_parents.count()
context.update({
'no_valid_parents': not valid_parent_count,
'required_parent_types': allowed_parent_types,
})
return context
def get_template_names(self):
return self.model_admin.get_index_template()
class CreateView(ModelFormView):
page_title = _('New')
def check_action_permitted(self, user):
return self.permission_helper.user_can_create(user)
def dispatch(self, request, *args, **kwargs):
if self.is_pagemodel:
user = request.user
parents = self.permission_helper.get_valid_parent_pages(user)
parent_count = parents.count()
            # Only one parent is available for this page type and user, so
            # skip the chooser and redirect straight to the add view with
            # that parent pre-selected
if parent_count == 1:
parent = parents.get()
parent_pk = quote(parent.pk)
return redirect(self.url_helper.get_action_url(
'add', self.app_label, self.model_name, parent_pk))
# The page can be added in multiple places, so redirect to the
# choose_parent view so that the parent can be specified
return redirect(self.url_helper.get_action_url('choose_parent'))
return super(CreateView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Create new %s') % self.verbose_name
def get_page_subtitle(self):
return capfirst(self.verbose_name)
def get_template_names(self):
return self.model_admin.get_create_template()
class EditView(ModelFormView, InstanceSpecificView):
page_title = _('Editing')
def check_action_permitted(self, user):
return self.permission_helper.user_can_edit_obj(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if self.is_pagemodel:
return redirect(
self.url_helper.get_action_url('edit', self.pk_quoted)
)
return super(EditView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Editing %s') % self.verbose_name
def get_success_message(self, instance):
return _("{model_name} '{instance}' updated.").format(
model_name=capfirst(self.verbose_name), instance=instance)
def get_context_data(self, **kwargs):
kwargs['user_can_delete'] = self.permission_helper.user_can_delete_obj(
self.request.user, self.instance)
return super(EditView, self).get_context_data(**kwargs)
def get_error_message(self):
name = self.verbose_name
return _("The %s could not be saved due to errors.") % name
def get_template_names(self):
return self.model_admin.get_edit_template()
class ChooseParentView(WMABaseView):
def dispatch(self, request, *args, **kwargs):
if not self.permission_helper.user_can_create(request.user):
raise PermissionDenied
return super(ChooseParentView, self).dispatch(request, *args, **kwargs)
def get_page_title(self):
return _('Add %s') % self.verbose_name
def get_form(self, request):
parents = self.permission_helper.get_valid_parent_pages(request.user)
return ParentChooserForm(parents, request.POST or None)
def get(self, request, *args, **kwargs):
form = self.get_form(request)
context = {'view': self, 'form': form}
return render(request, self.get_template(), context)
    def post(self, request, *args, **kwargs):
form = self.get_form(request)
if form.is_valid():
parent_pk = quote(form.cleaned_data['parent_page'].pk)
return redirect(self.url_helper.get_action_url(
'add', self.app_label, self.model_name, parent_pk))
context = {'view': self, 'form': form}
return render(request, self.get_template(), context)
def get_template(self):
return self.model_admin.get_choose_parent_template()
class DeleteView(InstanceSpecificView):
page_title = _('Delete')
def check_action_permitted(self, user):
return self.permission_helper.user_can_delete_obj(user, self.instance)
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
if not self.check_action_permitted(request.user):
raise PermissionDenied
if self.is_pagemodel:
return redirect(
self.url_helper.get_action_url('delete', self.pk_quoted)
)
return super(DeleteView, self).dispatch(request, *args, **kwargs)
def get_meta_title(self):
return _('Confirm deletion of %s') % self.verbose_name
def confirmation_message(self):
return _(
"Are you sure you want to delete this %s? If other things in your "
"site are related to it, they may also be affected."
) % self.verbose_name
def get(self, request, *args, **kwargs):
context = {'view': self, 'instance': self.instance}
return self.render_to_response(context)
def delete_instance(self):
self.instance.delete()
def post(self, request, *args, **kwargs):
try:
self.delete_instance()
messages.success(
request,
_("{model} '{instance}' deleted.").format(
model=self.verbose_name, instance=self.instance))
return redirect(self.index_url)
except models.ProtectedError:
linked_objects = []
for rel in self.model._meta.get_all_related_objects():
if rel.on_delete == models.PROTECT:
qs = getattr(self.instance, rel.get_accessor_name())
for obj in qs.all():
linked_objects.append(obj)
context = {
'view': self,
'instance': self.instance,
'protected_error': True,
'linked_objects': linked_objects,
}
return self.render_to_response(context)
def get_template_names(self):
return self.model_admin.get_delete_template()
class InspectView(InstanceSpecificView):
page_title = _('Inspecting')
def check_action_permitted(self, user):
return self.permission_helper.user_can_inspect_obj(user, self.instance)
@property
def media(self):
return forms.Media(
css={'all': self.model_admin.get_inspect_view_extra_css()},
js=self.model_admin.get_inspect_view_extra_js()
)
def get_meta_title(self):
return _('Inspecting %s') % self.verbose_name
def get_field_label(self, field_name, field=None):
""" Return a label to display for a field """
label = None
if field is not None:
label = getattr(field, 'verbose_name', None)
if label is None:
label = getattr(field, 'name', None)
if label is None:
label = field_name
return label
def get_field_display_value(self, field_name, field=None):
""" Return a display value for a field """
# First we check for a 'get_fieldname_display' property/method on
# the model, and return the value of that, if present.
val_funct = getattr(self.instance, 'get_%s_display' % field_name, None)
if val_funct is not None:
if callable(val_funct):
return val_funct()
return val_funct
# If we have a real field, we can utilise that to try to display
# something more useful
if field is not None:
try:
field_type = field.get_internal_type()
if (
field_type == 'ForeignKey' and
field.related_model == get_image_model()
):
# The field is an image
return self.get_image_field_display(field_name, field)
if (
field_type == 'ForeignKey' and
field.related_model == get_document_model()
):
# The field is a document
return self.get_document_field_display(field_name, field)
except AttributeError:
pass
# Resort to getting the value of 'field_name' from the instance
return getattr(self.instance, field_name,
self.model_admin.get_empty_value_display(field_name))
def get_image_field_display(self, field_name, field):
""" Render an image """
image = getattr(self.instance, field_name)
if image:
fltr, _ = Filter.objects.get_or_create(spec='max-400x400')
rendition = image.get_rendition(fltr)
return rendition.img_tag
return self.model_admin.get_empty_value_display(field_name)
def get_document_field_display(self, field_name, field):
""" Render a link to a document """
document = getattr(self.instance, field_name)
if document:
return mark_safe(
'<a href="%s">%s <span class="meta">(%s, %s)</span></a>' % (
document.url,
document.title,
document.file_extension.upper(),
filesizeformat(document.file.size),
)
)
return self.model_admin.get_empty_value_display(field_name)
def get_dict_for_field(self, field_name):
"""
Return a dictionary containing `label` and `value` values to display
for a field.
"""
try:
field = self.model._meta.get_field(field_name)
except FieldDoesNotExist:
field = None
return {
'label': self.get_field_label(field_name, field),
'value': self.get_field_display_value(field_name, field),
}
def get_fields_dict(self):
"""
Return a list of `label`/`value` dictionaries to represent the
        fields named by the model_admin class's `get_inspect_view_fields` method
"""
fields = []
for field_name in self.model_admin.get_inspect_view_fields():
fields.append(self.get_dict_for_field(field_name))
return fields
def get_context_data(self, **kwargs):
context = super(InspectView, self).get_context_data(**kwargs)
buttons = self.button_helper.get_buttons_for_obj(
self.instance, exclude=['inspect'])
context.update({
'view': self,
'fields': self.get_fields_dict(),
'buttons': buttons,
'instance': self.instance,
})
return context
def get_template_names(self):
return self.model_admin.get_inspect_template()
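# --- Hedged illustration (editor's addition, not part of the original file) --
# IndexView encodes column ordering in the 'o' query parameter as a
# dot-separated list of list_display column indexes, each optionally prefixed
# with '-' for descending order (see get_ordering / get_ordering_field_columns
# above). A standalone sketch of that parsing, with a hypothetical ?o=1.-3.0:
if __name__ == '__main__':
    example_order_param = '1.-3.0'
    for part in example_order_param.split('.'):
        _none, prefix, index = part.rpartition('-')
        try:
            index = int(index)
        except ValueError:
            continue  # invalid index, skip it
        print(index, 'desc' if prefix == '-' else 'asc')
    # Prints: 1 asc / 3 desc / 0 asc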
|
hamsterbacke23/wagtail
|
wagtail/contrib/modeladmin/views.py
|
Python
|
bsd-3-clause
| 35,740
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
"""
from __future__ import unicode_literals
import json
import os
from osbs.api import OSBS
from osbs.conf import Configuration
from atomic_reactor.plugin import PostBuildPlugin
from atomic_reactor.util import ImageName
class ImportImagePlugin(PostBuildPlugin):
"""
Import image tags from external docker registry into OpenShift.
"""
key = "import_image"
can_fail = False
def __init__(self, tasker, workflow, url, verify_ssl=True, use_auth=True):
"""
constructor
:param tasker: DockerTasker instance
:param workflow: DockerBuildWorkflow instance
:param url: str, URL to OSv3 instance
:param verify_ssl: bool, verify SSL certificate?
:param use_auth: bool, initiate authentication with openshift?
"""
# call parent constructor
super(ImportImagePlugin, self).__init__(tasker, workflow)
self.url = url
self.verify_ssl = verify_ssl
self.use_auth = use_auth
def run(self):
try:
build_json = json.loads(os.environ["BUILD"])
except KeyError:
self.log.error("No $BUILD env variable. "
"Probably not running in build container.")
raise
osbs_conf = Configuration(conf_file=None, openshift_uri=self.url,
use_auth=self.use_auth,
verify_ssl=self.verify_ssl)
osbs = OSBS(osbs_conf, osbs_conf)
metadata = build_json.get("metadata", {})
kwargs = {}
if 'namespace' in metadata:
kwargs['namespace'] = metadata['namespace']
labels = metadata.get("labels", {})
try:
imagestream = labels["imagestream"]
except KeyError:
self.log.error("No imagestream label set for this Build")
raise
self.log.info("Importing tags for %s", imagestream)
osbs.import_image(imagestream, **kwargs)
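# --- Hedged illustration (editor's addition, not part of the original file) --
# run() reads the OpenShift build object from the $BUILD environment variable
# as JSON. Only the pieces consumed above are sketched here: metadata.namespace
# is optional and metadata.labels.imagestream is required. The concrete values
# ('default', 'my-app') are hypothetical.
if __name__ == '__main__':
    example_build = {
        "metadata": {
            "namespace": "default",
            "labels": {"imagestream": "my-app"},
        },
    }
    metadata = example_build.get("metadata", {})
    kwargs = {}
    if 'namespace' in metadata:
        kwargs['namespace'] = metadata['namespace']
    # Equivalent to what run() passes to osbs.import_image(...)
    print(metadata["labels"]["imagestream"], kwargs)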
|
mmilata/atomic-reactor
|
atomic_reactor/plugins/post_import_image.py
|
Python
|
bsd-3-clause
| 2,151
|
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import os
import time
import lldb
from lldbsuite.test.lldbtest import *
from lldbsuite.test.decorators import *
import lldbsuite.test.lldbutil as lldbutil
class GlobalsDataFormatterTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
def setUp(self):
# Call super's setUp().
TestBase.setUp(self)
# Find the line number to break at.
self.line = line_number('main.cpp', '// Set break point at this line.')
@skipIf(debug_info="gmodules",
bugnumber="https://bugs.llvm.org/show_bug.cgi?id=36048")
def test_with_run_command(self):
"""Test that that file and class static variables display correctly."""
self.build()
self.runCmd("file " + self.getBuildArtifact("a.out"), CURRENT_EXECUTABLE_SET)
lldbutil.run_break_set_by_file_and_line(
self, "main.cpp", self.line, num_expected_locations=1, loc_exact=True)
# This is the function to remove the custom formats in order to have a
# clean slate for the next test case.
def cleanup():
self.runCmd('type format clear', check=False)
self.runCmd('type summary clear', check=False)
# Execute the cleanup function during test case tear down.
self.addTearDownHook(cleanup)
self.runCmd("type summary add --summary-string \"JustATest\" Point")
# Simply check we can get at global variables
self.expect("target variable g_point",
substrs=['JustATest'])
self.expect("target variable g_point_pointer",
substrs=['(Point *) g_point_pointer ='])
# Print some information about the variables
# (we ignore the actual values)
self.runCmd(
"type summary add --summary-string \"(x=${var.x},y=${var.y})\" Point")
self.expect("target variable g_point",
substrs=['x=',
'y='])
self.expect("target variable g_point_pointer",
substrs=['(Point *) g_point_pointer ='])
# Test Python code on resulting SBValue
self.runCmd(
"type summary add --python-script \"return 'x=' + str(valobj.GetChildMemberWithName('x').GetValue());\" Point")
self.expect("target variable g_point",
substrs=['x='])
self.expect("target variable g_point_pointer",
substrs=['(Point *) g_point_pointer ='])
|
youtube/cobalt
|
third_party/llvm-project/lldb/packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-globals/TestDataFormatterGlobals.py
|
Python
|
bsd-3-clause
| 2,544
|
#######################################################
# Copyright (c) 2015, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
"""
Features class used for Computer Vision algorithms.
"""
from .library import *
from .array import *
import numbers
class Features(object):
"""
A container class used for various feature detectors.
Parameters
----------
num: optional: int. default: 0.
Specifies the number of features.
"""
def __init__(self, num=0):
self.feat = c_void_ptr_t(0)
if num is not None:
assert(isinstance(num, numbers.Number))
safe_call(backend.get().af_create_features(c_pointer(self.feat), c_dim_t(num)))
def __del__(self):
"""
Release features' memory
"""
if self.feat:
backend.get().af_release_features(self.feat)
self.feat = None
def num_features(self):
"""
Returns the number of features detected.
"""
num = c_dim_t(0)
safe_call(backend.get().af_get_features_num(c_pointer(num), self.feat))
return num
def get_xpos(self):
"""
Returns the x-positions of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_xpos(c_pointer(out.arr), self.feat))
return out
def get_ypos(self):
"""
Returns the y-positions of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_ypos(c_pointer(out.arr), self.feat))
return out
def get_score(self):
"""
Returns the scores of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_score(c_pointer(out.arr), self.feat))
return out
def get_orientation(self):
"""
Returns the orientations of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_orientation(c_pointer(out.arr), self.feat))
return out
def get_size(self):
"""
Returns the sizes of the features detected.
"""
out = Array()
safe_call(backend.get().af_get_features_size(c_pointer(out.arr), self.feat))
return out
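# --- Hedged usage sketch (editor's addition, not part of the original file) --
# Creating an empty Features container and reading its count back, using only
# the wrappers defined above. This assumes a working ArrayFire backend is
# installed; num_features() returns a ctypes dim_t, hence the .value access.
if __name__ == '__main__':
    feat = Features(num=0)
    print(int(feat.num_features().value))  # expected: 0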
|
arrayfire/arrayfire-python
|
arrayfire/features.py
|
Python
|
bsd-3-clause
| 2,480
|
import subprocess
import sys
import os
import setup_util
def start(args, logfile, errfile):
setup_util.replace_text("rails-stripped/config/database.yml", "host: .*", "host: " + args.database_host)
try:
subprocess.check_call("cp Gemfile-jruby Gemfile", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
subprocess.check_call("cp Gemfile-jruby.lock Gemfile.lock", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
subprocess.Popen("rvm jruby-1.7.8 do bundle exec torqbox -b 0.0.0.0 -E production", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
def stop(logfile, errfile):
try:
p = subprocess.Popen(['ps', 'aux'], stdout=subprocess.PIPE)
out, err = p.communicate()
for line in out.splitlines():
if 'torqbox' in line:
pid = int(line.split(None, 2)[1])
os.kill(pid, 15)
subprocess.check_call("rm Gemfile", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
subprocess.check_call("rm Gemfile.lock", shell=True, cwd="rails-stripped", stderr=errfile, stdout=logfile)
return 0
except subprocess.CalledProcessError:
return 1
|
torhve/FrameworkBenchmarks
|
frameworks/Ruby/rails-stripped/setup_jruby.py
|
Python
|
bsd-3-clause
| 1,221
|
#coding=utf-8
from __future__ import with_statement
from itertools import chain
from select import select
import os
import socket
import sys
import threading
from ssdb._compat import (b, xrange, imap, byte_to_chr, unicode, bytes, long,
BytesIO, nativestr, basestring,
LifoQueue, Empty, Full)
from ssdb.utils import get_integer
from ssdb.exceptions import (
RES_STATUS_MSG,
RES_STATUS,
SSDBError,
TimeoutError,
ConnectionError,
BusyLoadingError,
ResponseError,
InvalidResponse,
AuthenticationError,
NoScriptError,
ExecAbortError,
)
SYM_LF = b('\n')
SYM_EMPTY = b('')
SERVER_CLOSED_CONNECTION_ERROR = "Connection closed by server."
class Token(object):
"""
Literal strings in SSDB commands, such as the command names and any
    hard-coded arguments, are wrapped in this class so we know not to
    apply any encoding rules to them.
"""
def __init__(self, value):
if isinstance(value, Token):
value = value.value
self.value = value
def __repr__(self):
return self.value
def __str__(self):
return self.value
class BaseParser(object):
EXCEPTION_CLASSES = {
'ERR': ResponseError,
'EXECABORT': ExecAbortError,
'LOADING': BusyLoadingError,
'NOSCRIPT': NoScriptError,
}
def parse_error(self, response):
"Parse an error response"
error_code = response.split(' ')[0]
if error_code in self.EXCEPTION_CLASSES:
response = response[len(error_code) + 1:]
return self.EXCEPTION_CLASSES[error_code](response)
return ResponseError(response)
class SocketBuffer(object):
def __init__(self, socket, socket_read_size):
self._sock = socket
self.socket_read_size = socket_read_size
self._buffer = BytesIO()
# number of bytes written to the buffer from the socket
self.bytes_written = 0
# number of bytes read from the buffer
self.bytes_read = 0
@property
def length(self):
return self.bytes_written - self.bytes_read
def read(self, length):
# make sure to read the \n terminator
length = length + 1
# make sure we've read enough data from the socket
if length > self.length:
self._read_from_socket(length - self.length)
self._buffer.seek(self.bytes_read)
data = self._buffer.read(length)
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-1]
def _read_from_socket(self, length=None):
socket_read_size = self.socket_read_size
buf = self._buffer
buf.seek(self.bytes_written)
marker = 0
try:
while True:
data = self._sock.recv(socket_read_size)
                # an empty string indicates the server shut down the socket
if isinstance(data, bytes) and len(data) == 0:
raise socket.error(SERVER_CLOSED_CONNECTION_ERROR)
buf.write(data)
data_length = len(data)
self.bytes_written += data_length
marker += data_length
if length is not None and length > marker:
continue
break
except socket.timeout:
raise TimeoutError("Timeout reading from socket")
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError("Error while reading from socket:%s"
%(e.args,))
def readline(self):
buf = self._buffer
buf.seek(self.bytes_read)
data = buf.readline()
while not data.endswith(SYM_LF):
# there's more data in the socket that we need
self._read_from_socket()
buf.seek(self.bytes_read)
data = buf.readline()
self.bytes_read += len(data)
# purge the buffer when we've consumed it all so it doesn't
# grow forever
if self.bytes_read == self.bytes_written:
self.purge()
return data[:-1]
def purge(self):
self._buffer.seek(0)
self._buffer.truncate()
self.bytes_written = 0
self.bytes_read = 0
def close(self):
self.purge()
self._buffer.close()
self._buffer = None
self._sock = None
class PythonParser(BaseParser):
"""
Plain Python parsing class
"""
encoding = None
def __init__(self, socket_read_size):
self.socket_read_size = socket_read_size
self._sock = None
self._buffer = None
def __del__(self):
try:
self.on_disconnect()
except Exception:
pass
def on_connect(self, connection):
"""
Called when the socket connects
"""
self._sock = connection._sock
self._buffer = SocketBuffer(self._sock, self.socket_read_size)
if connection.decode_responses:
self.encoding = connection.encoding
def on_disconnect(self):
"Called when the socket disconnects"
if self._sock is not None:
self._sock.close()
self._sock = None
if self._buffer is not None:
self._buffer.close()
self._buffer = None
self.encoding = None
def can_read(self):
return self._buffer and bool(self._buffer.length)
def read_response(self):
try:
lgt = int(self._buffer.readline())
except ValueError:
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
status = self._buffer.readline()
if status not in RES_STATUS or lgt!=len(status):
raise ConnectionError(SERVER_CLOSED_CONNECTION_ERROR)
result = [status]
while True:
lgt = self._buffer.readline()
if lgt == '':
break
try:
value = self._buffer.read(int(lgt))
except ValueError:
raise ConnectionError(RES_STATUS_MSG.ERROR)
if isinstance(value, bytes) and self.encoding:
value = value.decode(self.encoding)
result.append(value)
return result
DefaultParser = PythonParser
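# --- Hedged illustration (editor's addition, not part of the original file) --
# The response framing consumed by PythonParser.read_response above: a
# length-prefixed status line, then length-prefixed values, terminated by a
# blank line. A standalone sketch building the reply for a successful request
# whose status is b'ok' and whose single value is b'hello' (payload made up):
if __name__ == '__main__':
    demo_status, demo_values = b'ok', [b'hello']
    demo_reply = b''.join(
        str(len(piece)).encode('utf-8') + b'\n' + piece + b'\n'
        for piece in [demo_status] + demo_values
    ) + b'\n'
    print(demo_reply)  # b'2\nok\n5\nhello\n\n'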
class Connection(object):
"""
Manages TCP communication to and from a SSDB server
>>> from ssdb.connection import Connection
>>> conn = Connection(host='localhost', port=8888)
"""
description_format = "Connection<host=%(host)s,port=%(port)s>"
def __init__(self, host="127.0.0.1",port=8888,socket_timeout=None,
socket_connect_timeout=None,socket_keepalive=False,
socket_keepalive_options=None,retry_on_timeout=False,
encoding='utf-8', encoding_errors='strict',
decode_responses=False, parser_class=DefaultParser,
socket_read_size=65536):
self.pid = os.getpid()
self.host = host
self.port = port
self._sock = None
self.socket_timeout = socket_timeout
self.socket_connect_timeout = socket_connect_timeout or socket_timeout
self.socket_keepalive = socket_keepalive
self.socket_keepalive_options = socket_keepalive_options or {}
self.retry_on_timeout = retry_on_timeout
self.encoding = encoding
self.encoding_errors = encoding_errors
self.decode_responses = decode_responses
self._parser = parser_class(socket_read_size=socket_read_size)
self._description_args = {
'host': self.host,
'port': self.port,
}
self._connect_callbacks = []
@property
def kwargs(self):
return self._description_args
def __repr__(self):
return self.description_format % self._description_args
def __del__(self):
try:
self.disconnect()
except Exception:
pass
def register_connect_callback(self, callback):
self._connect_callbacks.append(callback)
def clear_connect_callbacks(self):
self._connect_callbacks = []
def connect(self):
"""
Connects to the SSDB server if not already connected
"""
if self._sock:
return
try:
sock = self._connect()
except socket.error:
e = sys.exc_info()[1]
raise ConnectionError(self._error_message(e))
self._sock = sock
try:
self.on_connect()
except SSDBError:
# clean up after any error in on_connect
self.disconnect()
raise
# run any user callbacks. right now the only internal callback
# is for pubsub channel/pattern resubscription
for callback in self._connect_callbacks:
callback(self)
def _connect(self):
"""
Create a TCP socket connection
"""
# we want to mimic what socket.create_connection does to support
# ipv4/ipv6, but we want to set options prior to calling
# socket.connect()
err = None
for res in socket.getaddrinfo(self.host, self.port, 0,
socket.SOCK_STREAM):
family, socktype, proto, canonname, socket_address = res
sock = None
try:
sock = socket.socket(family, socktype, proto)
# TCP_NODELAY
sock.setsockopt(socket.IPPROTO_TCP,socket.TCP_NODELAY, 1)
# TCP_KEEPALIVE
if self.socket_keepalive:
sock.setsockopt(socket.SOL_SOCKET,socket.SO_KEEPALIVE, 1)
                    for k, v in self.socket_keepalive_options.items():
sock.setsockopt(socket.SOL_TCP, k, v)
# set the socket_connect_timeout before we connect
sock.settimeout(self.socket_connect_timeout)
# connect
sock.connect(socket_address)
# set the socket_timeout now that we're connected
sock.settimeout(self.socket_timeout)
return sock
except socket.error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
raise socket.error("socket.getaddrinfo returned an empty list")
#sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#sock.settimeout(self.socket_timeout)
#sock.connect((self.host, self.port))
#return sock
def _error_message(self, exception):
"""
args for socket.error can either be (errno, "message") or just "message"
"""
if len(exception.args) == 1:
return "Error connecting to %s:%s. %s." % \
(self.host, self.port, exception.args[0])
else:
return "Error %s connecting %s:%s. %s." % \
(exception.args[0], self.host, self.port, exception.args[1])
def on_connect(self):
"""
Initialize the connection
"""
self._parser.on_connect(self)
def disconnect(self):
"""
Disconnects from the SSDB server
"""
self._parser.on_disconnect()
if self._sock is None:
return
try:
self._sock.shutdown(socket.SHUT_RDWR)
self._sock.close()
except socket.error:
pass
self._sock = None
def send_packed_command(self, command):
"""
Send an already packed command to the SSDB server
"""
if not self._sock:
self.connect()
try:
if isinstance(command, str):
command = [command]
for item in command:
self._sock.sendall(item)
except socket.timeout:
self.disconnect()
raise TimeoutError("Timeout writing to socket")
except socket.error:
e = sys.exc_info()[1]
self.disconnect()
if len(e.args) == 1:
_errno, errmsg = 'UNKNOWN', e.args[0]
else:
_errno, errmsg = e.args
raise ConnectionError("Error %s while writing to socket. %s." %
(_errno, errmsg))
except:
self.disconnect()
raise
def send_command(self, *args):
"""
Pack and send a command to the SSDB server
"""
self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0):
"Poll the socket to see if there's data that can be read."
sock = self._sock
if not sock:
self.connect()
sock = self._sock
return self._parser.can_read() or \
bool(select([sock], [], [], timeout)[0])
def read_response(self):
"""
Read the response from a previously sent command
"""
try:
response = self._parser.read_response()
except:
self.disconnect()
raise
if isinstance(response, ResponseError):
raise response
#print(response)
return response
def encode(self, value):
"""
Return a bytestring representation of the value
"""
if isinstance(value, Token):
return b(value.value)
if isinstance(value, bytes):
return value
elif isinstance(value, (int, long)):
value = b(str(value))
elif isinstance(value, float):
value = repr(value)
elif not isinstance(value, basestring):
value = str(value)
if isinstance(value, unicode):
value = value.encode(self.encoding, self.encoding_errors)
return value
def pack_command(self, *args):
"""
        Pack a series of arguments into a valid SSDB command
"""
# the client might have included 1 or more literal arguments in
# the command name, e.g., 'CONFIG GET'. The SSDB server expects
# these arguments to be sent separately, so split the first
        # argument manually. All of these arguments get wrapped
# in the Token class to prevent them from being encoded.
command = args[0]
if ' ' in command:
args = tuple([Token(s) for s in command.split(' ')]) + args[1:]
else:
args = (Token(command),) + args[1:]
args_output = SYM_EMPTY.join([
SYM_EMPTY.join((
b(str(len(k))),
SYM_LF,
k,
SYM_LF
)) for k in imap(self.encode, args)
])
        output = args_output + SYM_LF
return output
def pack_commands(self, commands):
"Pack multiple commands into the SSDB protocol"
output = []
pieces = []
buffer_length = 0
for cmd in commands:
for chunk in self.pack_command(*cmd):
pieces.append(chunk)
buffer_length += len(chunk)
if buffer_length > 6000:
output.append(SYM_EMPTY.join(pieces))
buffer_length = 0
pieces = []
if pieces:
output.append(SYM_EMPTY.join(pieces))
return output
class ConnectionPool(object):
"""
Generic connection pool.
>>> from ssdb.client import SSDB
>>> client = SSDB(connection_pool=ConnectionPool())
    If max_connections is set, then this object raises ssdb.ConnectionError
    when the pool's limit is reached. By default, TCP connections are created
    unless connection_class is specified. Any additional keyword arguments are
    passed to the constructor of connection_class.
"""
def __init__(self, connection_class=Connection, max_connections=None,
**connection_kwargs):
"""
Create a connection pool. If max_connections is set, then this object
raises ssdb.ConnectionError when the pool's limit is reached. By
        default, TCP connections are created unless connection_class is
        specified. Any additional keyword arguments are passed to the
        constructor of connection_class.
"""
max_connections = max_connections or 2 ** 31
if not isinstance(max_connections, (int, long)) or max_connections < 0:
raise ValueError('"max_connections" must be a positive integer')
self.connection_class = connection_class
self.connection_kwargs = connection_kwargs
self.max_connections = max_connections
self.reset()
def __repr__(self):
return "%s<%s>" % (
type(self).__name__,
self.connection_class.description_format % self.connection_kwargs,
)
def reset(self):
self.pid = os.getpid()
self._created_connections = 0
self._available_connections = []
self._in_use_connections = set()
self._check_lock = threading.Lock()
def _checkpid(self):
if self.pid != os.getpid():
with self._check_lock:
if self.pid == os.getpid():
# another thread already did the work while we waited
# on the lock.
return
self.disconnect()
self.reset()
def get_connection(self, command_name, *keys, **options):
"""
Get a connection from pool.
"""
self._checkpid()
try:
connection = self._available_connections.pop()
except IndexError:
connection = self.make_connection()
self._in_use_connections.add(connection)
return connection
def make_connection(self):
"""
Create a new connection
"""
if self._created_connections >= self.max_connections:
raise ConnectionError("Too many connections")
self._created_connections += 1
return self.connection_class(**self.connection_kwargs)
def release(self, connection):
"""
Release the connection back to the pool.
"""
self._checkpid()
if connection.pid != self.pid:
return
self._in_use_connections.remove(connection)
self._available_connections.append(connection)
def disconnect(self):
"""
Disconnects all connections in the pool.
"""
all_conns = chain(self._available_connections,
self._in_use_connections)
for connection in all_conns:
connection.disconnect()
class BlockingConnectionPool(ConnectionPool):
"""
Thread-safe blocking connection pool::
>>> from ssdb.client import SSDB
>>> client = SSDB(connection_pool=BlockingConnectionPool())
    It performs the same function as the default
    ``:py:class: ~ssdb.connection.ConnectionPool`` implementation, in that it
    maintains a pool of reusable connections that can be shared by multiple
    ssdb clients (safely across threads if required).
    The difference is that, in the event that a client tries to get a
    connection from the pool when all connections are in use, rather than
    raising a ``:py:class: ~ssdb.exceptions.ConnectionError`` (as the default
    ``:py:class: ~ssdb.connection.ConnectionPool`` implementation does), it
    makes the client wait ("blocks") for a specified number of seconds until a
    connection becomes available.
Use ``max_connections`` to increase / decrease the pool size::
>>> pool = BlockingConnectionPool(max_connections=10)
Use ``timeout`` to tell it either how many seconds to wait for a connection
to become available, or to block forever:
>>> #Block forever.
>>> pool = BlockingConnectionPool(timeout=None)
>>> #Raise a ``ConnectionError`` after five seconds if a connection is not
>>> #available
>>> pool = BlockingConnectionPool(timeout=5)
"""
def __init__(self, max_connections=50, timeout=20,
connection_class=Connection,queue_class=LifoQueue,
**connection_kwargs):
self.queue_class = queue_class
self.timeout = timeout
super(BlockingConnectionPool, self).__init__(
connection_class=connection_class, max_connections=max_connections,
**connection_kwargs)
def reset(self):
self.pid = os.getpid()
self._check_lock = threading.Lock()
# Create and fill up a thread safe queue with ``None`` values.
self.pool = self.queue_class(self.max_connections)
while True:
try:
self.pool.put_nowait(None)
except Full:
break
# Keep a list of actual connection instances so that we can
# disconnect them later.
self._connections = []
def make_connection(self):
"Make a fresh connection."
connection = self.connection_class(**self.connection_kwargs)
self._connections.append(connection)
return connection
def get_connection(self, command_name, *keys, **options):
"""
Get a connection, blocking for ``self.timeout`` until a connection
is available from the pool.
If the connection returned is ``None`` then creates a new connection.
Because we use a last-in first-out queue, the existing connections
(having been returned to the pool after the initial ``None`` values
were added) will be returned before ``None`` values. This means we only
create new connections when we need to, i.e.: the actual number of
connections will only increase in response to demand.
"""
# Make sure we haven't changed process.
self._checkpid()
# Try and get a connection from the pool. If one isn't available within
# self.timeout then raise a ``ConnectionError``.
connection = None
try:
connection = self.pool.get(block=True,timeout=self.timeout)
except Empty:
            # Note that this is not caught by the ssdb client and will be
            # raised unless handled by application code.
raise ConnectionError("No connection available.")
# If the ``connection`` is actually ``None`` then that's a cue to make
# a new connection to add to the pool.
if connection is None:
connection = self.make_connection()
return connection
def release(self, connection):
"Releases the connection back to the pool."
# Make sure we haven't changed process.
self._checkpid()
if connection.pid != self.pid:
return
# Put the connection back into the pool.
try:
self.pool.put_nowait(connection)
except Full:
# perhaps the pool has been reset() after a fork? regardless,
# we don't want this connection
pass
def disconnect(self):
"Disconnects all connections in the pool."
for connection in self._connections:
connection.disconnect()
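# --- Hedged illustration (editor's addition, not part of the original file) --
# The request framing produced by Connection.pack_command above: each token is
# preceded by its byte length, every piece ends with '\n', and a trailing
# blank line terminates the request. A standalone sketch for the hypothetical
# command ('set', 'key', 'val'):
if __name__ == '__main__':
    demo_tokens = ['set', 'key', 'val']
    demo_request = ''.join(
        '%d\n%s\n' % (len(token), token) for token in demo_tokens
    ) + '\n'
    print(repr(demo_request))  # '3\nset\n3\nkey\n3\nval\n\n'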
|
carryLabs/carrydb
|
test/carrydb/connection.py
|
Python
|
bsd-3-clause
| 23,711
|
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy import sparse
from sklearn.base import BaseEstimator
from sklearn.dummy import DummyClassifier
from sklearn.model_selection import LeaveOneOut, train_test_split
from sklearn.utils._testing import (assert_array_almost_equal,
assert_almost_equal,
assert_array_equal,
ignore_warnings)
from sklearn.utils.extmath import softmax
from sklearn.exceptions import NotFittedError
from sklearn.datasets import make_classification, make_blobs
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold, cross_val_predict
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.isotonic import IsotonicRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.metrics import brier_score_loss
from sklearn.calibration import CalibratedClassifierCV, _CalibratedClassifier
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
@pytest.fixture(scope="module")
def data():
X, y = make_classification(
n_samples=200, n_features=6, random_state=42
)
return X, y
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration(data, method, ensemble):
# Test calibration objects with isotonic and sigmoid
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
cal_clf = CalibratedClassifierCV(clf, cv=y.size + 1, ensemble=ensemble)
with pytest.raises(ValueError):
cal_clf.fit(X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
# Note that this fit overwrites the fit on the entire training
# set
cal_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_cal_clf = cal_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
cal_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf,
prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
cal_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_cal_clf, prob_pos_cal_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
cal_clf.fit(this_X_train, (y_train + 1) % 2, sample_weight=sw_train)
prob_pos_cal_clf_relabeled = cal_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_cal_clf,
1 - prob_pos_cal_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss((y_test + 1) % 2,
prob_pos_cal_clf_relabeled))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_bad_method(data, ensemble):
# Check only "isotonic" and "sigmoid" are accepted as methods
X, y = data
clf = LinearSVC()
clf_invalid_method = CalibratedClassifierCV(
clf, method="foo", ensemble=ensemble
)
with pytest.raises(ValueError):
clf_invalid_method.fit(X, y)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_regressor(data, ensemble):
    # `base_estimator` should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
X, y = data
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), ensemble=ensemble)
with pytest.raises(RuntimeError):
clf_base_regressor.fit(X, y)
def test_calibration_default_estimator(data):
# Check base_estimator default is LinearSVC
X, y = data
calib_clf = CalibratedClassifierCV(cv=2)
calib_clf.fit(X, y)
base_est = calib_clf.calibrated_classifiers_[0].base_estimator
assert isinstance(base_est, LinearSVC)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_cv_splitter(data, ensemble):
# Check when `cv` is a CV splitter
X, y = data
splits = 5
kfold = KFold(n_splits=splits)
calib_clf = CalibratedClassifierCV(cv=kfold, ensemble=ensemble)
assert isinstance(calib_clf.cv, KFold)
assert calib_clf.cv.n_splits == splits
calib_clf.fit(X, y)
expected_n_clf = splits if ensemble else 1
assert len(calib_clf.calibrated_classifiers_) == expected_n_clf
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_sample_weight(data, method, ensemble):
n_samples = 100
X, y = data
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(
base_estimator, method=method, ensemble=ensemble
)
calibrated_clf.fit(X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
# different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert diff > 0.1
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
def test_parallel_execution(data, method, ensemble):
"""Test parallel calibration"""
X, y = data
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)
base_estimator = LinearSVC(random_state=42)
cal_clf_parallel = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=2, ensemble=ensemble
)
cal_clf_parallel.fit(X_train, y_train)
probs_parallel = cal_clf_parallel.predict_proba(X_test)
cal_clf_sequential = CalibratedClassifierCV(
base_estimator, method=method, n_jobs=1, ensemble=ensemble
)
cal_clf_sequential.fit(X_train, y_train)
probs_sequential = cal_clf_sequential.predict_proba(X_test)
assert_allclose(probs_parallel, probs_sequential)
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
@pytest.mark.parametrize('ensemble', [True, False])
# increase the number of RNG seeds to assess the statistical stability of this
# test:
@pytest.mark.parametrize('seed', range(2))
def test_calibration_multiclass(method, ensemble, seed):
def multiclass_brier(y_true, proba_pred, n_classes):
Y_onehot = np.eye(n_classes)[y_true]
return np.sum((Y_onehot - proba_pred) ** 2) / Y_onehot.shape[0]
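    # Illustrative worked example (not part of the original test): for
    # y_true = [0, 1], proba_pred = [[0.8, 0.2], [0.3, 0.7]] and n_classes=2,
    # the one-hot targets are [[1, 0], [0, 1]], so multiclass_brier returns
    # ((0.2**2 + 0.2**2) + (0.3**2 + 0.3**2)) / 2 = 0.13.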
# Test calibration for multiclass with classifier that implements
# only decision function.
clf = LinearSVC(random_state=7)
X, y = make_blobs(n_samples=500, n_features=100, random_state=seed,
centers=10, cluster_std=15.0)
# Use an unbalanced dataset by collapsing 8 clusters into one class
# to make the naive calibration based on a softmax more unlikely
# to work.
y[y > 2] = 2
n_classes = np.unique(y).shape[0]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
# Check probabilities sum to 1
assert_allclose(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that the dataset is not too trivial, otherwise it's hard
# to get interesting calibration data during the internal
# cross-validation loop.
assert 0.65 < clf.score(X_test, y_test) < 0.95
# Check that the accuracy of the calibrated model is never degraded
# too much compared to the original classifier.
assert cal_clf.score(X_test, y_test) > 0.95 * clf.score(X_test, y_test)
# Check that Brier loss of calibrated classifier is smaller than
# loss obtained by naively turning OvR decision function to
# probabilities via a softmax
uncalibrated_brier = \
multiclass_brier(y_test, softmax(clf.decision_function(X_test)),
n_classes=n_classes)
calibrated_brier = multiclass_brier(y_test, probas,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
    # Test that calibration of a multiclass classifier decreases the Brier
    # score for RandomForestClassifier
clf = RandomForestClassifier(n_estimators=30, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
uncalibrated_brier = multiclass_brier(y_test, clf_probs,
n_classes=n_classes)
cal_clf = CalibratedClassifierCV(
clf, method=method, cv=5, ensemble=ensemble
)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
calibrated_brier = multiclass_brier(y_test, cal_clf_probs,
n_classes=n_classes)
assert calibrated_brier < 1.1 * uncalibrated_brier
def test_calibration_zero_probability():
# Test an edge case where _CalibratedClassifier avoids numerical errors
    # in the multiclass normalization step when all the calibrator outputs
    # are zero at once for a given sample, and instead falls back to uniform
    # probabilities.
class ZeroCalibrator():
# This function is called from _CalibratedClassifier.predict_proba.
def predict(self, X):
return np.zeros(X.shape[0])
X, y = make_blobs(n_samples=50, n_features=10, random_state=7,
centers=10, cluster_std=15.0)
clf = DummyClassifier().fit(X, y)
calibrator = ZeroCalibrator()
cal_clf = _CalibratedClassifier(
base_estimator=clf, calibrators=[calibrator], classes=clf.classes_)
probas = cal_clf.predict_proba(X)
# Check that all probabilities are uniformly 1. / clf.n_classes_
assert_allclose(probas, 1. / clf.n_classes_)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
# Check error if clf not prefit
unfit_clf = CalibratedClassifierCV(clf, cv="prefit")
with pytest.raises(NotFittedError):
unfit_clf.fit(X_calib, y_calib)
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
cal_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = cal_clf.predict_proba(this_X_test)
y_pred = cal_clf.predict(this_X_test)
prob_pos_cal_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert (brier_score_loss(y_test, prob_pos_clf) >
brier_score_loss(y_test, prob_pos_cal_clf))
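# Illustrative note (not part of the original test-suite): the `cv="prefit"`
# workflow mirrored above is to fit the base estimator on one split and then
# calibrate it on a disjoint calibration split, e.g.
#
#   clf = MultinomialNB().fit(X_train, y_train)
#   cal = CalibratedClassifierCV(clf, cv="prefit").fit(X_calib, y_calib)
#   probas = cal.predict_proba(X_test)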
@pytest.mark.parametrize('method', ['sigmoid', 'isotonic'])
def test_calibration_ensemble_false(data, method):
    # Test that `ensemble=False` is the same as using predictions from
    # `cross_val_predict` to train the calibrator.
X, y = data
clf = LinearSVC(random_state=7)
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3, ensemble=False)
cal_clf.fit(X, y)
cal_probas = cal_clf.predict_proba(X)
# Get probas manually
unbiased_preds = cross_val_predict(
clf, X, y, cv=3, method='decision_function'
)
if method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
else:
calibrator = _SigmoidCalibration()
calibrator.fit(unbiased_preds, y)
# Use `clf` fit on all data
clf.fit(X, y)
clf_df = clf.decision_function(X)
manual_probas = calibrator.predict(clf_df)
assert_allclose(cal_probas[:, 1], manual_probas)
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
with pytest.raises(ValueError):
_SigmoidCalibration().fit(np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert len(prob_true) == len(prob_pred)
assert len(prob_true) == 2
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
with pytest.raises(ValueError):
calibration_curve([1.1], [-0.1], normalize=False)
# test that quantiles work as expected
y_true2 = np.array([0, 0, 0, 0, 1, 1])
y_pred2 = np.array([0., 0.1, 0.2, 0.5, 0.9, 1.])
prob_true_quantile, prob_pred_quantile = calibration_curve(
y_true2, y_pred2, n_bins=2, strategy='quantile')
assert len(prob_true_quantile) == len(prob_pred_quantile)
assert len(prob_true_quantile) == 2
assert_almost_equal(prob_true_quantile, [0, 2 / 3])
assert_almost_equal(prob_pred_quantile, [0.1, 0.8])
# Check that error is raised when invalid strategy is selected
with pytest.raises(ValueError):
calibration_curve(y_true2, y_pred2, strategy='percentile')
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_nan_imputer(ensemble):
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', SimpleImputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(
clf, cv=2, method='isotonic', ensemble=ensemble
)
clf_c.fit(X, y)
clf_c.predict(X)
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_prob_sum(ensemble):
# Test that sum of probabilities is 1. A non-regression test for
# issue #7796
num_classes = 2
X, y = make_classification(n_samples=10, n_features=5,
n_classes=num_classes)
clf = LinearSVC(C=1.0, random_state=7)
clf_prob = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
clf_prob.fit(X, y)
probs = clf_prob.predict_proba(X)
assert_array_almost_equal(probs.sum(axis=1), np.ones(probs.shape[0]))
@pytest.mark.parametrize('ensemble', [True, False])
def test_calibration_less_classes(ensemble):
    # Test that calibration works fine when the train set in a train-test
    # split does not contain all classes.
    # Since this test uses LOO, at each iteration the train set will be
    # missing one class label.
X = np.random.randn(10, 5)
y = np.arange(10)
clf = LinearSVC(C=1.0, random_state=7)
cal_clf = CalibratedClassifierCV(
clf, method="sigmoid", cv=LeaveOneOut(), ensemble=ensemble
)
cal_clf.fit(X, y)
for i, calibrated_classifier in \
enumerate(cal_clf.calibrated_classifiers_):
proba = calibrated_classifier.predict_proba(X)
if ensemble:
# Check that the unobserved class has proba=0
assert_array_equal(proba[:, i], np.zeros(len(y)))
# Check for all other classes proba>0
assert np.all(proba[:, :i] > 0)
assert np.all(proba[:, i + 1:] > 0)
else:
# Check `proba` are all 1/n_classes
assert np.allclose(proba, 1 / proba.shape[0])
@ignore_warnings(category=FutureWarning)
@pytest.mark.parametrize('X', [np.random.RandomState(42).randn(15, 5, 2),
np.random.RandomState(42).randn(15, 5, 2, 6)])
def test_calibration_accepts_ndarray(X):
"""Test that calibration accepts n-dimensional arrays as input"""
y = [1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0]
class MockTensorClassifier(BaseEstimator):
"""A toy estimator that accepts tensor inputs"""
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def decision_function(self, X):
# toy decision function that just needs to have the right shape:
return X.reshape(X.shape[0], -1).sum(axis=1)
calibrated_clf = CalibratedClassifierCV(MockTensorClassifier())
# we should be able to fit this classifier with no error
calibrated_clf.fit(X, y)
@pytest.fixture
def dict_data():
dict_data = [
{'state': 'NY', 'age': 'adult'},
{'state': 'TX', 'age': 'adult'},
{'state': 'VT', 'age': 'child'},
]
text_labels = [1, 0, 1]
return dict_data, text_labels
@pytest.fixture
def dict_data_pipeline(dict_data):
X, y = dict_data
pipeline_prefit = Pipeline([
('vectorizer', DictVectorizer()),
('clf', RandomForestClassifier())
])
return pipeline_prefit.fit(X, y)
def test_calibration_dict_pipeline(dict_data, dict_data_pipeline):
"""Test that calibration works in prefit pipeline with transformer
`X` is not array-like, sparse matrix or dataframe at the start.
See https://github.com/scikit-learn/scikit-learn/issues/8710
Also test it can predict without running into validation errors.
See https://github.com/scikit-learn/scikit-learn/issues/19637
"""
X, y = dict_data
clf = dict_data_pipeline
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
calib_clf.fit(X, y)
# Check attributes are obtained from fitted estimator
assert_array_equal(calib_clf.classes_, clf.classes_)
# Neither the pipeline nor the calibration meta-estimator
# expose the n_features_in_ check on this kind of data.
assert not hasattr(clf, 'n_features_in_')
assert not hasattr(calib_clf, 'n_features_in_')
# Ensure that no error is thrown with predict and predict_proba
calib_clf.predict(X)
calib_clf.predict_proba(X)
@pytest.mark.parametrize('clf, cv', [
pytest.param(LinearSVC(C=1), 2),
pytest.param(LinearSVC(C=1), 'prefit'),
])
def test_calibration_attributes(clf, cv):
    # Check that `n_features_in_` and `classes_` attributes are created properly
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
if cv == 'prefit':
clf = clf.fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv=cv)
calib_clf.fit(X, y)
if cv == 'prefit':
assert_array_equal(calib_clf.classes_, clf.classes_)
assert calib_clf.n_features_in_ == clf.n_features_in_
else:
classes = LabelEncoder().fit(y).classes_
assert_array_equal(calib_clf.classes_, classes)
assert calib_clf.n_features_in_ == X.shape[1]
def test_calibration_inconsistent_prefit_n_features_in():
# Check that `n_features_in_` from prefit base estimator
# is consistent with training set
X, y = make_classification(n_samples=10, n_features=5,
n_classes=2, random_state=7)
clf = LinearSVC(C=1).fit(X, y)
calib_clf = CalibratedClassifierCV(clf, cv='prefit')
msg = "X has 3 features, but LinearSVC is expecting 5 features as input."
with pytest.raises(ValueError, match=msg):
calib_clf.fit(X[:, :3], y)
# FIXME: remove in 1.1
def test_calibrated_classifier_cv_deprecation(data):
# Check that we raise the proper deprecation warning if accessing
# `calibrators_` from the `_CalibratedClassifier`.
X, y = data
calib_clf = CalibratedClassifierCV(cv=2).fit(X, y)
with pytest.warns(FutureWarning):
calibrators = calib_clf.calibrated_classifiers_[0].calibrators_
for clf1, clf2 in zip(
calibrators, calib_clf.calibrated_classifiers_[0].calibrators
):
assert clf1 is clf2
|
glemaitre/scikit-learn
|
sklearn/tests/test_calibration.py
|
Python
|
bsd-3-clause
| 23,376
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
def get_client_ip(request):
"""
    Given an HTTP request, returns the client IP address.
"""
x_forwarded_for = request.META.get('HTTP_X_FORWARDED_FOR', None)
if x_forwarded_for:
ip = x_forwarded_for.split(',')[0]
else:
ip = request.META.get('REMOTE_ADDR')
return ip
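# Illustrative example (not part of the original module), assuming a minimal
# request-like object exposing a ``META`` dict:
#
#   class _FakeRequest(object):
#       META = {'HTTP_X_FORWARDED_FOR': '203.0.113.7, 10.0.0.1'}
#
#   get_client_ip(_FakeRequest())  # -> '203.0.113.7' (left-most proxy entry)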
|
reinbach/django-machina
|
machina/apps/forum_conversation/utils.py
|
Python
|
bsd-3-clause
| 375
|
from __future__ import absolute_import
import uncertainties as U
from .. import asrootpy
__all__ = [
'as_ufloat',
'correlated_values',
]
def as_ufloat(roorealvar):
"""
Cast a `RooRealVar` to an `uncertainties.ufloat`
"""
if isinstance(roorealvar, (U.AffineScalarFunc, U.Variable)):
return roorealvar
return U.ufloat((roorealvar.getVal(), roorealvar.getError()))
def correlated_values(param_names, roofitresult):
"""
Return symbolic values from a `RooFitResult` taking into account covariance
This is useful for numerically computing the uncertainties for expressions
using correlated values arising from a fit.
Parameters
----------
param_names: list of strings
A list of parameters to extract from the result. The order of the names
is the order of the return value.
roofitresult : RooFitResult
A RooFitResult from a fit.
Returns
-------
list of correlated values from the uncertainties package.
Examples
--------
.. sourcecode:: python
# Fit a pdf to a histogram
pdf = some_roofit_pdf_with_variables("f(x, a, b, c)")
fitresult = pdf.fitTo(histogram, ROOT.RooFit.Save())
a, b, c = correlated_values(["a", "b", "c"], fitresult)
# Arbitrary math expression according to what the `uncertainties`
# package supports, automatically computes correct error propagation
sum_value = a + b + c
value, error = sum_value.nominal_value, sum_value.std_dev()
"""
pars = roofitresult.floatParsFinal()
#pars.Print()
pars = [pars[i] for i in range(pars.getSize())]
parnames = [p.GetName() for p in pars]
values = [(p.getVal(), p.getError()) for p in pars]
#values = [as_ufloat(p) for p in pars]
matrix = asrootpy(roofitresult.correlationMatrix()).to_numpy()
uvalues = U.correlated_values_norm(values, matrix.tolist())
uvalues = dict((n, v) for n, v in zip(parnames, uvalues))
    assert all(n in uvalues for n in parnames), (
        "some of the names in {0} aren't in the fitted values".format(parnames))
# Return a tuple in the order it was asked for
return tuple(uvalues[n] for n in param_names)
|
ndawe/rootpy
|
rootpy/stats/correlated_values.py
|
Python
|
bsd-3-clause
| 2,219
|
# "Copyright (c) 2000-2003 The Regents of the University of California.
# All rights reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose, without fee, and without written agreement
# is hereby granted, provided that the above copyright notice, the following
# two paragraphs and the author appear in all copies of this software.
#
# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY FOR
# DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT
# OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE UNIVERSITY
# OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,
# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS
# ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION TO
# PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS."
#
# @author Kamin Whitehouse
#
import sys, math, Queue
import pytos.tools.Drain as Drain
import pytos.Comm as Comm
from struct import *
class Straw( object ) :
def __init__( self , app ) :
self.app=app
self.linkLatency = .1
if "StrawM" not in app._moduleNames:
raise Exception("The StrawM module is not compiled into the application.")
def read(self, nodeID, strawID, start, size):
data=[] #store the data in here
response=None
while response==None:
print "pinging node %d" % nodeID
response = self.app.StrawM.msgDataSize.peek(address=nodeID, timeout=3) #find num bytes/msg
dataSize = response[0].value['value'].value
numHops = self.app.enums.DRAIN_MAX_TTL - response[0].getParentMsg(self.app.enums.AM_DRAINMSG).ttl
self.app.StrawM.sendPeriod.poke(self.linkLatency * numHops * 1000, address=nodeID, responseDesired=False)
msgs = [0 for i in range(int(math.ceil(size/float(dataSize))))] #keep track of straw msgs in here
msgQueue = Comm.MessageQueue(10)
Drain.getDrainObject(self.app)[0].register(self.app.msgs.StrawMsg, msgQueue)
print "Sucking %d bytes from node %d through Straw %d:" % (size, nodeID, strawID)
while msgs.count(1) < len(msgs):
subStart = msgs.index(0) * dataSize
try:
subSize = min(size, (msgs.index(1, subStart)*dataSize - subStart) )
except:
subSize = size - subStart
response = []
#while response == []:
self.app.StrawM.read(strawID, subStart, subSize, address=nodeID)
sys.stdout.write("%d-%d: " % (subStart, subStart+subSize))
numPrintedChars=0
while True :
try:
(addr, msg) = msgQueue.get(block=True, timeout=self.linkLatency * numHops * 4)
if msg.parentMsg.source == nodeID :#and msgs[msg.startIndex//dataSize] == 0:
msgs[msg.startIndex//dataSize] = 1
data[msg.startIndex:msg.startIndex+dataSize-1] = msg.data[:]
strg = ""
for i in range(numPrintedChars) :
strg += "\b"
strg += "%s/%s" % (msgs.count(1),len(msgs))
sys.stdout.write(strg)
sys.stdout.flush()
numPrintedChars = len(strg)-numPrintedChars
except Queue.Empty:
print ""
break
#now, pack the data so that it can be easily unpacked
for i in range(len(data)):
data[i] = pack('B',data[i])
return ''.join(data[0:size])
|
ekiwi/tinyos-1.x
|
contrib/ucb/tools/python/pytos/tools/Straw.py
|
Python
|
bsd-3-clause
| 3,841
|
"""
Caching framework.
This package defines set of cache backends that all conform to a simple API.
In a nutshell, a cache is a set of values -- which can be any object that
may be pickled -- identified by string keys. For the complete API, see
the abstract BaseCache class in django.core.cache.backends.base.
Client code should not access a cache backend directly; instead it should
either use the "cache" variable made available here, or it should use the
get_cache() function made available here. get_cache() takes a backend URI
(e.g. "memcached://127.0.0.1:11211/") and returns an instance of a backend
cache class.
See docs/topics/cache.txt for information on the public API.
"""
from django.conf import settings
from django.core import signals
from django.core.cache.backends.base import (
InvalidCacheBackendError, CacheKeyWarning, BaseCache)
from django.core.exceptions import ImproperlyConfigured
from django.utils import importlib
try:
# The mod_python version is more efficient, so try importing it first.
from mod_python.util import parse_qsl
except ImportError:
try:
# Python 2.6 and greater
from urlparse import parse_qsl
except ImportError:
# Python 2.5, 2.4. Works on Python 2.6 but raises
# PendingDeprecationWarning
from cgi import parse_qsl
__all__ = [
'get_cache', 'cache', 'DEFAULT_CACHE_ALIAS'
]
# Name for use in settings file --> name of module in "backends" directory.
# Any backend scheme that is not in this dictionary is treated as a Python
# import path to a custom backend.
BACKENDS = {
'memcached': 'memcached',
'locmem': 'locmem',
'file': 'filebased',
'db': 'db',
'dummy': 'dummy',
}
DEFAULT_CACHE_ALIAS = 'default'
def parse_backend_uri(backend_uri):
"""
Converts the "backend_uri" into a cache scheme ('db', 'memcached', etc), a
host and any extra params that are required for the backend. Returns a
(scheme, host, params) tuple.
"""
if backend_uri.find(':') == -1:
raise InvalidCacheBackendError("Backend URI must start with scheme://")
scheme, rest = backend_uri.split(':', 1)
if not rest.startswith('//'):
raise InvalidCacheBackendError("Backend URI must start with scheme://")
host = rest[2:]
qpos = rest.find('?')
if qpos != -1:
params = dict(parse_qsl(rest[qpos+1:]))
host = rest[2:qpos]
else:
params = {}
if host.endswith('/'):
host = host[:-1]
return scheme, host, params
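# Illustrative example (not part of the original module): a memcached URI
# with an extra query parameter is split into (scheme, host, params), e.g.
#
#   parse_backend_uri('memcached://127.0.0.1:11211/?timeout=30')
#   # -> ('memcached', '127.0.0.1:11211', {'timeout': '30'})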
if not settings.CACHES:
import warnings
warnings.warn(
"settings.CACHE_* is deprecated; use settings.CACHES instead.",
PendingDeprecationWarning
)
# Mapping for new-style cache backend api
backend_classes = {
'memcached': 'memcached.CacheClass',
'locmem': 'locmem.LocMemCache',
'file': 'filebased.FileBasedCache',
'db': 'db.DatabaseCache',
'dummy': 'dummy.DummyCache',
}
engine, host, params = parse_backend_uri(settings.CACHE_BACKEND)
if engine in backend_classes:
engine = 'django.core.cache.backends.%s' % backend_classes[engine]
defaults = {
'BACKEND': engine,
'LOCATION': host,
}
defaults.update(params)
settings.CACHES[DEFAULT_CACHE_ALIAS] = defaults
if DEFAULT_CACHE_ALIAS not in settings.CACHES:
raise ImproperlyConfigured("You must define a '%s' cache" % DEFAULT_CACHE_ALIAS)
def parse_backend_conf(backend, **kwargs):
"""
Helper function to parse the backend configuration
that doesn't use the URI notation.
"""
# Try to get the CACHES entry for the given backend name first
conf = settings.CACHES.get(backend, None)
if conf is not None:
args = conf.copy()
backend = args.pop('BACKEND')
location = args.pop('LOCATION', '')
return backend, location, args
else:
# Trying to import the given backend, in case it's a dotted path
mod_path, cls_name = backend.rsplit('.', 1)
try:
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError):
raise InvalidCacheBackendError("Could not find backend '%s'" % backend)
location = kwargs.pop('LOCATION', '')
return backend, location, kwargs
raise InvalidCacheBackendError(
"Couldn't find a cache backend named '%s'" % backend)
def get_cache(backend, **kwargs):
"""
Function to load a cache backend dynamically. This is flexible by design
to allow different use cases:
To load a backend with the old URI-based notation::
cache = get_cache('locmem://')
To load a backend that is pre-defined in the settings::
cache = get_cache('default')
To load a backend with its dotted import path,
including arbitrary options::
cache = get_cache('django.core.cache.backends.memcached.MemcachedCache', **{
'LOCATION': '127.0.0.1:11211', 'TIMEOUT': 30,
})
"""
try:
if '://' in backend:
# for backwards compatibility
backend, location, params = parse_backend_uri(backend)
if backend in BACKENDS:
backend = 'django.core.cache.backends.%s' % BACKENDS[backend]
params.update(kwargs)
mod = importlib.import_module(backend)
backend_cls = mod.CacheClass
else:
backend, location, params = parse_backend_conf(backend, **kwargs)
mod_path, cls_name = backend.rsplit('.', 1)
mod = importlib.import_module(mod_path)
backend_cls = getattr(mod, cls_name)
except (AttributeError, ImportError), e:
raise InvalidCacheBackendError(
"Could not find backend '%s': %s" % (backend, e))
return backend_cls(location, params)
cache = get_cache(DEFAULT_CACHE_ALIAS)
# Some caches -- python-memcached in particular -- need to do a cleanup at the
# end of a request cycle. If the cache provides a close() method, wire it up
# here.
if hasattr(cache, 'close'):
signals.request_finished.connect(cache.close)
|
heracek/django-nonrel
|
django/core/cache/__init__.py
|
Python
|
bsd-3-clause
| 6,144
|
# -*- coding: utf-8 -*-
"""
wakatime.compat
~~~~~~~~~~~~~~~
For working with Python2 and Python3.
:copyright: (c) 2014 Alan Hamlett.
:license: BSD, see LICENSE for more details.
"""
import codecs
import os
import platform
import subprocess
import sys
is_py2 = (sys.version_info[0] == 2)
is_py3 = (sys.version_info[0] == 3)
is_win = platform.system() == 'Windows'
if is_py2: # pragma: nocover
def u(text):
if text is None:
return None
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())
except:
try:
return unicode(text)
except:
return text.decode('utf-8', 'replace')
open = codecs.open
basestring = basestring
elif is_py3: # pragma: nocover
def u(text):
if text is None:
return None
if isinstance(text, bytes):
try:
return text.decode('utf-8')
except:
try:
return text.decode(sys.getdefaultencoding())
except:
pass
try:
return str(text)
except:
return text.decode('utf-8', 'replace')
open = open
basestring = (str, bytes)
try:
from importlib import import_module
except ImportError: # pragma: nocover
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import.
It specifies the package to use as the anchor point from which to
resolve the relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' "
"argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
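# Illustrative note (not part of the original module): with the fallback
# above, import_module('.compat', package='wakatime.packages') resolves to
# the absolute name 'wakatime.packages.compat', while the two-dot relative
# name '..compat' with the same package resolves to 'wakatime.compat'.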
try:
from .packages import simplejson as json
except (ImportError, SyntaxError): # pragma: nocover
import json
class Popen(subprocess.Popen):
"""Patched Popen to prevent opening cmd window on Windows platform."""
def __init__(self, *args, **kwargs):
startupinfo = kwargs.get('startupinfo')
if is_win or True:
try:
startupinfo = startupinfo or subprocess.STARTUPINFO()
startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
except AttributeError:
pass
kwargs['startupinfo'] = startupinfo
if 'env' not in kwargs:
kwargs['env'] = os.environ.copy()
kwargs['env']['LANG'] = 'en-US' if is_win else 'en_US.UTF-8'
subprocess.Popen.__init__(self, *args, **kwargs)
|
wakatime/komodo-wakatime
|
components/wakatime/compat.py
|
Python
|
bsd-3-clause
| 3,553
|
import sys
import unittest
# Run from the root dir
sys.path.insert(0, '.')
from pycparser import c_parser, c_generator, c_ast
_c_parser = c_parser.CParser(
lex_optimize=False,
yacc_debug=True,
yacc_optimize=False,
yacctab='yacctab')
def compare_asts(ast1, ast2):
if type(ast1) != type(ast2):
return False
if isinstance(ast1, tuple) and isinstance(ast2, tuple):
if ast1[0] != ast2[0]:
return False
ast1 = ast1[1]
ast2 = ast2[1]
return compare_asts(ast1, ast2)
for attr in ast1.attr_names:
if getattr(ast1, attr) != getattr(ast2, attr):
return False
for i, c1 in enumerate(ast1.children()):
if compare_asts(c1, ast2.children()[i]) == False:
return False
return True
def parse_to_ast(src):
return _c_parser.parse(src)
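# Illustrative example (not part of the original test-suite): sources that
# differ only in whitespace yield structurally equal ASTs, while a change of
# type does not, e.g.
#
#   compare_asts(parse_to_ast('int x;'), parse_to_ast('int  x ;'))   # True
#   compare_asts(parse_to_ast('int x;'), parse_to_ast('long x;'))    # False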
class TestFunctionDeclGeneration(unittest.TestCase):
class _FuncDeclVisitor(c_ast.NodeVisitor):
def __init__(self):
self.stubs = []
def visit_FuncDecl(self, node):
gen = c_generator.CGenerator()
self.stubs.append(gen.visit(node))
def test_partial_funcdecl_generation(self):
src = r'''
void noop(void);
void *something(void *thing);
int add(int x, int y);'''
ast = parse_to_ast(src)
v = TestFunctionDeclGeneration._FuncDeclVisitor()
v.visit(ast)
self.assertEqual(len(v.stubs), 3)
self.assertTrue(r'void noop(void)' in v.stubs)
self.assertTrue(r'void *something(void *thing)' in v.stubs)
self.assertTrue(r'int add(int x, int y)' in v.stubs)
class TestCtoC(unittest.TestCase):
def _run_c_to_c(self, src):
ast = parse_to_ast(src)
generator = c_generator.CGenerator()
return generator.visit(ast)
def _assert_ctoc_correct(self, src):
""" Checks that the c2c translation was correct by parsing the code
generated by c2c for src and comparing the AST with the original
AST.
"""
src2 = self._run_c_to_c(src)
self.assertTrue(compare_asts(parse_to_ast(src), parse_to_ast(src2)),
src2)
def test_trivial_decls(self):
self._assert_ctoc_correct('int a;')
self._assert_ctoc_correct('int b, a;')
self._assert_ctoc_correct('int c, b, a;')
def test_complex_decls(self):
self._assert_ctoc_correct('int** (*a)(void);')
self._assert_ctoc_correct('int** (*a)(void*, int);')
self._assert_ctoc_correct('int (*b)(char * restrict k, float);')
self._assert_ctoc_correct('int test(const char* const* arg);')
self._assert_ctoc_correct('int test(const char** const arg);')
#s = 'int test(const char* const* arg);'
#parse_to_ast(s).show()
def test_ternary(self):
self._assert_ctoc_correct('''
int main(void)
{
int a, b;
(a == 0) ? (b = 1) : (b = 2);
}''')
def test_casts(self):
self._assert_ctoc_correct(r'''
int main() {
int b = (int) f;
int c = (int*) f;
}''')
def test_initlist(self):
self._assert_ctoc_correct('int arr[] = {1, 2, 3};')
def test_exprs(self):
self._assert_ctoc_correct('''
int main(void)
{
int a;
int b = a++;
int c = ++a;
int d = a--;
int e = --a;
}''')
def test_statements(self):
# note two minuses here
self._assert_ctoc_correct(r'''
int main() {
int a;
a = 5;
;
b = - - a;
return a;
}''')
def test_casts(self):
self._assert_ctoc_correct(r'''
int main() {
int a = (int) b + 8;
int t = (int) c;
}
''')
def test_struct_decl(self):
self._assert_ctoc_correct(r'''
typedef struct node_t {
struct node_t* next;
int data;
} node;
''')
def test_krstyle(self):
self._assert_ctoc_correct(r'''
int main(argc, argv)
int argc;
char** argv;
{
return 0;
}
''')
def test_switchcase(self):
self._assert_ctoc_correct(r'''
int main() {
switch (myvar) {
case 10:
{
k = 10;
p = k + 1;
break;
}
case 20:
case 30:
return 20;
default:
break;
}
}
''')
def test_nest_initializer_list(self):
self._assert_ctoc_correct(r'''
int main()
{
int i[1][1] = { { 1 } };
}''')
def test_nest_named_initializer(self):
self._assert_ctoc_correct(r'''struct test
{
int i;
struct test_i_t
{
int k;
} test_i;
int j;
};
struct test test_var = {.i = 0, .test_i = {.k = 1}, .j = 2};
''')
def test_expr_list_in_initializer_list(self):
self._assert_ctoc_correct(r'''
int main()
{
int i[1] = { (1, 2) };
}''')
def test_issue36(self):
self._assert_ctoc_correct(r'''
int main() {
}''')
def test_issue37(self):
self._assert_ctoc_correct(r'''
int main(void)
{
unsigned size;
size = sizeof(size);
return 0;
}''')
def test_issue83(self):
self._assert_ctoc_correct(r'''
void x(void) {
int i = (9, k);
}
''')
def test_issue84(self):
self._assert_ctoc_correct(r'''
void x(void) {
for (int i = 0;;)
i;
}
''')
def test_exprlist_with_semi(self):
self._assert_ctoc_correct(r'''
void x() {
if (i < j)
tmp = C[i], C[i] = C[j], C[j] = tmp;
if (i <= j)
i++, j--;
}
''')
def test_exprlist_with_subexprlist(self):
self._assert_ctoc_correct(r'''
void x() {
(a = b, (b = c, c = a));
}
''')
def test_comma_operator_funcarg(self):
self._assert_ctoc_correct(r'''
void f(int x) { return x; }
int main(void) { f((1, 2)); return 0; }
''')
def test_comma_op_in_ternary(self):
self._assert_ctoc_correct(r'''
void f() {
(0, 0) ? (0, 0) : (0, 0);
}
''')
def test_comma_op_assignment(self):
self._assert_ctoc_correct(r'''
void f() {
i = (a, b, c);
}
''')
def test_pragma(self):
self._assert_ctoc_correct(r'''
#pragma foo
void f() {
#pragma bar
i = (a, b, c);
}
''')
if __name__ == "__main__":
unittest.main()
|
CtheSky/pycparser
|
tests/test_c_generator.py
|
Python
|
bsd-3-clause
| 7,444
|
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com> (main author)
# Mathieu Blondel (partial_fit support)
#
# License: BSD 3 clause
"""Classification, regression and One-Class SVM using Stochastic Gradient
Descent (SGD).
"""
import numpy as np
import warnings
from abc import ABCMeta, abstractmethod
from joblib import Parallel
from ..base import clone, is_classifier
from ._base import LinearClassifierMixin, SparseCoefMixin
from ._base import make_dataset
from ..base import BaseEstimator, RegressorMixin, OutlierMixin
from ..utils import check_random_state
from ..utils.metaestimators import available_if
from ..utils.extmath import safe_sparse_dot
from ..utils.multiclass import _check_partial_fit_first_call
from ..utils.validation import check_is_fitted, _check_sample_weight
from ..utils.fixes import delayed
from ..exceptions import ConvergenceWarning
from ..model_selection import StratifiedShuffleSplit, ShuffleSplit
from ._sgd_fast import _plain_sgd
from ..utils import compute_class_weight
from ._sgd_fast import Hinge
from ._sgd_fast import SquaredHinge
from ._sgd_fast import Log
from ._sgd_fast import ModifiedHuber
from ._sgd_fast import SquaredLoss
from ._sgd_fast import Huber
from ._sgd_fast import EpsilonInsensitive
from ._sgd_fast import SquaredEpsilonInsensitive
LEARNING_RATE_TYPES = {
"constant": 1,
"optimal": 2,
"invscaling": 3,
"adaptive": 4,
"pa1": 5,
"pa2": 6,
}
PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3}
DEFAULT_EPSILON = 0.1
# Default value of ``epsilon`` parameter.
MAX_INT = np.iinfo(np.int32).max
class _ValidationScoreCallback:
"""Callback for early stopping based on validation score"""
def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None):
self.estimator = clone(estimator)
self.estimator.t_ = 1 # to pass check_is_fitted
if classes is not None:
self.estimator.classes_ = classes
self.X_val = X_val
self.y_val = y_val
self.sample_weight_val = sample_weight_val
def __call__(self, coef, intercept):
est = self.estimator
est.coef_ = coef.reshape(1, -1)
est.intercept_ = np.atleast_1d(intercept)
return est.score(self.X_val, self.y_val, self.sample_weight_val)
class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta):
"""Base class for SGD classification and regression."""
def __init__(
self,
loss,
*,
penalty="l2",
alpha=0.0001,
C=1.0,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
epsilon=0.1,
random_state=None,
learning_rate="optimal",
eta0=0.0,
power_t=0.5,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
warm_start=False,
average=False,
):
self.loss = loss
self.penalty = penalty
self.learning_rate = learning_rate
self.epsilon = epsilon
self.alpha = alpha
self.C = C
self.l1_ratio = l1_ratio
self.fit_intercept = fit_intercept
self.shuffle = shuffle
self.random_state = random_state
self.verbose = verbose
self.eta0 = eta0
self.power_t = power_t
self.early_stopping = early_stopping
self.validation_fraction = validation_fraction
self.n_iter_no_change = n_iter_no_change
self.warm_start = warm_start
self.average = average
self.max_iter = max_iter
self.tol = tol
@abstractmethod
def fit(self, X, y):
"""Fit model."""
def _validate_params(self, for_partial_fit=False):
"""Validate input params."""
if not isinstance(self.shuffle, bool):
raise ValueError("shuffle must be either True or False")
if not isinstance(self.early_stopping, bool):
raise ValueError("early_stopping must be either True or False")
if self.early_stopping and for_partial_fit:
raise ValueError("early_stopping should be False with partial_fit")
if self.max_iter is not None and self.max_iter <= 0:
raise ValueError("max_iter must be > zero. Got %f" % self.max_iter)
if not (0.0 <= self.l1_ratio <= 1.0):
raise ValueError("l1_ratio must be in [0, 1]")
if not isinstance(self, SGDOneClassSVM) and self.alpha < 0.0:
raise ValueError("alpha must be >= 0")
if self.n_iter_no_change < 1:
raise ValueError("n_iter_no_change must be >= 1")
if not (0.0 < self.validation_fraction < 1.0):
raise ValueError("validation_fraction must be in range (0, 1)")
if self.learning_rate in ("constant", "invscaling", "adaptive"):
if self.eta0 <= 0.0:
raise ValueError("eta0 must be > 0")
if self.learning_rate == "optimal" and self.alpha == 0:
raise ValueError(
"alpha must be > 0 since "
"learning_rate is 'optimal'. alpha is used "
"to compute the optimal learning rate."
)
# raises ValueError if not registered
self._get_penalty_type(self.penalty)
self._get_learning_rate_type(self.learning_rate)
if self.loss not in self.loss_functions:
raise ValueError("The loss %s is not supported. " % self.loss)
if self.loss == "squared_loss":
warnings.warn(
"The loss 'squared_loss' was deprecated in v1.0 and will be "
"removed in version 1.2. Use `loss='squared_error'` which is "
"equivalent.",
FutureWarning,
)
def _get_loss_function(self, loss):
"""Get concrete ``LossFunction`` object for str ``loss``."""
try:
loss_ = self.loss_functions[loss]
loss_class, args = loss_[0], loss_[1:]
if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"):
args = (self.epsilon,)
return loss_class(*args)
except KeyError as e:
raise ValueError("The loss %s is not supported. " % loss) from e
def _get_learning_rate_type(self, learning_rate):
try:
return LEARNING_RATE_TYPES[learning_rate]
except KeyError as e:
raise ValueError(
"learning rate %s is not supported. " % learning_rate
) from e
def _get_penalty_type(self, penalty):
penalty = str(penalty).lower()
try:
return PENALTY_TYPES[penalty]
except KeyError as e:
raise ValueError("Penalty %s is not supported. " % penalty) from e
def _allocate_parameter_mem(
self, n_classes, n_features, coef_init=None, intercept_init=None, one_class=0
):
"""Allocate mem for parameters; initialize if provided."""
if n_classes > 2:
# allocate coef_ for multi-class
if coef_init is not None:
coef_init = np.asarray(coef_init, order="C")
if coef_init.shape != (n_classes, n_features):
raise ValueError("Provided ``coef_`` does not match dataset. ")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(
(n_classes, n_features), dtype=np.float64, order="C"
)
# allocate intercept_ for multi-class
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, order="C")
if intercept_init.shape != (n_classes,):
raise ValueError("Provided intercept_init does not match dataset.")
self.intercept_ = intercept_init
else:
self.intercept_ = np.zeros(n_classes, dtype=np.float64, order="C")
else:
# allocate coef_
if coef_init is not None:
coef_init = np.asarray(coef_init, dtype=np.float64, order="C")
coef_init = coef_init.ravel()
if coef_init.shape != (n_features,):
raise ValueError("Provided coef_init does not match dataset.")
self.coef_ = coef_init
else:
self.coef_ = np.zeros(n_features, dtype=np.float64, order="C")
# allocate intercept_
if intercept_init is not None:
intercept_init = np.asarray(intercept_init, dtype=np.float64)
if intercept_init.shape != (1,) and intercept_init.shape != ():
raise ValueError("Provided intercept_init does not match dataset.")
if one_class:
self.offset_ = intercept_init.reshape(
1,
)
else:
self.intercept_ = intercept_init.reshape(
1,
)
else:
if one_class:
self.offset_ = np.zeros(1, dtype=np.float64, order="C")
else:
self.intercept_ = np.zeros(1, dtype=np.float64, order="C")
# initialize average parameters
if self.average > 0:
self._standard_coef = self.coef_
self._average_coef = np.zeros(self.coef_.shape, dtype=np.float64, order="C")
if one_class:
self._standard_intercept = 1 - self.offset_
else:
self._standard_intercept = self.intercept_
self._average_intercept = np.zeros(
self._standard_intercept.shape, dtype=np.float64, order="C"
)
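    # Illustrative note (not part of the original source): for a 3-class
    # problem with 4 features, the branch above allocates ``coef_`` with
    # shape (3, 4) and ``intercept_`` with shape (3,); in the binary case
    # ``coef_`` is kept as a flat (n_features,) array here and reshaped to
    # (1, n_features) by the calling fit routine.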
def _make_validation_split(self, y):
"""Split the dataset between training set and validation set.
Parameters
----------
y : ndarray of shape (n_samples, )
Target values.
Returns
-------
validation_mask : ndarray of shape (n_samples, )
Equal to 1 on the validation set, 0 on the training set.
"""
n_samples = y.shape[0]
validation_mask = np.zeros(n_samples, dtype=np.uint8)
if not self.early_stopping:
# use the full set for training, with an empty validation set
return validation_mask
if is_classifier(self):
splitter_type = StratifiedShuffleSplit
else:
splitter_type = ShuffleSplit
cv = splitter_type(
test_size=self.validation_fraction, random_state=self.random_state
)
idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y))
if idx_train.shape[0] == 0 or idx_val.shape[0] == 0:
raise ValueError(
"Splitting %d samples into a train set and a validation set "
"with validation_fraction=%r led to an empty set (%d and %d "
"samples). Please either change validation_fraction, increase "
"number of samples, or disable early_stopping."
% (
n_samples,
self.validation_fraction,
idx_train.shape[0],
idx_val.shape[0],
)
)
validation_mask[idx_val] = 1
return validation_mask
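    # Illustrative note (not part of the original source): with
    # ``early_stopping=True`` and ``validation_fraction=0.2`` on a balanced
    # binary problem with 10 samples, the mask returned above contains
    # exactly two ones (the held-out validation samples) and eight zeros;
    # with ``early_stopping=False`` it is all zeros.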
def _make_validation_score_cb(
self, validation_mask, X, y, sample_weight, classes=None
):
if not self.early_stopping:
return None
return _ValidationScoreCallback(
self,
X[validation_mask],
y[validation_mask],
sample_weight[validation_mask],
classes=classes,
)
def _prepare_fit_binary(est, y, i):
"""Initialization for fit_binary.
Returns y, coef, intercept, average_coef, average_intercept.
"""
y_i = np.ones(y.shape, dtype=np.float64, order="C")
y_i[y != est.classes_[i]] = -1.0
average_intercept = 0
average_coef = None
if len(est.classes_) == 2:
if not est.average:
coef = est.coef_.ravel()
intercept = est.intercept_[0]
else:
coef = est._standard_coef.ravel()
intercept = est._standard_intercept[0]
average_coef = est._average_coef.ravel()
average_intercept = est._average_intercept[0]
else:
if not est.average:
coef = est.coef_[i]
intercept = est.intercept_[i]
else:
coef = est._standard_coef[i]
intercept = est._standard_intercept[i]
average_coef = est._average_coef[i]
average_intercept = est._average_intercept[i]
return y_i, coef, intercept, average_coef, average_intercept
def fit_binary(
est,
i,
X,
y,
alpha,
C,
learning_rate,
max_iter,
pos_weight,
neg_weight,
sample_weight,
validation_mask=None,
random_state=None,
):
"""Fit a single binary classifier.
The i'th class is considered the "positive" class.
Parameters
----------
est : Estimator object
The estimator to fit
i : int
Index of the positive class
X : numpy array or sparse matrix of shape [n_samples,n_features]
Training data
y : numpy array of shape [n_samples, ]
Target values
alpha : float
The regularization parameter
C : float
Maximum step size for passive aggressive
learning_rate : str
The learning rate. Accepted values are 'constant', 'optimal',
'invscaling', 'pa1' and 'pa2'.
max_iter : int
The maximum number of iterations (epochs)
pos_weight : float
The weight of the positive class
neg_weight : float
The weight of the negative class
sample_weight : numpy array of shape [n_samples, ]
The weight of each sample
validation_mask : numpy array of shape [n_samples, ], default=None
Precomputed validation mask in case _fit_binary is called in the
context of a one-vs-rest reduction.
random_state : int, RandomState instance, default=None
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
    # if average is not true, average_coef and average_intercept will be
    # unused
y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary(
est, y, i
)
assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0]
random_state = check_random_state(random_state)
dataset, intercept_decay = make_dataset(
X, y_i, sample_weight, random_state=random_state
)
penalty_type = est._get_penalty_type(est.penalty)
learning_rate_type = est._get_learning_rate_type(learning_rate)
if validation_mask is None:
validation_mask = est._make_validation_split(y_i)
classes = np.array([-1, 1], dtype=y_i.dtype)
validation_score_cb = est._make_validation_score_cb(
validation_mask, X, y_i, sample_weight, classes=classes
)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(MAX_INT)
tol = est.tol if est.tol is not None else -np.inf
coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd(
coef,
intercept,
average_coef,
average_intercept,
est.loss_function_,
penalty_type,
alpha,
C,
est.l1_ratio,
dataset,
validation_mask,
est.early_stopping,
validation_score_cb,
int(est.n_iter_no_change),
max_iter,
tol,
int(est.fit_intercept),
int(est.verbose),
int(est.shuffle),
seed,
pos_weight,
neg_weight,
learning_rate_type,
est.eta0,
est.power_t,
0,
est.t_,
intercept_decay,
est.average,
)
if est.average:
if len(est.classes_) == 2:
est._average_intercept[0] = average_intercept
else:
est._average_intercept[i] = average_intercept
return coef, intercept, n_iter_
class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta):
# TODO: Remove squared_loss in v1.2
loss_functions = {
"hinge": (Hinge, 1.0),
"squared_hinge": (SquaredHinge, 1.0),
"perceptron": (Hinge, 0.0),
"log": (Log,),
"modified_huber": (ModifiedHuber,),
"squared_error": (SquaredLoss,),
"squared_loss": (SquaredLoss,),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),
}
@abstractmethod
def __init__(
self,
loss="hinge",
*,
penalty="l2",
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
epsilon=DEFAULT_EPSILON,
n_jobs=None,
random_state=None,
learning_rate="optimal",
eta0=0.0,
power_t=0.5,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
class_weight=None,
warm_start=False,
average=False,
):
super().__init__(
loss=loss,
penalty=penalty,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0,
power_t=power_t,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
warm_start=warm_start,
average=average,
)
self.class_weight = class_weight
self.n_jobs = n_jobs
def _partial_fit(
self,
X,
y,
alpha,
C,
loss,
learning_rate,
max_iter,
classes,
sample_weight,
coef_init,
intercept_init,
):
first_call = not hasattr(self, "classes_")
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
reset=first_call,
)
n_samples, n_features = X.shape
_check_partial_fit_first_call(self, classes)
n_classes = self.classes_.shape[0]
# Allocate datastructures from input arguments
self._expanded_class_weight = compute_class_weight(
self.class_weight, classes=self.classes_, y=y
)
sample_weight = _check_sample_weight(sample_weight, X)
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(
n_classes, n_features, coef_init, intercept_init
)
elif n_features != self.coef_.shape[-1]:
raise ValueError(
"Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1])
)
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
if n_classes > 2:
self._fit_multiclass(
X,
y,
alpha=alpha,
C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter,
)
elif n_classes == 2:
self._fit_binary(
X,
y,
alpha=alpha,
C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter,
)
else:
raise ValueError(
"The number of classes has to be greater than one; got %d class"
% n_classes
)
return self
def _fit(
self,
X,
y,
alpha,
C,
loss,
learning_rate,
coef_init=None,
intercept_init=None,
sample_weight=None,
):
self._validate_params()
if hasattr(self, "classes_"):
# delete the attribute otherwise _partial_fit thinks it's not the first call
delattr(self, "classes_")
# labels can be encoded as float, int, or string literals
# np.unique sorts in asc order; largest class id is positive class
y = self._validate_data(y=y)
classes = np.unique(y)
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
if self.average > 0:
self._standard_coef = self.coef_
self._standard_intercept = self.intercept_
self._average_coef = None
self._average_intercept = None
# Clear iteration count for multiple call to fit.
self.t_ = 1.0
self._partial_fit(
X,
y,
alpha,
C,
loss,
learning_rate,
self.max_iter,
classes,
sample_weight,
coef_init,
intercept_init,
)
if (
self.tol is not None
and self.tol > -np.inf
and self.n_iter_ == self.max_iter
):
warnings.warn(
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning,
)
return self
def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter):
"""Fit a binary classifier on X and y."""
coef, intercept, n_iter_ = fit_binary(
self,
1,
X,
y,
alpha,
C,
learning_rate,
max_iter,
self._expanded_class_weight[1],
self._expanded_class_weight[0],
sample_weight,
random_state=self.random_state,
)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
# need to be 2d
if self.average > 0:
if self.average <= self.t_ - 1:
self.coef_ = self._average_coef.reshape(1, -1)
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef.reshape(1, -1)
self._standard_intercept = np.atleast_1d(intercept)
self.intercept_ = self._standard_intercept
else:
self.coef_ = coef.reshape(1, -1)
# intercept is a float, need to convert it to an array of length 1
self.intercept_ = np.atleast_1d(intercept)
def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter):
"""Fit a multi-class classifier by combining binary classifiers
Each binary classifier predicts one class versus all others. This
strategy is called OvA (One versus All) or OvR (One versus Rest).
"""
# Precompute the validation split using the multiclass labels
# to ensure proper balancing of the classes.
validation_mask = self._make_validation_split(y)
# Use joblib to fit OvA in parallel.
# Pick the random seed for each job outside of fit_binary to avoid
# sharing the estimator random state between threads which could lead
# to non-deterministic behavior
random_state = check_random_state(self.random_state)
seeds = random_state.randint(MAX_INT, size=len(self.classes_))
result = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem"
)(
delayed(fit_binary)(
self,
i,
X,
y,
alpha,
C,
learning_rate,
max_iter,
self._expanded_class_weight[i],
1.0,
sample_weight,
validation_mask=validation_mask,
random_state=seed,
)
for i, seed in enumerate(seeds)
)
# take the maximum of n_iter_ over every binary fit
n_iter_ = 0.0
for i, (_, intercept, n_iter_i) in enumerate(result):
self.intercept_[i] = intercept
n_iter_ = max(n_iter_, n_iter_i)
self.t_ += n_iter_ * X.shape[0]
self.n_iter_ = n_iter_
if self.average > 0:
if self.average <= self.t_ - 1.0:
self.coef_ = self._average_coef
self.intercept_ = self._average_intercept
else:
self.coef_ = self._standard_coef
self._standard_intercept = np.atleast_1d(self.intercept_)
self.intercept_ = self._standard_intercept
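    # Illustrative note (not part of the original source): for a three-class
    # problem, the method above fits three one-vs-rest binary problems
    # (possibly in parallel via joblib), after which ``coef_`` has shape
    # (3, n_features) and ``intercept_`` has shape (3,).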
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence, early stopping, and
learning rate adjustments should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : ndarray of shape (n_samples,)
Subset of the target values.
classes : ndarray of shape (n_classes,), default=None
Classes across all calls to partial_fit.
            Can be obtained via `np.unique(y_all)`, where y_all is the
target vector of the entire dataset.
This argument is required for the first call to partial_fit
and can be omitted in the subsequent calls.
Note that y doesn't need to contain all labels in `classes`.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
if self.class_weight in ["balanced"]:
raise ValueError(
"class_weight '{0}' is not supported for "
"partial_fit. In order to use 'balanced' weights,"
" use compute_class_weight('{0}', "
"classes=classes, y=y). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.".format(self.class_weight)
)
return self._partial_fit(
X,
y,
alpha=self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=1,
classes=classes,
sample_weight=sample_weight,
coef_init=None,
intercept_init=None,
)
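    # Illustrative usage sketch (not part of the original source): streaming
    # minibatches through partial_fit, passing ``classes`` on the first call
    # only (``batches`` is a hypothetical iterable of (X, y) chunks):
    #
    #   clf = SGDClassifier(loss="log")
    #   for i, (X_batch, y_batch) in enumerate(batches):
    #       clf.partial_fit(X_batch, y_batch,
    #                       classes=np.array([0, 1]) if i == 0 else None)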
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_classes, n_features), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (n_classes,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns an instance of self.
"""
return self._fit(
X,
y,
alpha=self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight,
)
class SGDClassifier(BaseSGDClassifier):
"""Linear classifiers (SVM, logistic regression, etc.) with SGD training.
This estimator implements regularized linear models with stochastic
    gradient descent (SGD) learning: the gradient of the loss is estimated
    one sample at a time and the model is updated along the way with a
decreasing strength schedule (aka learning rate). SGD allows minibatch
(online/out-of-core) learning via the `partial_fit` method.
For best results using the default learning rate schedule, the data should
have zero mean and unit variance.
This implementation works with data represented as dense or sparse arrays
of floating point values for the features. The model it fits can be
controlled with the loss parameter; by default, it fits a linear support
vector machine (SVM).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='hinge'
The loss function to be used. Defaults to 'hinge', which gives a
linear SVM.
The possible options are 'hinge', 'log', 'modified_huber',
'squared_hinge', 'perceptron', or a regression loss: 'squared_error',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
The 'log' loss gives logistic regression, a probabilistic classifier.
'modified_huber' is another smooth loss that brings tolerance to
outliers as well as probability estimates.
'squared_hinge' is like hinge but is quadratically penalized.
'perceptron' is the linear loss used by the perceptron algorithm.
The other losses are designed for regression but can be useful in
classification as well; see
:class:`~sklearn.linear_model.SGDRegressor` for a description.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
.. deprecated:: 1.0
The loss 'squared_loss' was deprecated in v1.0 and will be removed
in version 1.2. Use `loss='squared_error'` which is equivalent.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
Also used to compute the learning rate when `learning_rate` is
set to 'optimal'.
Values must be in the range `[0.0, inf)`.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
Values must be in the range `[0.0, 1.0]`.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
Values must be in the range `[1, inf)`.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
Convergence is checked against the training loss or the
validation loss depending on the `early_stopping` parameter.
Values must be in the range `[0.0, inf)`.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
Values must be in the range `[0, inf)`.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
Values must be in the range `[0.0, inf)`.
n_jobs : int, default=None
The number of CPUs to use to do the OVA (One Versus All, for
multi-class problems) computation.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
Integer values must be in the range `[0, 2**32 - 1]`.
learning_rate : str, default='optimal'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where `t0` is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': `eta = eta0`, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
`early_stopping` is `True`, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : float, default=0.0
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0 as eta0 is not used by
the default schedule 'optimal'.
Values must be in the range `(0.0, inf)`.
power_t : float, default=0.5
The exponent for inverse scaling learning rate [default 0.5].
Values must be in the range `(-inf, inf)`.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to `True`, it will automatically set aside
a stratified fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least tol for n_iter_no_change consecutive epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
Values must be in the range `(0.0, 1.0)`.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before stopping
fitting.
Convergence is checked against the training loss or the
validation loss depending on the `early_stopping` parameter.
Integer values must be in the range `[1, max_iter)`.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
class_weight : dict, {class_label: weight} or "balanced", default=None
Preset for the class_weight fit parameter.
Weights associated with classes. If not given, all classes
are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``.
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to `True`, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Integer values must be in the range `[1, n_samples]`.
Attributes
----------
coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \
(n_classes, n_features)
Weights assigned to the features.
intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,)
Constants in decision function.
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
For multiclass fits, it is the maximum over every binary fit.
loss_function_ : concrete ``LossFunction``
classes_ : array of shape (n_classes,)
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.svm.LinearSVC : Linear support vector classification.
LogisticRegression : Logistic regression.
Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to
``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant",
penalty=None)``.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDClassifier
>>> from sklearn.preprocessing import StandardScaler
>>> from sklearn.pipeline import make_pipeline
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> Y = np.array([1, 1, 2, 2])
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> clf = make_pipeline(StandardScaler(),
... SGDClassifier(max_iter=1000, tol=1e-3))
>>> clf.fit(X, Y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdclassifier', SGDClassifier())])
>>> print(clf.predict([[-0.8, -1]]))
[1]
"""
def __init__(
self,
loss="hinge",
*,
penalty="l2",
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
epsilon=DEFAULT_EPSILON,
n_jobs=None,
random_state=None,
learning_rate="optimal",
eta0=0.0,
power_t=0.5,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
class_weight=None,
warm_start=False,
average=False,
):
super().__init__(
loss=loss,
penalty=penalty,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
n_jobs=n_jobs,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0,
power_t=power_t,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
class_weight=class_weight,
warm_start=warm_start,
average=average,
)
def _check_proba(self):
if self.loss not in ("log", "modified_huber"):
raise AttributeError(
"probability estimates are not available for loss=%r" % self.loss
)
return True
@available_if(_check_proba)
def predict_proba(self, X):
"""Probability estimates.
This method is only available for log loss and modified Huber loss.
Multiclass probability estimates are derived from binary (one-vs.-rest)
estimates by simple normalization, as recommended by Zadrozny and
Elkan.
Binary probability estimates for loss="modified_huber" are given by
(clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions
it is necessary to perform proper probability calibration by wrapping
the classifier with
:class:`~sklearn.calibration.CalibratedClassifierCV` instead.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data for prediction.
Returns
-------
ndarray of shape (n_samples, n_classes)
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in `self.classes_`.
References
----------
Zadrozny and Elkan, "Transforming classifier scores into multiclass
probability estimates", SIGKDD'02,
https://dl.acm.org/doi/pdf/10.1145/775047.775151
The justification for the formula in the loss="modified_huber"
case is in the appendix B in:
http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf
"""
check_is_fitted(self)
if self.loss == "log":
return self._predict_proba_lr(X)
elif self.loss == "modified_huber":
binary = len(self.classes_) == 2
scores = self.decision_function(X)
if binary:
prob2 = np.ones((scores.shape[0], 2))
prob = prob2[:, 1]
else:
prob = scores
np.clip(scores, -1, 1, prob)
prob += 1.0
prob /= 2.0
if binary:
prob2[:, 0] -= prob
prob = prob2
else:
# the above might assign zero to all classes, which doesn't
# normalize neatly; work around this to produce uniform
# probabilities
prob_sum = prob.sum(axis=1)
all_zero = prob_sum == 0
if np.any(all_zero):
prob[all_zero, :] = 1
prob_sum[all_zero] = len(self.classes_)
# normalize
prob /= prob_sum.reshape((prob.shape[0], -1))
return prob
else:
raise NotImplementedError(
"predict_(log_)proba only supported when"
" loss='log' or loss='modified_huber' "
"(%r given)"
% self.loss
)
@available_if(_check_proba)
def predict_log_proba(self, X):
"""Log of probability estimates.
This method is only available for log loss and modified Huber loss.
When loss="modified_huber", probability estimates may be hard zeros
and ones, so taking the logarithm is not possible.
See ``predict_proba`` for details.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Input data for prediction.
Returns
-------
T : array-like, shape (n_samples, n_classes)
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in
`self.classes_`.
"""
return np.log(self.predict_proba(X))
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
}
}
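# Illustrative sketch (not part of the upstream source): obtaining probability
# estimates from SGDClassifier. `predict_proba` is only exposed for
# loss="log" or loss="modified_huber"; the toy data below is made up purely
# for demonstration.
#
#     import numpy as np
#     from sklearn.linear_model import SGDClassifier
#     from sklearn.pipeline import make_pipeline
#     from sklearn.preprocessing import StandardScaler
#
#     X = np.array([[-1.0, -1.0], [-2.0, -1.0], [1.0, 1.0], [2.0, 1.0]])
#     y = np.array([0, 0, 1, 1])
#     clf = make_pipeline(
#         StandardScaler(),
#         SGDClassifier(loss="modified_huber", random_state=0),
#     )
#     clf.fit(X, y)
#     clf.predict_proba([[0.5, 0.5]])  # shape (1, 2); each row sums to 1
#
# For hinge and other non-probabilistic losses, wrap the classifier in
# sklearn.calibration.CalibratedClassifierCV instead, as noted in the
# predict_proba docstring above.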
class BaseSGDRegressor(RegressorMixin, BaseSGD):
# TODO: Remove squared_loss in v1.2
loss_functions = {
"squared_error": (SquaredLoss,),
"squared_loss": (SquaredLoss,),
"huber": (Huber, DEFAULT_EPSILON),
"epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON),
"squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON),
}
@abstractmethod
def __init__(
self,
loss="squared_error",
*,
penalty="l2",
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
epsilon=DEFAULT_EPSILON,
random_state=None,
learning_rate="invscaling",
eta0=0.01,
power_t=0.25,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
warm_start=False,
average=False,
):
super().__init__(
loss=loss,
penalty=penalty,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0,
power_t=power_t,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
warm_start=warm_start,
average=average,
)
def _partial_fit(
self,
X,
y,
alpha,
C,
loss,
learning_rate,
max_iter,
sample_weight,
coef_init,
intercept_init,
):
first_call = getattr(self, "coef_", None) is None
X, y = self._validate_data(
X,
y,
accept_sparse="csr",
copy=False,
order="C",
dtype=np.float64,
accept_large_sparse=False,
reset=first_call,
)
y = y.astype(np.float64, copy=False)
n_samples, n_features = X.shape
sample_weight = _check_sample_weight(sample_weight, X)
# Allocate datastructures from input arguments
if first_call:
self._allocate_parameter_mem(1, n_features, coef_init, intercept_init)
if self.average > 0 and getattr(self, "_average_coef", None) is None:
self._average_coef = np.zeros(n_features, dtype=np.float64, order="C")
self._average_intercept = np.zeros(1, dtype=np.float64, order="C")
self._fit_regressor(
X, y, alpha, C, loss, learning_rate, sample_weight, max_iter
)
return self
def partial_fit(self, X, y, sample_weight=None):
"""Perform one epoch of stochastic gradient descent on given samples.
Internally, this method uses ``max_iter = 1``. Therefore, it is not
guaranteed that a minimum of the cost function is reached after calling
it once. Matters such as objective convergence and early stopping
should be handled by the user.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of training data.
y : numpy array of shape (n_samples,)
Subset of target values.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns an instance of self.
"""
self._validate_params(for_partial_fit=True)
return self._partial_fit(
X,
y,
self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=1,
sample_weight=sample_weight,
coef_init=None,
intercept_init=None,
)
def _fit(
self,
X,
y,
alpha,
C,
loss,
learning_rate,
coef_init=None,
intercept_init=None,
sample_weight=None,
):
self._validate_params()
if self.warm_start and getattr(self, "coef_", None) is not None:
if coef_init is None:
coef_init = self.coef_
if intercept_init is None:
intercept_init = self.intercept_
else:
self.coef_ = None
self.intercept_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = 1.0
self._partial_fit(
X,
y,
alpha,
C,
loss,
learning_rate,
self.max_iter,
sample_weight,
coef_init,
intercept_init,
)
if (
self.tol is not None
and self.tol > -np.inf
and self.n_iter_ == self.max_iter
):
warnings.warn(
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning,
)
return self
def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None):
"""Fit linear model with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray of shape (n_samples,)
Target values.
coef_init : ndarray of shape (n_features,), default=None
The initial coefficients to warm-start the optimization.
intercept_init : ndarray of shape (1,), default=None
The initial intercept to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), default=None
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Fitted `SGDRegressor` estimator.
"""
return self._fit(
X,
y,
alpha=self.alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
intercept_init=intercept_init,
sample_weight=sample_weight,
)
def _decision_function(self, X):
"""Predict using the linear model
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
check_is_fitted(self)
X = self._validate_data(X, accept_sparse="csr", reset=False)
scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_
return scores.ravel()
def predict(self, X):
"""Predict using the linear model.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Input data.
Returns
-------
ndarray of shape (n_samples,)
Predicted target values per element in X.
"""
return self._decision_function(X)
def _fit_regressor(
self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter
):
dataset, intercept_decay = make_dataset(X, y, sample_weight)
loss_function = self._get_loss_function(loss)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
if not hasattr(self, "t_"):
self.t_ = 1.0
validation_mask = self._make_validation_split(y)
validation_score_cb = self._make_validation_score_cb(
validation_mask, X, y, sample_weight
)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self.tol if self.tol is not None else -np.inf
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = self.intercept_
average_coef = None # Not used
average_intercept = [0] # Not used
coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(
coef,
intercept[0],
average_coef,
average_intercept[0],
loss_function,
penalty_type,
alpha,
C,
self.l1_ratio,
dataset,
validation_mask,
self.early_stopping,
validation_score_cb,
int(self.n_iter_no_change),
max_iter,
tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
1.0,
1.0,
learning_rate_type,
self.eta0,
self.power_t,
0,
self.t_,
intercept_decay,
self.average,
)
self.t_ += self.n_iter_ * X.shape[0]
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
# made enough updates for averaging to be taken into account
self.coef_ = average_coef
self.intercept_ = np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.intercept_ = np.atleast_1d(intercept)
else:
self.intercept_ = np.atleast_1d(intercept)
class SGDRegressor(BaseSGDRegressor):
"""Linear model fitted by minimizing a regularized empirical loss with SGD.
SGD stands for Stochastic Gradient Descent: the gradient of the loss is
estimated one sample at a time and the model is updated along the way with
a decreasing strength schedule (aka learning rate).
The regularizer is a penalty added to the loss function that shrinks model
parameters towards the zero vector using either the squared euclidean norm
L2 or the absolute norm L1 or a combination of both (Elastic Net). If the
parameter update crosses the 0.0 value because of the regularizer, the
update is truncated to 0.0 to allow for learning sparse models and achieve
online feature selection.
This implementation works with data represented as dense numpy arrays of
floating point values for the features.
Read more in the :ref:`User Guide <sgd>`.
Parameters
----------
loss : str, default='squared_error'
The loss function to be used. The possible values are 'squared_error',
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'
The 'squared_error' refers to the ordinary least squares fit.
'huber' modifies 'squared_error' to focus less on getting outliers
correct by switching from squared to linear loss past a distance of
epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is
linear past that; this is the loss function used in SVR.
'squared_epsilon_insensitive' is the same but becomes squared loss past
a tolerance of epsilon.
More details about the losses formulas can be found in the
:ref:`User Guide <sgd_mathematical_formulation>`.
.. deprecated:: 1.0
The loss 'squared_loss' was deprecated in v1.0 and will be removed
in version 1.2. Use `loss='squared_error'` which is equivalent.
penalty : {'l2', 'l1', 'elasticnet'}, default='l2'
The penalty (aka regularization term) to be used. Defaults to 'l2'
which is the standard regularizer for linear SVM models. 'l1' and
'elasticnet' might bring sparsity to the model (feature selection)
not achievable with 'l2'.
alpha : float, default=0.0001
Constant that multiplies the regularization term. The higher the
value, the stronger the regularization.
Also used to compute the learning rate when `learning_rate` is
set to 'optimal'.
l1_ratio : float, default=0.15
The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1.
l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1.
Only used if `penalty` is 'elasticnet'.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. If False, the
data is assumed to be already centered.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
:meth:`partial_fit` method.
.. versionadded:: 0.19
tol : float, default=1e-3
The stopping criterion. If it is not None, training will stop
when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive
epochs.
Convergence is checked against the training loss or the
validation loss depending on the `early_stopping` parameter.
.. versionadded:: 0.19
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
verbose : int, default=0
The verbosity level.
epsilon : float, default=0.1
Epsilon in the epsilon-insensitive loss functions; only if `loss` is
'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'.
For 'huber', determines the threshold at which it becomes less
important to get the prediction exactly right.
For epsilon-insensitive, any differences between the current prediction
and the correct label are ignored if they are less than this threshold.
random_state : int, RandomState instance, default=None
Used for shuffling the data, when ``shuffle`` is set to ``True``.
Pass an int for reproducible output across multiple function calls.
See :term:`Glossary <random_state>`.
learning_rate : str, default='invscaling'
The learning rate schedule:
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
.. versionadded:: 0.20
Added 'adaptive' option
eta0 : float, default=0.01
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.01.
power_t : float, default=0.25
The exponent for inverse scaling learning rate.
early_stopping : bool, default=False
Whether to use early stopping to terminate training when validation
score is not improving. If set to True, it will automatically set aside
a fraction of training data as validation and terminate
training when validation score returned by the `score` method is not
improving by at least `tol` for `n_iter_no_change` consecutive
epochs.
.. versionadded:: 0.20
Added 'early_stopping' option
validation_fraction : float, default=0.1
The proportion of training data to set aside as validation set for
early stopping. Must be between 0 and 1.
Only used if `early_stopping` is True.
.. versionadded:: 0.20
Added 'validation_fraction' option
n_iter_no_change : int, default=5
Number of iterations with no improvement to wait before stopping
fitting.
Convergence is checked against the training loss or the
validation loss depending on the `early_stopping` parameter.
.. versionadded:: 0.20
Added 'n_iter_no_change' option
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights across all
updates and stores the result in the ``coef_`` attribute. If set to
an int greater than 1, averaging will begin once the total number of
samples seen reaches `average`. So ``average=10`` will begin
averaging after seeing 10 samples.
Attributes
----------
coef_ : ndarray of shape (n_features,)
Weights assigned to the features.
intercept_ : ndarray of shape (1,)
The intercept term.
n_iter_ : int
The actual number of iterations before reaching the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
HuberRegressor : Linear regression model that is robust to outliers.
Lars : Least Angle Regression model.
Lasso : Linear Model trained with L1 prior as regularizer.
RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm.
Ridge : Linear least squares with l2 regularization.
sklearn.svm.SVR : Epsilon-Support Vector Regression.
TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model.
Examples
--------
>>> import numpy as np
>>> from sklearn.linear_model import SGDRegressor
>>> from sklearn.pipeline import make_pipeline
>>> from sklearn.preprocessing import StandardScaler
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> # Always scale the input. The most convenient way is to use a pipeline.
>>> reg = make_pipeline(StandardScaler(),
... SGDRegressor(max_iter=1000, tol=1e-3))
>>> reg.fit(X, y)
Pipeline(steps=[('standardscaler', StandardScaler()),
('sgdregressor', SGDRegressor())])
"""
def __init__(
self,
loss="squared_error",
*,
penalty="l2",
alpha=0.0001,
l1_ratio=0.15,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
epsilon=DEFAULT_EPSILON,
random_state=None,
learning_rate="invscaling",
eta0=0.01,
power_t=0.25,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
warm_start=False,
average=False,
):
super().__init__(
loss=loss,
penalty=penalty,
alpha=alpha,
l1_ratio=l1_ratio,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=epsilon,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0,
power_t=power_t,
early_stopping=early_stopping,
validation_fraction=validation_fraction,
n_iter_no_change=n_iter_no_change,
warm_start=warm_start,
average=average,
)
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
),
}
}
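# Illustrative sketch (not part of the upstream source): out-of-core learning
# with SGDRegressor.partial_fit. Each call performs a single epoch over the
# mini-batch it is given; convergence checks are left to the caller. The
# random batch generator below is a stand-in for any streaming data source.
#
#     import numpy as np
#     from sklearn.linear_model import SGDRegressor
#
#     rng = np.random.RandomState(0)
#     reg = SGDRegressor(random_state=0)
#     for _ in range(20):                      # 20 mini-batches
#         X_batch = rng.randn(32, 5)
#         y_batch = X_batch @ np.arange(1.0, 6.0) + 0.1 * rng.randn(32)
#         reg.partial_fit(X_batch, y_batch)    # one epoch per call
#     reg.predict(rng.randn(3, 5))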
class SGDOneClassSVM(BaseSGD, OutlierMixin):
"""Solves linear One-Class SVM using Stochastic Gradient Descent.
This implementation is meant to be used with a kernel approximation
technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results
similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by
default.
Read more in the :ref:`User Guide <sgd_online_one_class_svm>`.
.. versionadded:: 1.0
Parameters
----------
nu : float, default=0.5
The nu parameter of the One Class SVM: an upper bound on the
fraction of training errors and a lower bound of the fraction of
support vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
fit_intercept : bool, default=True
Whether the intercept should be estimated or not. Defaults to True.
max_iter : int, default=1000
The maximum number of passes over the training data (aka epochs).
It only impacts the behavior in the ``fit`` method, and not the
`partial_fit`. Defaults to 1000.
tol : float or None, default=1e-3
The stopping criterion. If it is not None, the iterations will stop
when (loss > previous_loss - tol). Defaults to 1e-3.
shuffle : bool, default=True
Whether or not the training data should be shuffled after each epoch.
Defaults to True.
verbose : int, default=0
The verbosity level.
random_state : int, RandomState instance or None, default=None
The seed of the pseudo random number generator to use when shuffling
the data. If int, random_state is the seed used by the random number
generator; If RandomState instance, random_state is the random number
generator; If None, the random number generator is the RandomState
instance used by `np.random`.
learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal'
The learning rate schedule to use with `fit`. (If using `partial_fit`,
learning rate must be controlled directly).
- 'constant': `eta = eta0`
- 'optimal': `eta = 1.0 / (alpha * (t + t0))`
where t0 is chosen by a heuristic proposed by Leon Bottou.
- 'invscaling': `eta = eta0 / pow(t, power_t)`
- 'adaptive': eta = eta0, as long as the training loss keeps decreasing.
Each time n_iter_no_change consecutive epochs fail to decrease the
training loss by tol or fail to increase validation score by tol if
early_stopping is True, the current learning rate is divided by 5.
eta0 : float, default=0.0
The initial learning rate for the 'constant', 'invscaling' or
'adaptive' schedules. The default value is 0.0 as eta0 is not used by
the default schedule 'optimal'.
power_t : float, default=0.5
The exponent for inverse scaling learning rate [default 0.5].
warm_start : bool, default=False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
See :term:`the Glossary <warm_start>`.
Repeatedly calling fit or partial_fit when warm_start is True can
result in a different solution than when calling fit a single time
because of the way the data is shuffled.
If a dynamic learning rate is used, the learning rate is adapted
depending on the number of samples already seen. Calling ``fit`` resets
this counter, while ``partial_fit`` will result in increasing the
existing counter.
average : bool or int, default=False
When set to True, computes the averaged SGD weights and stores the
result in the ``coef_`` attribute. If set to an int greater than 1,
averaging will begin once the total number of samples seen reaches
average. So ``average=10`` will begin averaging after seeing 10
samples.
Attributes
----------
coef_ : ndarray of shape (1, n_features)
Weights assigned to the features.
offset_ : ndarray of shape (1,)
Offset used to define the decision function from the raw scores.
We have the relation: decision_function = score_samples - offset.
n_iter_ : int
The actual number of iterations to reach the stopping criterion.
t_ : int
Number of weight updates performed during training.
Same as ``(n_iter_ * n_samples)``.
loss_function_ : concrete ``LossFunction``
n_features_in_ : int
Number of features seen during :term:`fit`.
.. versionadded:: 0.24
feature_names_in_ : ndarray of shape (`n_features_in_`,)
Names of features seen during :term:`fit`. Defined only when `X`
has feature names that are all strings.
.. versionadded:: 1.0
See Also
--------
sklearn.svm.OneClassSVM : Unsupervised Outlier Detection.
Notes
-----
This estimator has a linear complexity in the number of training samples
and is thus better suited than the `sklearn.svm.OneClassSVM`
implementation for datasets with a large number of training samples (say
> 10,000).
Examples
--------
>>> import numpy as np
>>> from sklearn import linear_model
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> clf = linear_model.SGDOneClassSVM(random_state=42)
>>> clf.fit(X)
SGDOneClassSVM(random_state=42)
>>> print(clf.predict([[4, 4]]))
[1]
"""
loss_functions = {"hinge": (Hinge, 1.0)}
def __init__(
self,
nu=0.5,
fit_intercept=True,
max_iter=1000,
tol=1e-3,
shuffle=True,
verbose=0,
random_state=None,
learning_rate="optimal",
eta0=0.0,
power_t=0.5,
warm_start=False,
average=False,
):
alpha = nu / 2
self.nu = nu
super(SGDOneClassSVM, self).__init__(
loss="hinge",
penalty="l2",
alpha=alpha,
C=1.0,
l1_ratio=0,
fit_intercept=fit_intercept,
max_iter=max_iter,
tol=tol,
shuffle=shuffle,
verbose=verbose,
epsilon=DEFAULT_EPSILON,
random_state=random_state,
learning_rate=learning_rate,
eta0=eta0,
power_t=power_t,
early_stopping=False,
validation_fraction=0.1,
n_iter_no_change=5,
warm_start=warm_start,
average=average,
)
def _validate_params(self, for_partial_fit=False):
"""Validate input params."""
if not (0 < self.nu <= 1):
raise ValueError("nu must be in (0, 1], got nu=%f" % self.nu)
super(SGDOneClassSVM, self)._validate_params(for_partial_fit=for_partial_fit)
def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter):
"""Uses SGD implementation with X and y=np.ones(n_samples)."""
# The One-Class SVM uses the SGD implementation with
# y=np.ones(n_samples).
n_samples = X.shape[0]
y = np.ones(n_samples, dtype=np.float64, order="C")
dataset, offset_decay = make_dataset(X, y, sample_weight)
penalty_type = self._get_penalty_type(self.penalty)
learning_rate_type = self._get_learning_rate_type(learning_rate)
# early stopping is set to False for the One-Class SVM. thus
# validation_mask and validation_score_cb will be set to values
# associated to early_stopping=False in _make_validation_split and
# _make_validation_score_cb respectively.
validation_mask = self._make_validation_split(y)
validation_score_cb = self._make_validation_score_cb(
validation_mask, X, y, sample_weight
)
random_state = check_random_state(self.random_state)
# numpy mtrand expects a C long which is a signed 32 bit integer under
# Windows
seed = random_state.randint(0, np.iinfo(np.int32).max)
tol = self.tol if self.tol is not None else -np.inf
one_class = 1
# There are no class weights for the One-Class SVM and they are
# therefore set to 1.
pos_weight = 1
neg_weight = 1
if self.average:
coef = self._standard_coef
intercept = self._standard_intercept
average_coef = self._average_coef
average_intercept = self._average_intercept
else:
coef = self.coef_
intercept = 1 - self.offset_
average_coef = None # Not used
average_intercept = [0] # Not used
coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd(
coef,
intercept[0],
average_coef,
average_intercept[0],
self.loss_function_,
penalty_type,
alpha,
C,
self.l1_ratio,
dataset,
validation_mask,
self.early_stopping,
validation_score_cb,
int(self.n_iter_no_change),
max_iter,
tol,
int(self.fit_intercept),
int(self.verbose),
int(self.shuffle),
seed,
neg_weight,
pos_weight,
learning_rate_type,
self.eta0,
self.power_t,
one_class,
self.t_,
offset_decay,
self.average,
)
self.t_ += self.n_iter_ * n_samples
if self.average > 0:
self._average_intercept = np.atleast_1d(average_intercept)
self._standard_intercept = np.atleast_1d(intercept)
if self.average <= self.t_ - 1.0:
# made enough updates for averaging to be taken into account
self.coef_ = average_coef
self.offset_ = 1 - np.atleast_1d(average_intercept)
else:
self.coef_ = coef
self.offset_ = 1 - np.atleast_1d(intercept)
else:
self.offset_ = 1 - np.atleast_1d(intercept)
def _partial_fit(
self,
X,
alpha,
C,
loss,
learning_rate,
max_iter,
sample_weight,
coef_init,
offset_init,
):
first_call = getattr(self, "coef_", None) is None
X = self._validate_data(
X,
None,
accept_sparse="csr",
dtype=np.float64,
order="C",
accept_large_sparse=False,
reset=first_call,
)
n_features = X.shape[1]
# Allocate datastructures from input arguments
sample_weight = _check_sample_weight(sample_weight, X)
# We use intercept = 1 - offset where intercept is the intercept of
# the SGD implementation and offset is the offset of the One-Class SVM
# optimization problem.
if getattr(self, "coef_", None) is None or coef_init is not None:
self._allocate_parameter_mem(1, n_features, coef_init, offset_init, 1)
elif n_features != self.coef_.shape[-1]:
raise ValueError(
"Number of features %d does not match previous data %d."
% (n_features, self.coef_.shape[-1])
)
if self.average and getattr(self, "_average_coef", None) is None:
self._average_coef = np.zeros(n_features, dtype=np.float64, order="C")
self._average_intercept = np.zeros(1, dtype=np.float64, order="C")
self.loss_function_ = self._get_loss_function(loss)
if not hasattr(self, "t_"):
self.t_ = 1.0
# delegate to concrete training procedure
self._fit_one_class(
X,
alpha=alpha,
C=C,
learning_rate=learning_rate,
sample_weight=sample_weight,
max_iter=max_iter,
)
return self
def partial_fit(self, X, y=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Subset of the training data.
y : Ignored
Not used, present for API consistency by convention.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed.
Returns
-------
self : object
Returns a fitted instance of self.
"""
alpha = self.nu / 2
self._validate_params(for_partial_fit=True)
return self._partial_fit(
X,
alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
max_iter=1,
sample_weight=sample_weight,
coef_init=None,
offset_init=None,
)
def _fit(
self,
X,
alpha,
C,
loss,
learning_rate,
coef_init=None,
offset_init=None,
sample_weight=None,
):
self._validate_params()
if self.warm_start and hasattr(self, "coef_"):
if coef_init is None:
coef_init = self.coef_
if offset_init is None:
offset_init = self.offset_
else:
self.coef_ = None
self.offset_ = None
# Clear iteration count for multiple calls to fit.
self.t_ = 1.0
self._partial_fit(
X,
alpha,
C,
loss,
learning_rate,
self.max_iter,
sample_weight,
coef_init,
offset_init,
)
if (
self.tol is not None
and self.tol > -np.inf
and self.n_iter_ == self.max_iter
):
warnings.warn(
"Maximum number of iteration reached before "
"convergence. Consider increasing max_iter to "
"improve the fit.",
ConvergenceWarning,
)
return self
def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None):
"""Fit linear One-Class SVM with Stochastic Gradient Descent.
This solves an equivalent optimization problem of the
One-Class SVM primal optimization problem and returns a weight vector
w and an offset rho such that the decision function is given by
<w, x> - rho.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
coef_init : array, shape (n_classes, n_features)
The initial coefficients to warm-start the optimization.
offset_init : array, shape (n_classes,)
The initial offset to warm-start the optimization.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples.
If not provided, uniform weights are assumed. These weights will
be multiplied with class_weight (passed through the
constructor) if class_weight is specified.
Returns
-------
self : object
Returns a fitted instance of self.
"""
alpha = self.nu / 2
self._fit(
X,
alpha=alpha,
C=1.0,
loss=self.loss,
learning_rate=self.learning_rate,
coef_init=coef_init,
offset_init=offset_init,
sample_weight=sample_weight,
)
return self
def decision_function(self, X):
"""Signed distance to the separating hyperplane.
Signed distance is positive for an inlier and negative for an
outlier.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
dec : array-like, shape (n_samples,)
Decision function values of the samples.
"""
check_is_fitted(self, "coef_")
X = self._validate_data(X, accept_sparse="csr", reset=False)
decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_
return decisions.ravel()
def score_samples(self, X):
"""Raw scoring function of the samples.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
score_samples : array-like, shape (n_samples,)
Unshifted scoring function values of the samples.
"""
score_samples = self.decision_function(X) + self.offset_
return score_samples
def predict(self, X):
"""Return labels (1 inlier, -1 outlier) of the samples.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Testing data.
Returns
-------
y : array, shape (n_samples,)
Labels of the samples.
"""
y = (self.decision_function(X) >= 0).astype(np.int32)
y[y == 0] = -1 # for consistency with outlier detectors
return y
def _more_tags(self):
return {
"_xfail_checks": {
"check_sample_weights_invariance": (
"zero sample_weight is not equivalent to removing samples"
)
}
}
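# Illustrative sketch (not part of the upstream source): approximating a
# kernelized One-Class SVM by combining a kernel approximation step with
# SGDOneClassSVM, as suggested in the class docstring. The gamma and nu
# values are arbitrary choices for demonstration.
#
#     import numpy as np
#     from sklearn.kernel_approximation import Nystroem
#     from sklearn.linear_model import SGDOneClassSVM
#     from sklearn.pipeline import make_pipeline
#
#     rng = np.random.RandomState(42)
#     X = rng.randn(500, 2)
#     clf = make_pipeline(
#         Nystroem(gamma=0.5, random_state=42),
#         SGDOneClassSVM(nu=0.05, random_state=42),
#     )
#     clf.fit(X)
#     clf.predict(X)  # +1 for inliers, -1 for outliers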
|
manhhomienbienthuy/scikit-learn
|
sklearn/linear_model/_stochastic_gradient.py
|
Python
|
bsd-3-clause
| 85,464
|
#!/usr/bin/env python3
# Copyright 2020 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=logging-fstring-interpolation
"""Flashes firmware using Segger J-Link.
This script requires Segger hardware attached via JTAG/SWD.
See
https://chromium.googlesource.com/chromiumos/platform/ec/+/HEAD/docs/fingerprint/fingerprint-debugging.md#flash
for instructions.
"""
import argparse
import logging
import os
import shutil
import socket
import subprocess
import sys
import tempfile
import time
DEFAULT_SEGGER_REMOTE_PORT = 19020
# Commands are documented here: https://wiki.segger.com/J-Link_Commander
JLINK_COMMANDS = """
exitonerror 1
r
loadfile {FIRMWARE} {FLASH_ADDRESS}
r
go
exit
"""
class BoardConfig:
"""Board configuration."""
def __init__(self, interface, device, flash_address):
self.interface = interface
self.device = device
self.flash_address = flash_address
SWD_INTERFACE = 'SWD'
STM32_DEFAULT_FLASH_ADDRESS = '0x8000000'
DRAGONCLAW_CONFIG = BoardConfig(interface=SWD_INTERFACE, device='STM32F412CG',
flash_address=STM32_DEFAULT_FLASH_ADDRESS)
ICETOWER_CONFIG = BoardConfig(interface=SWD_INTERFACE, device='STM32H743ZI',
flash_address=STM32_DEFAULT_FLASH_ADDRESS)
BOARD_CONFIGS = {
'dragonclaw': DRAGONCLAW_CONFIG,
'bloonchipper': DRAGONCLAW_CONFIG,
'nucleo-f412zg': DRAGONCLAW_CONFIG,
'dartmonkey': ICETOWER_CONFIG,
'icetower': ICETOWER_CONFIG,
'nucleo-dartmonkey': ICETOWER_CONFIG,
'nucleo-h743zi': ICETOWER_CONFIG,
}
def is_tcp_port_open(host: str, tcp_port: int) -> bool:
"""Checks if the TCP host port is open."""
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.settimeout(2) # 2 Second Timeout
try:
sock.connect((host, tcp_port))
sock.shutdown(socket.SHUT_RDWR)
except ConnectionRefusedError:
return False
except socket.timeout:
return False
finally:
sock.close()
# Other errors are propagated as odd exceptions.
# We shutdown and closed the connection, but the server may need a second
# to start listening again. If the following error is seen, this timeout
# should be increased. 300ms seems to be the minimum.
#
# Connecting to J-Link via IP...FAILED: Can not connect to J-Link via \
# TCP/IP (127.0.0.1, port 19020)
time.sleep(0.5)
return True
def create_jlink_command_file(firmware_file, config):
tmp = tempfile.NamedTemporaryFile()
tmp.write(JLINK_COMMANDS.format(FIRMWARE=firmware_file,
FLASH_ADDRESS=config.flash_address).encode(
'utf-8'))
tmp.flush()
return tmp
def flash(jlink_exe, remote, device, interface, cmd_file):
cmd = [
jlink_exe,
]
if remote:
logging.debug(f'Connecting to J-Link over TCP/IP {remote}.')
remote_components = remote.split(':')
if len(remote_components) not in [1, 2]:
logging.error(f'Given remote "{remote}" is malformed.')
return 1
host = remote_components[0]
try:
ip = socket.gethostbyname(host)
except socket.gaierror as e:
logging.error(f'Failed to resolve host "{host}": {e}.')
return 1
logging.debug(f'Resolved {host} as {ip}.')
port = DEFAULT_SEGGER_REMOTE_PORT
if len(remote_components) == 2:
try:
port = int(remote_components[1])
except ValueError:
logging.error(
f'Given remote port "{remote_components[1]}" is malformed.')
return 1
remote = f'{ip}:{port}'
logging.debug(f'Checking connection to {remote}.')
if not is_tcp_port_open(ip, port):
logging.error(
f"JLink server doesn't seem to be listening on {remote}.")
logging.error('Ensure that JLinkRemoteServerCLExe is running.')
return 1
cmd.extend(['-ip', remote])
cmd.extend([
'-device', device,
'-if', interface,
'-speed', 'auto',
'-autoconnect', '1',
'-CommandFile', cmd_file,
])
logging.debug('Running command: "%s"', ' '.join(cmd))
completed_process = subprocess.run(cmd) # pylint: disable=subprocess-run-check
logging.debug('JLink return code: %d', completed_process.returncode)
return completed_process.returncode
def main(argv: list):
parser = argparse.ArgumentParser()
default_jlink = './JLink_Linux_V684a_x86_64/JLinkExe'
if shutil.which(default_jlink) is None:
default_jlink = 'JLinkExe'
parser.add_argument(
'--jlink', '-j',
help='JLinkExe path (default: ' + default_jlink + ')',
default=default_jlink)
parser.add_argument(
'--remote', '-n',
help='Use TCP/IP host[:port] to connect to a J-Link or '
'JLinkRemoteServerCLExe. If unspecified, connect over USB.')
default_board = 'bloonchipper'
parser.add_argument(
'--board', '-b',
help='Board (default: ' + default_board + ')',
default=default_board)
default_firmware = os.path.join('./build', default_board, 'ec.bin')
parser.add_argument(
'--image', '-i',
help='Firmware binary (default: ' + default_firmware + ')',
default=default_firmware)
log_level_choices = ['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL']
parser.add_argument(
'--log_level', '-l',
choices=log_level_choices,
default='DEBUG'
)
args = parser.parse_args(argv)
logging.basicConfig(level=args.log_level)
if args.board not in BOARD_CONFIGS:
logging.error('Unable to find a config for board: "%s"', args.board)
sys.exit(1)
config = BOARD_CONFIGS[args.board]
args.image = os.path.realpath(args.image)
cmd_file = create_jlink_command_file(args.image, config)
ret_code = flash(args.jlink, args.remote, config.device, config.interface,
cmd_file.name)
cmd_file.close()
return ret_code
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
coreboot/chrome-ec
|
util/flash_jlink.py
|
Python
|
bsd-3-clause
| 6,344
|
import unittest
import numpy as np
from chainer import testing
from chainercv.utils import generate_random_bbox
from chainercv.visualizations import vis_bbox
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': None,
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': None,
'label_names': None},
{
'n_bbox': 3, 'label': (0, 1, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 0, 'label': (), 'score': (),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'), 'no_img': True},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'),
'instance_colors': [
(255, 0, 0), (0, 255, 0), (0, 0, 255), (100, 100, 100)]},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBbox(unittest.TestCase):
def setUp(self):
if hasattr(self, 'no_img'):
self.img = None
else:
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = generate_random_bbox(
self.n_bbox, (48, 32), 8, 16)
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox(self):
ax = vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
self.assertIsInstance(ax, matplotlib.axes.Axes)
@testing.parameterize(*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1, 0.75),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 3), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (-1, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBboxInvalidInputs(unittest.TestCase):
def setUp(self):
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = np.random.uniform(size=(self.n_bbox, 4))
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox_invalid_inputs(self):
with self.assertRaises(ValueError):
vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
testing.run_module(__name__, __file__)
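# Illustrative usage sketch (not part of the test suite): the calls exercised
# above boil down to something like the following, which draws three random
# boxes on a random CHW image and returns a matplotlib Axes.
#
#     import numpy as np
#     from chainercv.utils import generate_random_bbox
#     from chainercv.visualizations import vis_bbox
#
#     img = np.random.randint(0, 255, size=(3, 32, 48))
#     bbox = generate_random_bbox(3, (48, 32), 8, 16)
#     label = np.array([0, 1, 2])
#     score = np.array([0.1, 0.5, 0.9])
#     ax = vis_bbox(img, bbox, label, score, label_names=('c0', 'c1', 'c2'))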
|
chainer/chainercv
|
tests/visualizations_tests/test_vis_bbox.py
|
Python
|
mit
| 4,256
|
#!/usr/bin/env python
'''
ooiui.core.app.science
Defines the application for the Science UI
'''
import os
from flask import Flask
from flask.ext.cache import Cache
from flask_environments import Environments
app = Flask(__name__, static_url_path='', template_folder='../../templates', static_folder='../../static')
env = Environments(app, default_env='DEVELOPMENT')
basedir = 'ooiui/config'
if os.path.exists(os.path.join(basedir, 'config_local.yml')):
env.from_yaml(os.path.join(basedir, 'config_local.yml'))
else:
env.from_yaml(os.path.join(basedir, 'config.yml'))
cache = Cache(app, config={'CACHE_TYPE': app.config['CACHE_TYPE']})
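# Illustrative note (not part of the application code): the `cache` object
# created above can decorate view functions elsewhere in the package using
# Flask-Cache's cached() decorator (the route below is hypothetical):
#
#     @app.route('/api/status')
#     @cache.cached(timeout=60)
#     def status():
#         return 'ok'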
|
maka-io/ooi-ui
|
ooiui/core/app/__init__.py
|
Python
|
mit
| 651
|
from . import Event
class PlayerEvent(Event):
def setup(s):
s.username = s.username.encode('ascii')
#Raised in manager
class PlayerJoin(PlayerEvent):
username = Event.Arg(required=True)
ip = Event.Arg(required=True)
class PlayerQuit(PlayerEvent):
username = Event.Arg(required=True)
reason = Event.Arg(required=True)
class PlayerChat(PlayerEvent):
username = Event.Arg(required=True)
message = Event.Arg(required=True)
class PlayerDeath(PlayerEvent):
text = Event.Arg()
username = Event.Arg(required=True)
cause = Event.Arg(required=True)
killer = Event.Arg()
weapon = Event.Arg()
format = Event.Arg(default="{username} died")
def get_text(self, **kw):
d = dict(((k, getattr(self, k)) for k in ('username', 'killer', 'weapon')))
d.update(kw)
return self.format.format(**d)
def setup(self):
self.text = self.get_text()
|
frostyfrog/mark2
|
mk2/events/player.py
|
Python
|
mit
| 952
|
"""
Auto-discovers all unittests in the tests directory and runs them
"""
import unittest
loader = unittest.TestLoader()
tests = loader.discover('tests', pattern='*.py', top_level_dir='.')
testRunner = unittest.TextTestRunner()
testRunner.run(tests)
|
jimrybarski/fylm_critic
|
tests.py
|
Python
|
mit
| 250
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'CultivosVariedad'
db.create_table(u'indicador11_cultivosvariedad', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cultivo', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'indicador11', ['CultivosVariedad'])
# Adding model 'Variedades'
db.create_table(u'indicador11_variedades', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cultivo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['indicador11.CultivosVariedad'])),
('variedad', self.gf('django.db.models.fields.CharField')(max_length=200)),
))
db.send_create_signal(u'indicador11', ['Variedades'])
# Adding model 'Semilla'
db.create_table(u'indicador11_semilla', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cultivo', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['indicador11.Variedades'])),
('origen', self.gf('django.db.models.fields.IntegerField')()),
('encuesta', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['monitoreo.Encuesta'])),
))
db.send_create_signal(u'indicador11', ['Semilla'])
def backwards(self, orm):
# Deleting model 'CultivosVariedad'
db.delete_table(u'indicador11_cultivosvariedad')
# Deleting model 'Variedades'
db.delete_table(u'indicador11_variedades')
# Deleting model 'Semilla'
db.delete_table(u'indicador11_semilla')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'configuracion.areaaccion': {
'Meta': {'object_name': 'AreaAccion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'configuracion.plataforma': {
'Meta': {'object_name': 'Plataforma'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'configuracion.sector': {
'Meta': {'object_name': 'Sector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'configuracion.sitioaccion': {
'Meta': {'object_name': 'SitioAccion'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '250'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'indicador11.cultivosvariedad': {
'Meta': {'ordering': "['cultivo']", 'object_name': 'CultivosVariedad'},
'cultivo': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'indicador11.semilla': {
'Meta': {'object_name': 'Semilla'},
'cultivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['indicador11.Variedades']"}),
'encuesta': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['monitoreo.Encuesta']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'origen': ('django.db.models.fields.IntegerField', [], {})
},
u'indicador11.variedades': {
'Meta': {'ordering': "['cultivo']", 'object_name': 'Variedades'},
'cultivo': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['indicador11.CultivosVariedad']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'variedad': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'lugar.comunidad': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Comunidad'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '40'})
},
u'lugar.departamento': {
'Meta': {'ordering': "['nombre']", 'object_name': 'Departamento'},
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.municipio': {
'Meta': {'ordering': "['departamento__nombre', 'nombre']", 'object_name': 'Municipio'},
'departamento': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'extension': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'latitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'longitud': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '8', 'decimal_places': '5', 'blank': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'unique': 'True', 'null': 'True'})
},
u'lugar.pais': {
'Meta': {'object_name': 'Pais'},
'codigo': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'mapeo.organizaciones': {
'Meta': {'ordering': "[u'nombre']", 'unique_together': "((u'font_color', u'nombre'),)", 'object_name': 'Organizaciones'},
'area_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.AreaAccion']"}),
'contacto': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'correo_electronico': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'direccion': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'font_color': ('mapeo.models.ColorField', [], {'unique': 'True', 'max_length': '10', 'blank': 'True'}),
'fundacion': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'generalidades': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'logo': (u'sorl.thumbnail.fields.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'plataforma': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Plataforma']"}),
'rss': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'sector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.Sector']"}),
'siglas': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'sitio_accion': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['configuracion.SitioAccion']"}),
'sitio_web': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'telefono': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'temas': ('ckeditor.fields.RichTextField', [], {'null': 'True', 'blank': 'True'})
},
u'mapeo.persona': {
'Meta': {'object_name': 'Persona'},
'cedula': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'comunidad': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Comunidad']"}),
'departamento': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Departamento']"}),
'edad': ('django.db.models.fields.IntegerField', [], {}),
'finca': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'municipio': ('smart_selects.db_fields.ChainedForeignKey', [], {'to': u"orm['lugar.Municipio']"}),
'nivel_educacion': ('django.db.models.fields.IntegerField', [], {}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'organizacion': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "u'org'", 'symmetrical': 'False', 'to': u"orm['mapeo.Organizaciones']"}),
'pais': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['lugar.Pais']"}),
'sexo': ('django.db.models.fields.IntegerField', [], {})
},
u'monitoreo.encuesta': {
'Meta': {'object_name': 'Encuesta'},
'fecha': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'jefe': ('django.db.models.fields.IntegerField', [], {}),
'productor': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mapeo.Persona']"}),
'recolector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['monitoreo.Recolector']"}),
'tipo_encuesta': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
u'monitoreo.recolector': {
'Meta': {'object_name': 'Recolector'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nombre': ('django.db.models.fields.CharField', [], {'max_length': '200'})
}
}
complete_apps = ['indicador11']
|
shiminasai/ciat_plataforma
|
monitoreo/indicador11/migrations/0001_initial.py
|
Python
|
mit
| 14,973
|
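A minimal sketch of the models.py that the 0001_initial migration above corresponds to, reconstructed only from the create_table calls and the frozen ORM; the import path for Encuesta and anything not visible there is an assumption.
from django.db import models

from monitoreo.models import Encuesta  # assumed location of monitoreo.Encuesta


class CultivosVariedad(models.Model):
    cultivo = models.CharField(max_length=200)

    class Meta:
        ordering = ['cultivo']


class Variedades(models.Model):
    cultivo = models.ForeignKey(CultivosVariedad)  # pre-Django-2.0 style, as used by South
    variedad = models.CharField(max_length=200)

    class Meta:
        ordering = ['cultivo']


class Semilla(models.Model):
    cultivo = models.ForeignKey(Variedades)
    origen = models.IntegerField()
    encuesta = models.ForeignKey(Encuesta)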
import asyncore
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import socket
import smtpd
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import unittest
from test import support, mock_socket
try:
import threading
except ImportError:
threading = None
HOST = support.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except socket.timeout:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port)
smtp.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
smtp = smtplib.SMTP(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(smtp.source_address, ('127.0.0.1', 19876))
smtp.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
smtp = smtplib.SMTP("%s:%s" % (HOST, self.port))
smtp.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
smtp = smtplib.SMTP(HOST, self.port, local_hostname="testhost")
self.assertEqual(smtp.local_hostname, "testhost")
smtp.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
smtp = smtplib.SMTP(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
smtp = smtplib.SMTP(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(smtp.sock.gettimeout())
smtp.close()
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
smtp = smtplib.SMTP(HOST, self.port, timeout=30)
self.assertEqual(smtp.sock.gettimeout(), 30)
smtp.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(1)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
smtp = smtplib.SMTP()
smtp.set_debuglevel(2)
with support.captured_stderr() as stderr:
smtp.connect(HOST, self.port)
smtp.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except socket.timeout:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
@unittest.skipUnless(threading, 'Threading required for this test.')
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.quit()
def testSourceAddress(self):
# connect
port = support.find_unused_port()
try:
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=3, source_address=('127.0.0.1', port))
self.assertEqual(smtp.source_address, ('127.0.0.1', port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to port %d" % port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testEHLO(self):
# smtpd.SMTPChannel implements EHLO and advertises SIZE and HELP
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('nobody@nowhere.com'), expected)
self.assertEqual(smtp.verify('nobody@nowhere.com'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<warped@silly.walks.com>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'warped@silly.walks.com'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: foo@bar.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m, from_addr='joe@example.com', to_addrs='foo@example.net')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: joe@example.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'foo@example.net'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = 'the_rescuers@Rescue-Aid-Society.com'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: the_rescuers@Rescue-Aid-Society.com$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Add the X-Peer header that DebuggingServer adds
m['X-Peer'] = socket.gethostbyname('localhost')
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: holy@grail.net$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('my_mom@great.cooker.com', 'Jeff', 'doe@losthope.net'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = 'foo@bar.com'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <warped@silly.walks.com>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = 'holy@grail.net'
m['Resent-To'] = 'Martha <my_mom@great.cooker.com>, Jeff'
m['Resent-Bcc'] = 'doe@losthope.net'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = 'holy@grail.net'
m['Resent-From'] = 'Martha <my_mom@great.cooker.com>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=3)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
# test response of client to a non-successful HELO message
@unittest.skipUnless(threading, 'Threading required for this test.')
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
@unittest.skipUnless(threading, 'Threading required for this test.')
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = support.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
threading.Thread(target=server, args=servargs).start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
sim_users = {'Mr.A@somewhere.com':'John A',
'Ms.B@xn--fo-fka.com':'Sally B',
'Mrs.C@somewhereesle.com':'Ruth C',
}
sim_auth = ('Mr.A@somewhere.com', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_auth_credentials = {
'login': 'TXIuQUBzb21ld2hlcmUuY29t',
'plain': 'AE1yLkFAc29tZXdoZXJlLmNvbQBzb21lcGFzc3dvcmQ=',
'cram-md5': ('TXIUQUBZB21LD2HLCMUUY29TIDG4OWQ0MJ'
'KWZGQ4ODNMNDA4NTGXMDRLZWMYZJDMODG1'),
}
sim_auth_login_user = 'TXIUQUBZB21LD2HLCMUUY29T'
sim_auth_plain = 'AE1YLKFAC29TZXDOZXJLLMNVBQBZB21LCGFZC3DVCMQ='
sim_lists = {'list-1':['Mr.A@somewhere.com','Mrs.C@somewhereesle.com'],
'list-2':['Ms.B@xn--fo-fka.com',],
}
# Simulated SMTP channel & server
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_AUTH(self, arg):
mech = arg.strip().lower()
if mech=='cram-md5':
self.push('334 {}'.format(sim_cram_md5_challenge))
elif mech not in sim_auth_credentials:
self.push('504 auth type unimplemented')
return
elif mech=='plain':
self.push('334 ')
elif mech=='login':
self.push('334 ')
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
pass
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = 'nobody@nowhere.com'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
# SimSMTPChannel doesn't fully support AUTH because it requires a
# synchronous read to obtain the credentials...so instead smtpd
# sees the credential sent by smtplib's login method as an unknown command,
# which results in smtplib raising an auth error. Fortunately the error
# message contains the encoded credential, so we can partially check that it
# was generated correctly (partially, because the 'word' is uppercased in
# the error message).
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1], initial_response_ok=False)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_plain, str(err))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials['cram-md5'], str(err))
smtp.close()
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
try: smtp.login(sim_auth[0], sim_auth[1])
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_login_user, str(err))
smtp.close()
def test_auth_function(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
self.serv.add_feature("AUTH CRAM-MD5")
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
supported = {'CRAM-MD5': smtp.auth_cram_md5,
'PLAIN': smtp.auth_plain,
'LOGIN': smtp.auth_login,
}
for mechanism, method in supported.items():
try: smtp.auth(mechanism, method, initial_response_ok=False)
except smtplib.SMTPAuthenticationError as err:
self.assertIn(sim_auth_credentials[mechanism.lower()].upper(),
str(err))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=15)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost', timeout=15)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('John@foo.org', ['Sally@foo.org'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <főo@bar.com>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'főo@bar.com')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
msg = EmailMessage()
msg['From'] = "Páolo <főo@bar.com>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost', timeout=3)
self.addCleanup(smtp.close)
self.assertRaises(smtplib.SMTPNotSupportedError, smtp.send_message, msg)
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does support it, so test it here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
@unittest.skipUnless(threading, 'Threading required for this test.')
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
self.thread.join()
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost', timeout=15)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
@support.reap_threads
def test_main(verbose=None):
support.run_unittest(
BadHELOServerTests,
DebuggingServerTests,
GeneralTests,
NonConnectingTests,
SMTPAUTHInitialResponseSimTests,
SMTPSimTests,
TooLongLineTests,
)
if __name__ == '__main__':
test_main()
|
MalloyPower/parsing-python
|
front-end/testsuite-python-lib/Python-3.5.0/Lib/test/test_smtplib.py
|
Python
|
mit
| 47,462
|
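A minimal usage sketch (not part of the test suite above) of the client/server round trip that DebuggingServerTests automates: smtpd.DebuggingServer prints each received message to stdout between the MSG_BEGIN/MSG_END markers while smtplib delivers it.
import asyncore
import smtpd
import smtplib
import threading


def demo():
    # Port 0 asks the OS for an unused port, as in the tests above.
    server = smtpd.DebuggingServer(('127.0.0.1', 0), ('nowhere', -1), decode_data=True)
    port = server.socket.getsockname()[1]
    loop = threading.Thread(target=asyncore.loop, kwargs={'timeout': 0.1}, daemon=True)
    loop.start()
    client = smtplib.SMTP('127.0.0.1', port, local_hostname='localhost', timeout=3)
    try:
        client.sendmail('John', 'Sally', 'A test message')
    finally:
        client.quit()
        server.close()


if __name__ == '__main__':
    demo()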
from .dual_structured_quad import (
DualRectilinearGraph,
DualStructuredQuadGraph,
DualUniformRectilinearGraph,
)
from .structured_quad import (
RectilinearGraph,
StructuredQuadGraph,
UniformRectilinearGraph,
)
__all__ = [
"StructuredQuadGraph",
"RectilinearGraph",
"UniformRectilinearGraph",
"DualUniformRectilinearGraph",
"DualRectilinearGraph",
"DualStructuredQuadGraph",
]
|
landlab/landlab
|
landlab/graph/structured_quad/__init__.py
|
Python
|
mit
| 426
|
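A short sketch (assumed usage, requires landlab to be installed) of what the __all__ list in the __init__.py above controls: a star import of the subpackage binds exactly the six graph classes listed there.
from landlab.graph.structured_quad import *  # noqa: F401,F403

# Only the names in __all__ are exported by the star import.
print(sorted(name for name in dir() if name.endswith('Graph')))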
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15 (https://github.com/warner/python-versioneer)
import errno
import os
import re
import subprocess
import sys
def get_keywords():
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
pass
def get_config():
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "pep440"
cfg.tag_prefix = ""
cfg.parentdir_prefix = ""
cfg.versionfile_source = "doctr/_version.py"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
pass
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
def decorate(f):
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %s" % dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %s" % (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %s (error)" % dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
# Source tarballs conventionally unpack into a directory that includes
# both the project name and a version string.
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%s', but '%s' doesn't start with "
"prefix '%s'" % (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = set([r.strip() for r in refnames.strip("()").split(",")])
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%s', no digits" % ",".join(refs-tags))
if verbose:
print("likely tags: %s" % ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %s" % r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags"}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
# this runs 'git' from the root of the source tree. This only gets called
# if the git-archive 'subst' keywords were *not* expanded, and
# _version.py hasn't already been rewritten with a short version string,
# meaning we're inside a checked out source tree.
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %s" % root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag, this yields TAG-NUM-gHEX[-dirty]
# if there are no tags, this yields HEX[-dirty] (no NUM)
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long"],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%s'"
% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%s' doesn't start with prefix '%s'"
print(fmt % (full_tag, tag_prefix))
pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
def plus_or_dot(pieces):
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
# now build up version string, with post-release "local version
# identifier". Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
# get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
# exceptions:
# 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%d.g%s" % (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
# TAG[.post.devDISTANCE] . No -dirty
# exceptions:
# 1: no tags. 0.post.devDISTANCE
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%d" % pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%d" % pieces["distance"]
return rendered
def render_pep440_post(pieces):
# TAG[.postDISTANCE[.dev0]+gHEX] . The ".dev0" means dirty. Note that
# .dev0 sorts backwards (a dirty tree will appear "older" than the
# corresponding clean one), but you shouldn't be releasing software with
# -dirty anyways.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%s" % pieces["short"]
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%s" % pieces["short"]
return rendered
def render_pep440_old(pieces):
# TAG[.postDISTANCE[.dev0]] . The ".dev0" means dirty.
# exceptions:
# 1: no tags. 0.postDISTANCE[.dev0]
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%d" % pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
# TAG[-DISTANCE-gHEX][-dirty], like 'git describe --tags --dirty
# --always'
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
# TAG-DISTANCE-gHEX[-dirty], like 'git describe --tags --dirty
    # --always --long'.  The distance/hash is unconditional.
# exceptions:
# 1: no tags. HEX[-dirty] (note: no 'g' prefix)
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render(pieces, style):
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style or style == "default":
style = "pep440" # the default
if style == "pep440":
rendered = render_pep440(pieces)
elif style == "pep440-pre":
rendered = render_pep440_pre(pieces)
elif style == "pep440-post":
rendered = render_pep440_post(pieces)
elif style == "pep440-old":
rendered = render_pep440_old(pieces)
elif style == "git-describe":
rendered = render_git_describe(pieces)
elif style == "git-describe-long":
rendered = render_git_describe_long(pieces)
else:
raise ValueError("unknown style '%s'" % style)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
|
drdoctr/doctr
|
doctr/_version.py
|
Python
|
mit
| 15,755
|
"""Module containing routines for estimating covariance models.
"""
import math
from pyvol.tseries.Transforms import SeqXform
import CovUtils
class CovEstimator:
"""Base class illustrating the API covariance estimators should follow
All covariance estimators should subclass this class and override at
least the Estimate method.
"""
def __init__(self):
self.returnColName = None #Set later by SetReturnColName
self.returnCol = None #Set later by Startup
def SetReturnColName(self, returnColName):
"""Set the return column name.
INPUTS:
-- returnColName: String indicating which column to look at
for lowest level return data.
-------------------------------------------------------
PURPOSE: This method tells the estimator what column in
a Sequence.TimeSeq object will contain the lowest
level set of return data.
"""
self.returnColName = returnColName
def Startup(self, query):
"""Prepare to do estimation on data in a given query.
INPUTS:
-- query: A Sequence.TimeSeq object with raw data for estimation.
-------------------------------------------------------
PURPOSE: Prepare to do estimation on data in a given query.
Subclasses may override if desired.
"""
self.returnCol = query.GetColIndex(self.returnColName)
def Shutdown(self, query):
"""Finishing doing estimation on data in a given query.
INPUTS:
-- query: A Sequence.TimeSeq object with raw data for estimation.
-------------------------------------------------------
PURPOSE: Finish doing estimation on data in a given query.
Subclasses may override if desired.
"""
_ignore = self, query
def Estimate(self, args, query, line):
"""Estimate volatility
INPUTS:
-- args: Dictionary where keys are columns in query and values
are the values for the current line.
-- query: Full TimeSeq object containing past data that can
be used in doing the estimate.
-- line: Integer line number in query representing row we
are doing estimation for.
-------------------------------------------------------
RETURNS: Return a floating point number representing an
estimate of the volatility for the next period.
This method should NOT modify query.
-------------------------------------------------------
PURPOSE: Subclasses should override this method to produce
an estimate of the next period volatility.
"""
raise NotImplementedError
class FixedLookackCovEstimator(CovEstimator):
"""Covariance estimator with a fixed lookback.
"""
def __init__(self, lookback, *args, **kw):
"""Initializer.
INPUTS:
-- lookback: Integer lookback to indicate how far back to look in
the data.
-- *args, **kw: Passed to CovEstimator.__init__.
"""
self.lookback = lookback
CovEstimator.__init__(self, *args, **kw)
def Estimate(self, args, query, line):
"""Override Estimate as required by CovEstimator class
        Here is the function where we do the actual work.  In this case,
        we simply compute the sample variance of the returns from
        line - self.lookback up to (but not including) the current line.
"""
_ignore = args
c = self.returnCol
startLine = line - self.lookback
if (startLine < 0):
return None
returns = [row[c] for row in query.data[startLine:line]]
volEst = math.sqrt(CovUtils.Var(returns))
return volEst
class ThirteenWeekLookbackEstimator(FixedLookackCovEstimator):
"Just like FixedLookackCovEstimator but with lookback of 13 weeks."
def __init__(self, *args, **kw):
FixedLookackCovEstimator.__init__(self, 13*5, *args, **kw)
class EstimateVols(SeqXform):
"""Transform to take an existing estimator and estimate vols.
This transform is useful to simplify the process of computing estimates.
>>> import random
>>> import CovEst
>>> from pyvol.sims import DataSimulator, SimAPI
>>> seed = 64
>>> random.seed(seed)
>>> simulator = DataSimulator.PriceSimulator()
>>> returnColName = simulator.levelParams.retNames[-1]
>>> estimator = CovEst.FixedLookackCovEstimator(lookback=26*5)
>>> estimator.SetReturnColName(returnColName)
>>> estimatorTx = CovEst.EstimateVols(estimator, 'estimatedVol')
>>> query = simulator.MakeSimulatedData([estimatorTx])
>>> stats = SimAPI.MakeStats(query, simulator.levelParams)
>>> print stats
yearly : ret = 0.026, vol = 0.195, sqCorrs = 1.000, 0.123, -0.084
quarterly : ret = 0.025, vol = 0.222, sqCorrs = 1.000, 0.103, 0.138
monthly : ret = 0.026, vol = 0.223, sqCorrs = 1.000, 0.054, 0.158
weekly : ret = 0.026, vol = 0.242, sqCorrs = 1.000, 0.175, 0.249
daily : ret = 0.026, vol = 0.250, sqCorrs = 1.000, 0.422, 0.276
"""
def __init__(self, estimator, estName):
self.estimator = estimator
SeqXform.__init__(self, [estimator.returnColName], [estName])
def Startup(self, query):
"Prepare to start doing estimation."
self.estimator.Startup(query)
def Shutdown(self, query):
"Finish doing estimation."
self.estimator.Shutdown(query)
def ProcessRowInSeq(self, args, query, line):
"Call our estimator and return results."
volEst = self.estimator.Estimate(args, query, line)
return [volEst]
class ScaleToConstantRisk(SeqXform):
"""Transform to scale positions to constant risk based on vol forecast.
This class is useful in taking a target volatility level, existing
positions, and forecast volatility and scaling the positions to have
the given target volatility.
"""
def __init__(self, targetVol, positionName, estimateName, scaledPos):
"""Initializer.
INPUTS:
-- targetVol: Floating point target volatility.
-- positionName: String naming position field.
-- estimateName: String naming vol estimate field.
-- scaledPos: String naming output scaled position.
"""
self.targetVol = targetVol
SeqXform.__init__(self, [positionName, estimateName], [scaledPos])
def ProcessRow(self, args):
"Override as required by SeqXform to compute scaled position"
position, estimate = [args[n] for n in self.inputFields]
if (position is None or estimate is None):
return [position]
else:
return [position / estimate * self.targetVol]
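# Editor's note: a hypothetical worked example (not from pyvol) of the scaling
# rule above.  With a target vol of 0.10, a position of 100 and a forecast vol
# of 0.25, the position is cut to 100 / 0.25 * 0.10 == 40.0, i.e. 40% of its
# size, because forecast risk is 2.5x the target.
def _example_scale_to_constant_risk(position=100.0, estimate=0.25,
                                    target_vol=0.10):
    "Reproduce ScaleToConstantRisk.ProcessRow's arithmetic in isolation."
    if position is None or estimate is None:
        return position
    return position / estimate * target_vol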
def _test():
"Test docstrings in module."
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
print 'Test finished.'
|
eiriks/pyvol
|
pyvol/est/CovEst.py
|
Python
|
mit
| 7,719
|
# -*- coding: utf-8 -*-
#
# This file is part of INSPIRE.
# Copyright (C) 2014, 2015 CERN.
#
# INSPIRE is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# INSPIRE is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with INSPIRE. If not, see <http://www.gnu.org/licenses/>.
#
# In applying this licence, CERN does not waive the privileges and immunities
# granted to it by virtue of its status as an Intergovernmental Organization
# or submit itself to any jurisdiction.
from invenio_workflows.definitions import WorkflowBase
class HarvestingWorkflowBase(WorkflowBase):
"""Base harvesting definition."""
@staticmethod
def get_title(bwo, **kwargs):
"""Return the value to put in the title column of HoldingPen."""
args = bwo.get_extra_data().get("args", {})
return "Summary of {0} harvesting from {1} to {2}".format(
args.get("workflow", "unknown"),
args.get("from_date", "unknown"),
args.get("to_date", "unknown"),
)
@staticmethod
def get_description(bwo, **kwargs):
"""Return the value to put in the title column of HoldingPen."""
return "No description. See log for details."
@staticmethod
def formatter(obj, **kwargs):
"""Format the object."""
return "No data. See log for details."
|
jalavik/inspire-next
|
inspire/modules/harvester/definitions.py
|
Python
|
gpl-2.0
| 1,759
|
from client import get_client
from client import (
BIGQUERY_SCOPE,
BIGQUERY_SCOPE_READ_ONLY,
JOB_CREATE_IF_NEEDED,
JOB_CREATE_NEVER,
JOB_SOURCE_FORMAT_NEWLINE_DELIMITED_JSON,
JOB_SOURCE_FORMAT_DATASTORE_BACKUP,
JOB_SOURCE_FORMAT_CSV,
JOB_WRITE_TRUNCATE,
JOB_WRITE_APPEND,
JOB_WRITE_EMPTY,
JOB_ENCODING_UTF_8,
JOB_ENCODING_ISO_8859_1
)
from schema_builder import schema_from_record
|
thinksource/angularbigquery
|
lib/bigquery/__init__.py
|
Python
|
gpl-2.0
| 430
|
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2005, 2006, 2007, 2008, 2009, 2010, 2011 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""This is file handles the command line interface
* We parse the options for both daemon and standalone usage
* When using using the standalone mode, we use the function "main"
defined here to begin the extraction of references
"""
__revision__ = "$Id$"
import traceback
import optparse
import sys
from invenio.docextract_record import print_records
from invenio.docextract_utils import write_message, setup_loggers
from invenio.bibtask import task_update_progress
from invenio.refextract_api import extract_references_from_file, \
extract_references_from_string
# Is refextract running standalone? (Default = yes)
RUNNING_INDEPENDENTLY = False
DESCRIPTION = ""
# Help message, used by bibtask's 'task_init()' and 'usage()'
HELP_MESSAGE = """
--kb-journals Manually specify the location of a journal title
knowledge-base file.
--kb-journals-re Manually specify the location of a journal title regexps
knowledge-base file.
--kb-report-numbers Manually specify the location of a report number
knowledge-base file.
--kb-authors Manually specify the location of an author
knowledge-base file.
--kb-books Manually specify the location of a book
knowledge-base file.
--no-overwrite Do not touch record if it already has references
"""
HELP_STANDALONE_MESSAGE = """
Standalone Refextract options:
-o, --out Write the extracted references, in xml form, to a file
rather than standard output.
--dictfile Write statistics about all matched title abbreviations
(i.e. LHS terms in the titles knowledge base) to a file.
--output-raw-refs Output raw references, as extracted from the document.
No MARC XML mark-up - just each extracted line, prefixed
by the recid of the document that it came from.
--raw-references Treat the input file as pure references. i.e. skip the
stage of trying to locate the reference section within a
document and instead move to the stage of recognition
and standardisation of citations within lines.
"""
USAGE_MESSAGE = """Usage: docextract [options] file1 [file2 ...]
Command options: %s%s
Examples:
docextract -o /home/chayward/refs.xml /home/chayward/thesis.pdf
""" % (HELP_MESSAGE, HELP_STANDALONE_MESSAGE)
def get_cli_options():
"""Get the various arguments and options from the command line and populate
a dictionary of cli_options.
@return: (tuple) of 2 elements. First element is a dictionary of cli
options and flags, set as appropriate; Second element is a list of cli
arguments.
"""
parser = optparse.OptionParser(description=DESCRIPTION,
usage=USAGE_MESSAGE,
add_help_option=False)
# Display help and exit
parser.add_option('-h', '--help', action='store_true')
# Display version and exit
parser.add_option('-V', '--version', action='store_true')
# Output recognised journal titles in the Inspire compatible format
parser.add_option('-i', '--inspire', action='store_true')
# The location of the report number kb requested to override
# a 'configuration file'-specified kb
parser.add_option('--kb-report-numbers', dest='kb_report_numbers')
# The location of the journal title kb requested to override
# a 'configuration file'-specified kb, holding
# 'seek---replace' terms, used when matching titles in references
parser.add_option('--kb-journals', dest='kb_journals')
parser.add_option('--kb-journals-re', dest='kb_journals_re')
    # The location of the author kb requested to override
    parser.add_option('--kb-authors', dest='kb_authors')
    # The location of the book kb requested to override
    parser.add_option('--kb-books', dest='kb_books')
    # The location of the conference kb requested to override
    parser.add_option('--kb-conferences', dest='kb_conferences')
# Write out the statistics of all titles matched during the
# extraction job to the specified file
parser.add_option('--dictfile')
# Write out MARC XML references to the specified file
parser.add_option('-o', '--out', dest='xmlfile')
# Handle verbosity
parser.add_option('-v', '--verbose', type=int, dest='verbosity', default=0)
# Output a raw list of refs
parser.add_option('--output-raw-refs', action='store_true',
dest='output_raw')
# Treat input as pure reference lines:
# (bypass the reference section lookup)
parser.add_option('--raw-references', action='store_true',
dest='treat_as_reference_section')
return parser.parse_args()
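def _example_cli_parse():
    """Editor's hypothetical sketch (not part of refextract): parse a fixed
    argument list the same way get_cli_options() does, without touching
    sys.argv.  The file paths are illustrative only."""
    parser = optparse.OptionParser(add_help_option=False)
    parser.add_option('-o', '--out', dest='xmlfile')
    parser.add_option('-v', '--verbose', type=int, dest='verbosity', default=0)
    options, args = parser.parse_args(['-o', '/tmp/refs.xml', '-v', '9',
                                       'thesis.pdf'])
    # -> ('/tmp/refs.xml', 9, ['thesis.pdf'])
    return options.xmlfile, options.verbosity, args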
def halt(err=StandardError, msg=None, exit_code=1):
""" Stop extraction, and deal with the error in the appropriate
manner, based on whether Refextract is running in standalone or
bibsched mode.
@param err: (exception) The exception raised from an error, if any
@param msg: (string) The brief error message, either displayed
on the bibsched interface, or written to stderr.
@param exit_code: (integer) Either 0 or 1, depending on the cause
of the halting. This is only used when running standalone."""
# If refextract is running independently, exit.
# 'RUNNING_INDEPENDENTLY' is a global variable
if RUNNING_INDEPENDENTLY:
if msg:
write_message(msg, stream=sys.stderr, verbose=0)
sys.exit(exit_code)
# Else, raise an exception so Bibsched will flag this task.
else:
if msg:
# Update the status of refextract inside the Bibsched UI
task_update_progress(msg.strip())
raise err(msg)
def usage(wmsg=None, err_code=0):
"""Display a usage message for refextract on the standard error stream and
then exit.
@param wmsg: (string) some kind of brief warning message for the user.
@param err_code: (integer) an error code to be passed to halt,
which is called after the usage message has been printed.
@return: None.
"""
if wmsg:
wmsg = wmsg.strip()
# Display the help information and the warning in the stderr stream
# 'help_message' is global
print >> sys.stderr, USAGE_MESSAGE
# Output error message, either to the stderr stream also or
# on the interface. Stop the extraction procedure
halt(msg=wmsg, exit_code=err_code)
def main(config, args, run):
"""Main wrapper function for begin_extraction, and is
always accessed in a standalone/independent way. (i.e. calling main
will cause refextract to run in an independent mode)"""
# Flag as running out of bibtask
global RUNNING_INDEPENDENTLY
RUNNING_INDEPENDENTLY = True
if config.verbosity not in range(0, 10):
usage("Error: Verbosity must be an integer between 0 and 10")
setup_loggers(config.verbosity)
if config.version:
# version message and exit
write_message(__revision__, verbose=0)
halt(exit_code=0)
if config.help:
usage()
if not args:
# no files provided for reference extraction - error message
usage("Error: No valid input file specified (file1 [file2 ...])")
try:
run(config, args)
write_message("Extraction complete", verbose=2)
except StandardError, e:
# Remove extra '\n'
write_message(traceback.format_exc()[:-1], verbose=9)
write_message("Error: %s" % e, verbose=0)
halt(exit_code=1)
def extract_one(config, pdf_path):
"""Extract references from one file"""
# If necessary, locate the reference section:
if config.treat_as_reference_section:
docbody = open(pdf_path).read().decode('utf-8')
record = extract_references_from_string(docbody)
else:
write_message("* processing pdffile: %s" % pdf_path, verbose=2)
record = extract_references_from_file(pdf_path)
return record
def begin_extraction(config, files):
"""Starts the core extraction procedure. [Entry point from main]
Only refextract_daemon calls this directly, from _task_run_core()
@param daemon_cli_options: contains the pre-assembled list of cli flags
and values processed by the Refextract Daemon. This is full only when
called as a scheduled bibtask inside bibsched.
"""
# Store records here
records = []
for num, path in enumerate(files):
# Announce the document extraction number
write_message("Extracting %d of %d" % (num + 1, len(files)),
verbose=1)
# Parse references
rec = extract_one(config, path)
records.append(rec)
# Write our references
write_references(config, records)
def write_references(config, records):
"""Write in marcxml"""
if config.xmlfile:
ofilehdl = open(config.xmlfile, 'w')
else:
ofilehdl = sys.stdout
if config.xmlfile:
for rec in records:
for subfield in rec.find_subfields('999C5m'):
if len(subfield.value) > 2048:
subfield.value = subfield.value[:2048]
try:
xml = print_records(records)
print >>ofilehdl, xml
ofilehdl.flush()
except IOError, err:
write_message("%s\n%s\n" % (config.xmlfile, err),
sys.stderr, verbose=0)
halt(err=IOError, msg="Error: Unable to write to '%s'"
% config.xmlfile, exit_code=1)
|
CERNDocumentServer/invenio
|
modules/docextract/lib/refextract_cli.py
|
Python
|
gpl-2.0
| 10,531
|
#!/usr/bin/python3
# requirements: partition /dev/sdc1 with swap
from storage import *
from storageitu import *
set_logger(get_logfile_logger())
environment = Environment(False)
storage = Storage(environment)
storage.probe()
staging = storage.get_staging()
print(staging)
sdc1 = BlkDevice.find_by_name(staging, "/dev/sdc1")
blk_filesystem = sdc1.get_blk_filesystem()
blk_filesystem.set_uuid("7420b069-cd50-464e-b0b2-66c1fdc75bcd")
print(staging)
commit(storage)
|
aschnell/libstorage-ng
|
integration-tests/filesystems/set-uuid.py
|
Python
|
gpl-2.0
| 477
|
#!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# tails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
def paint_icon_inventory(what, row, tags, customer_vars):
if (what == "host" or row.get("service_check_command","").startswith("check_mk_active-cmk_inv!")) \
and inventory.has_inventory(row["host_name"]):
return link_to_view(html.render_icon('inv', _("Show Hardware/Software-Inventory of this host")),
row, 'inv_host' )
multisite_icons.append({
'host_columns': [ "name" ],
'paint': paint_icon_inventory,
})
|
alberts/check_mk
|
web/plugins/icons/inventory.py
|
Python
|
gpl-2.0
| 1,927
|
# Copyright 2014 Diamond Light Source Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
.. module:: monitor_correction_test
:platform: Unix
:synopsis: tests the monitor correction
.. moduleauthor:: Aaron Parsons <scientificsoftware@diamond.ac.uk>
"""
import unittest
from savu.test import test_utils as tu
from savu.test.travis.framework_tests.plugin_runner_test import \
run_protected_plugin_runner
class MonitorCorrectionTest(unittest.TestCase):
def test_monitor_correction(self):
data_file = tu.get_test_data_path('mm.nxs')
process_file = tu.get_test_process_path('monitor_correction_test.nxs')
run_protected_plugin_runner(tu.set_options(data_file,
process_file=process_file))
if __name__ == "__main__":
unittest.main()
|
FedeMPouzols/Savu
|
savu/test/travis/plugin_tests/filter_tests/monitor_correction_test.py
|
Python
|
gpl-3.0
| 1,328
|
# -*- coding: utf-8 -*-
#########################################################################
#
# Copyright (C) 2016 OSGeo
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
#########################################################################
import taggit
from django import forms
from django.contrib.auth import get_user_model
from django.contrib.auth.forms import UserCreationForm, UserChangeForm
from django.utils.translation import ugettext as _
from geonode.base.models import ContactRole
from captcha.fields import ReCaptchaField
# Ported in from django-registration
attrs_dict = {'class': 'required'}
class AllauthReCaptchaSignupForm(forms.Form):
captcha = ReCaptchaField()
def signup(self, request, user):
""" Required, or else it thorws deprecation warnings """
pass
class ProfileCreationForm(UserCreationForm):
class Meta:
model = get_user_model()
fields = ("username",)
def clean_username(self):
# Since User.username is unique, this check is redundant,
# but it sets a nicer error message than the ORM. See #13147.
username = self.cleaned_data["username"]
try:
get_user_model().objects.get(username=username)
except get_user_model().DoesNotExist:
return username
raise forms.ValidationError(
self.error_messages['duplicate_username'],
code='duplicate_username',
)
class ProfileChangeForm(UserChangeForm):
class Meta:
model = get_user_model()
fields = '__all__'
class ForgotUsernameForm(forms.Form):
email = forms.EmailField(widget=forms.TextInput(attrs=dict(attrs_dict,
maxlength=75)),
label=_('Email Address'))
class RoleForm(forms.ModelForm):
class Meta:
model = ContactRole
exclude = ('contact', 'layer')
class PocForm(forms.Form):
contact = forms.ModelChoiceField(label="New point of contact",
queryset=get_user_model().objects.all())
class ProfileForm(forms.ModelForm):
keywords = taggit.forms.TagField(
label=_("Keywords"),
required=False,
help_text=_("A space or comma-separated list of keywords"))
class Meta:
model = get_user_model()
exclude = (
'user',
'password',
'last_login',
'groups',
'user_permissions',
'username',
'is_staff',
'is_superuser',
'is_active',
'date_joined'
)
|
francbartoli/geonode
|
geonode/people/forms.py
|
Python
|
gpl-3.0
| 3,226
|
import frappe
from frappe import _
change_icons_map = [
{
"module_name": "Patient",
"color": "#6BE273",
"icon": "fa fa-user",
"doctype": "Patient",
"type": "link",
"link": "List/Patient",
"label": _("Patient")
},
{
"module_name": "Patient Encounter",
"color": "#2ecc71",
"icon": "fa fa-stethoscope",
"doctype": "Patient Encounter",
"type": "link",
"link": "List/Patient Encounter",
"label": _("Patient Encounter"),
},
{
"module_name": "Healthcare Practitioner",
"color": "#2ecc71",
"icon": "fa fa-user-md",
"doctype": "Healthcare Practitioner",
"type": "link",
"link": "List/Healthcare Practitioner",
"label": _("Healthcare Practitioner")
},
{
"module_name": "Patient Appointment",
"color": "#934F92",
"icon": "fa fa-calendar-plus-o",
"doctype": "Patient Appointment",
"type": "link",
"link": "List/Patient Appointment",
"label": _("Patient Appointment")
},
{
"module_name": "Lab Test",
"color": "#7578f6",
"icon": "octicon octicon-beaker",
"doctype": "Lab Test",
"type": "link",
"link": "List/Lab Test",
"label": _("Lab Test")
}
]
def execute():
change_healthcare_desktop_icons()
def change_healthcare_desktop_icons():
doctypes = ["patient", "patient_encounter", "healthcare_practitioner",
"patient_appointment", "lab_test"]
for doctype in doctypes:
frappe.reload_doc("healthcare", "doctype", doctype)
for spec in change_icons_map:
frappe.db.sql("""
delete from `tabDesktop Icon`
where _doctype = '{0}'
""".format(spec['doctype']))
desktop_icon = frappe.new_doc("Desktop Icon")
desktop_icon.hidden = 1
desktop_icon.standard = 1
desktop_icon.icon = spec['icon']
desktop_icon.color = spec['color']
desktop_icon.module_name = spec['module_name']
desktop_icon.label = spec['label']
desktop_icon.app = "erpnext"
desktop_icon.type = spec['type']
desktop_icon._doctype = spec['doctype']
desktop_icon.link = spec['link']
desktop_icon.save(ignore_permissions=True)
frappe.db.sql("""
delete from `tabDesktop Icon`
where module_name = 'Healthcare' and type = 'module'
""")
desktop_icon = frappe.new_doc("Desktop Icon")
desktop_icon.hidden = 1
desktop_icon.standard = 1
desktop_icon.icon = "fa fa-heartbeat"
desktop_icon.color = "#FF888B"
desktop_icon.module_name = "Healthcare"
desktop_icon.label = _("Healthcare")
desktop_icon.app = "erpnext"
desktop_icon.type = 'module'
desktop_icon.save(ignore_permissions=True)
|
chdecultot/erpnext
|
erpnext/patches/v11_0/change_healthcare_desktop_icons.py
|
Python
|
gpl-3.0
| 2,450
|
# This file is part of Checkbox.
#
# Copyright 2012 Canonical Ltd.
# Written by:
# Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
# Daniel Manrique <roadmr@ubuntu.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
plainbox.impl.exporter.test_text
================================
Test definitions for plainbox.impl.exporter.text module
"""
from io import BytesIO
from unittest import TestCase
from plainbox.impl.exporter.text import TextSessionStateExporter
class TextSessionStateExporterTests(TestCase):
def test_default_dump(self):
exporter = TextSessionStateExporter()
# Text exporter expects this data format
data = {'result_map': {'job_name': {'outcome': 'fail'}}}
stream = BytesIO()
exporter.dump(data, stream)
expected_bytes = "job_name: fail\n".encode('UTF-8')
self.assertEqual(stream.getvalue(), expected_bytes)
|
jds2001/ocp-checkbox
|
plainbox/plainbox/impl/exporter/test_text.py
|
Python
|
gpl-3.0
| 1,478
|
# (c) 2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ctypes.util
import errno
import fcntl
import getpass
import locale
import logging
import os
import random
import subprocess
import sys
import textwrap
import time
from struct import unpack, pack
from termios import TIOCGWINSZ
from ansible import constants as C
from ansible.errors import AnsibleError, AnsibleAssertionError
from ansible.module_utils._text import to_bytes, to_text
from ansible.module_utils.six import text_type
from ansible.utils.color import stringc
from ansible.utils.singleton import Singleton
from ansible.utils.unsafe_proxy import wrap_var
_LIBC = ctypes.cdll.LoadLibrary(ctypes.util.find_library('c'))
# Set argtypes, to avoid segfault if the wrong type is provided,
# restype is assumed to be c_int
_LIBC.wcwidth.argtypes = (ctypes.c_wchar,)
_LIBC.wcswidth.argtypes = (ctypes.c_wchar_p, ctypes.c_int)
# Max for c_int
_MAX_INT = 2 ** (ctypes.sizeof(ctypes.c_int) * 8 - 1) - 1
_LOCALE_INITIALIZED = False
_LOCALE_INITIALIZATION_ERR = None
def initialize_locale():
"""Set the locale to the users default setting
and set ``_LOCALE_INITIALIZED`` to indicate whether
``get_text_width`` may run into trouble
"""
global _LOCALE_INITIALIZED, _LOCALE_INITIALIZATION_ERR
if _LOCALE_INITIALIZED is False:
try:
locale.setlocale(locale.LC_ALL, '')
except locale.Error as e:
_LOCALE_INITIALIZATION_ERR = e
else:
_LOCALE_INITIALIZED = True
def get_text_width(text):
"""Function that utilizes ``wcswidth`` or ``wcwidth`` to determine the
number of columns used to display a text string.
    We try first with ``wcswidth``, and fall back to iterating over each
    character and using ``wcwidth`` individually, treating non-printable
    wide characters as having a width of 0.
On Py2, this depends on ``locale.setlocale(locale.LC_ALL, '')``,
that in the case of Ansible is done in ``bin/ansible``
"""
if not isinstance(text, text_type):
raise TypeError('get_text_width requires text, not %s' % type(text))
if _LOCALE_INITIALIZATION_ERR:
Display().warning(
'An error occurred while calling ansible.utils.display.initialize_locale '
'(%s). This may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths' % _LOCALE_INITIALIZATION_ERR
)
elif not _LOCALE_INITIALIZED:
Display().warning(
'ansible.utils.display.initialize_locale has not been called, '
'this may result in incorrectly calculated text widths that can '
'cause Display to print incorrect line lengths'
)
try:
width = _LIBC.wcswidth(text, _MAX_INT)
except ctypes.ArgumentError:
width = -1
if width != -1:
return width
width = 0
counter = 0
for c in text:
counter += 1
if c in (u'\x08', u'\x7f', u'\x94', u'\x1b'):
            # A few characters result in a subtraction of length:
            # BS, DEL, CCH, ESC
            # ESC is non-printable itself, but it also marks the start of an
            # escape sequence, so the whole sequence ends up contributing a
            # single non-printable length
width -= 1
counter -= 1
continue
try:
w = _LIBC.wcwidth(c)
except ctypes.ArgumentError:
w = -1
if w == -1:
# -1 signifies a non-printable character
# use 0 here as a best effort
w = 0
width += w
if width == 0 and counter and not _LOCALE_INITIALIZED:
raise EnvironmentError(
'ansible.utils.display.initialize_locale has not been called, '
'and get_text_width could not calculate text width of %r' % text
)
# It doesn't make sense to have a negative printable width
return width if width >= 0 else 0
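def _example_text_width():
    """Editor's hypothetical sketch (not part of Ansible): display width is
    measured in terminal columns, not code points, so East Asian wide
    characters usually count as two columns.  Exact values depend on the C
    library's wcwidth tables, so the expected result is indicative only."""
    initialize_locale()
    # On a typical UTF-8 locale this returns (7, 6).
    return get_text_width(u'ansible'), get_text_width(u'日本語')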
class FilterBlackList(logging.Filter):
def __init__(self, blacklist):
self.blacklist = [logging.Filter(name) for name in blacklist]
def filter(self, record):
return not any(f.filter(record) for f in self.blacklist)
class FilterUserInjector(logging.Filter):
"""
This is a filter which injects the current user as the 'user' attribute on each record. We need to add this filter
to all logger handlers so that 3rd party libraries won't print an exception due to user not being defined.
"""
try:
username = getpass.getuser()
except KeyError:
# people like to make containers w/o actual valid passwd/shadow and use host uids
username = 'uid=%s' % os.getuid()
def filter(self, record):
record.user = FilterUserInjector.username
return True
logger = None
# TODO: make this a callback event instead
if getattr(C, 'DEFAULT_LOG_PATH'):
path = C.DEFAULT_LOG_PATH
if path and (os.path.exists(path) and os.access(path, os.W_OK)) or os.access(os.path.dirname(path), os.W_OK):
# NOTE: level is kept at INFO to avoid security disclosures caused by certain libraries when using DEBUG
logging.basicConfig(filename=path, level=logging.INFO, # DO NOT set to logging.DEBUG
format='%(asctime)s p=%(process)d u=%(user)s n=%(name)s | %(message)s')
logger = logging.getLogger('ansible')
for handler in logging.root.handlers:
handler.addFilter(FilterBlackList(getattr(C, 'DEFAULT_LOG_FILTER', [])))
handler.addFilter(FilterUserInjector())
else:
print("[WARNING]: log file at %s is not writeable and we cannot create it, aborting\n" % path, file=sys.stderr)
# map color to log levels
color_to_log_level = {C.COLOR_ERROR: logging.ERROR,
C.COLOR_WARN: logging.WARNING,
C.COLOR_OK: logging.INFO,
C.COLOR_SKIP: logging.WARNING,
C.COLOR_UNREACHABLE: logging.ERROR,
C.COLOR_DEBUG: logging.DEBUG,
C.COLOR_CHANGED: logging.INFO,
C.COLOR_DEPRECATE: logging.WARNING,
C.COLOR_VERBOSE: logging.INFO}
b_COW_PATHS = (
b"/usr/bin/cowsay",
b"/usr/games/cowsay",
b"/usr/local/bin/cowsay", # BSD path for cowsay
b"/opt/local/bin/cowsay", # MacPorts path for cowsay
)
class Display(metaclass=Singleton):
def __init__(self, verbosity=0):
self.columns = None
self.verbosity = verbosity
# list of all deprecation messages to prevent duplicate display
self._deprecations = {}
self._warns = {}
self._errors = {}
self.b_cowsay = None
self.noncow = C.ANSIBLE_COW_SELECTION
self.set_cowsay_info()
if self.b_cowsay:
try:
cmd = subprocess.Popen([self.b_cowsay, "-l"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
if cmd.returncode:
raise Exception
self.cows_available = {to_text(c) for c in out.split()} # set comprehension
if C.ANSIBLE_COW_ACCEPTLIST and any(C.ANSIBLE_COW_ACCEPTLIST):
self.cows_available = set(C.ANSIBLE_COW_ACCEPTLIST).intersection(self.cows_available)
except Exception:
# could not execute cowsay for some reason
self.b_cowsay = False
self._set_column_width()
def set_cowsay_info(self):
if C.ANSIBLE_NOCOWS:
return
if C.ANSIBLE_COW_PATH:
self.b_cowsay = C.ANSIBLE_COW_PATH
else:
for b_cow_path in b_COW_PATHS:
if os.path.exists(b_cow_path):
self.b_cowsay = b_cow_path
def display(self, msg, color=None, stderr=False, screen_only=False, log_only=False, newline=True):
""" Display a message to the user
Note: msg *must* be a unicode string to prevent UnicodeError tracebacks.
"""
nocolor = msg
if not log_only:
has_newline = msg.endswith(u'\n')
if has_newline:
msg2 = msg[:-1]
else:
msg2 = msg
if color:
msg2 = stringc(msg2, color)
if has_newline or newline:
msg2 = msg2 + u'\n'
msg2 = to_bytes(msg2, encoding=self._output_encoding(stderr=stderr))
# Convert back to text string
# We first convert to a byte string so that we get rid of
# characters that are invalid in the user's locale
msg2 = to_text(msg2, self._output_encoding(stderr=stderr), errors='replace')
# Note: After Display() class is refactored need to update the log capture
# code in 'bin/ansible-connection' (and other relevant places).
if not stderr:
fileobj = sys.stdout
else:
fileobj = sys.stderr
fileobj.write(msg2)
try:
fileobj.flush()
except IOError as e:
# Ignore EPIPE in case fileobj has been prematurely closed, eg.
# when piping to "head -n1"
if e.errno != errno.EPIPE:
raise
if logger and not screen_only:
# We first convert to a byte string so that we get rid of
# color and characters that are invalid in the user's locale
msg2 = to_bytes(nocolor.lstrip(u'\n'))
# Convert back to text string
msg2 = to_text(msg2, self._output_encoding(stderr=stderr))
lvl = logging.INFO
if color:
# set logger level based on color (not great)
try:
lvl = color_to_log_level[color]
except KeyError:
# this should not happen, but JIC
raise AnsibleAssertionError('Invalid color supplied to display: %s' % color)
# actually log
logger.log(lvl, msg2)
def v(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=0)
def vv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=1)
def vvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=2)
def vvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=3)
def vvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=4)
def vvvvvv(self, msg, host=None):
return self.verbose(msg, host=host, caplevel=5)
def debug(self, msg, host=None):
if C.DEFAULT_DEBUG:
if host is None:
self.display("%6d %0.5f: %s" % (os.getpid(), time.time(), msg), color=C.COLOR_DEBUG)
else:
self.display("%6d %0.5f [%s]: %s" % (os.getpid(), time.time(), host, msg), color=C.COLOR_DEBUG)
def verbose(self, msg, host=None, caplevel=2):
to_stderr = C.VERBOSE_TO_STDERR
if self.verbosity > caplevel:
if host is None:
self.display(msg, color=C.COLOR_VERBOSE, stderr=to_stderr)
else:
self.display("<%s> %s" % (host, msg), color=C.COLOR_VERBOSE, stderr=to_stderr)
def get_deprecation_message(self, msg, version=None, removed=False, date=None, collection_name=None):
        ''' Build the text of a deprecation message; display happens in deprecated(). '''
msg = msg.strip()
if msg and msg[-1] not in ['!', '?', '.']:
msg += '.'
if collection_name == 'ansible.builtin':
collection_name = 'ansible-core'
if removed:
header = '[DEPRECATED]: {0}'.format(msg)
removal_fragment = 'This feature was removed'
help_text = 'Please update your playbooks.'
else:
header = '[DEPRECATION WARNING]: {0}'.format(msg)
removal_fragment = 'This feature will be removed'
# FUTURE: make this a standalone warning so it only shows up once?
help_text = 'Deprecation warnings can be disabled by setting deprecation_warnings=False in ansible.cfg.'
if collection_name:
from_fragment = 'from {0}'.format(collection_name)
else:
from_fragment = ''
if date:
when = 'in a release after {0}.'.format(date)
elif version:
when = 'in version {0}.'.format(version)
else:
when = 'in a future release.'
message_text = ' '.join(f for f in [header, removal_fragment, from_fragment, when, help_text] if f)
return message_text
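    # Editor's note (hedged worked example; message and version are made up):
    #   Display().get_deprecation_message('Use "new_opt" instead',
    #                                     version='2.14',
    #                                     collection_name='ansible.builtin')
    # returns:
    #   '[DEPRECATION WARNING]: Use "new_opt" instead. This feature will be '
    #   'removed from ansible-core in version 2.14. Deprecation warnings can '
    #   'be disabled by setting deprecation_warnings=False in ansible.cfg.'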
def deprecated(self, msg, version=None, removed=False, date=None, collection_name=None):
if not removed and not C.DEPRECATION_WARNINGS:
return
message_text = self.get_deprecation_message(msg, version=version, removed=removed, date=date, collection_name=collection_name)
if removed:
raise AnsibleError(message_text)
wrapped = textwrap.wrap(message_text, self.columns, drop_whitespace=False)
message_text = "\n".join(wrapped) + "\n"
if message_text not in self._deprecations:
self.display(message_text.strip(), color=C.COLOR_DEPRECATE, stderr=True)
self._deprecations[message_text] = 1
def warning(self, msg, formatted=False):
if not formatted:
new_msg = "[WARNING]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = "\n".join(wrapped) + "\n"
else:
new_msg = "\n[WARNING]: \n%s" % msg
if new_msg not in self._warns:
self.display(new_msg, color=C.COLOR_WARN, stderr=True)
self._warns[new_msg] = 1
def system_warning(self, msg):
if C.SYSTEM_WARNINGS:
self.warning(msg)
def banner(self, msg, color=None, cows=True):
'''
Prints a header-looking line with cowsay or stars with length depending on terminal width (3 minimum)
'''
msg = to_text(msg)
if self.b_cowsay and cows:
try:
self.banner_cowsay(msg)
return
except OSError:
self.warning("somebody cleverly deleted cowsay or something during the PB run. heh.")
msg = msg.strip()
try:
star_len = self.columns - get_text_width(msg)
except EnvironmentError:
star_len = self.columns - len(msg)
if star_len <= 3:
star_len = 3
stars = u"*" * star_len
self.display(u"\n%s %s" % (msg, stars), color=color)
def banner_cowsay(self, msg, color=None):
if u": [" in msg:
msg = msg.replace(u"[", u"")
if msg.endswith(u"]"):
msg = msg[:-1]
runcmd = [self.b_cowsay, b"-W", b"60"]
if self.noncow:
thecow = self.noncow
if thecow == 'random':
thecow = random.choice(list(self.cows_available))
runcmd.append(b'-f')
runcmd.append(to_bytes(thecow))
runcmd.append(to_bytes(msg))
cmd = subprocess.Popen(runcmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
(out, err) = cmd.communicate()
self.display(u"%s\n" % to_text(out), color=color)
def error(self, msg, wrap_text=True):
if wrap_text:
new_msg = u"\n[ERROR]: %s" % msg
wrapped = textwrap.wrap(new_msg, self.columns)
new_msg = u"\n".join(wrapped) + u"\n"
else:
new_msg = u"ERROR! %s" % msg
if new_msg not in self._errors:
self.display(new_msg, color=C.COLOR_ERROR, stderr=True)
self._errors[new_msg] = 1
@staticmethod
def prompt(msg, private=False):
prompt_string = to_bytes(msg, encoding=Display._output_encoding())
# Convert back into text. We do this double conversion
# to get rid of characters that are illegal in the user's locale
prompt_string = to_text(prompt_string)
if private:
return getpass.getpass(prompt_string)
else:
return input(prompt_string)
def do_var_prompt(self, varname, private=True, prompt=None, encrypt=None, confirm=False, salt_size=None, salt=None, default=None, unsafe=None):
result = None
if sys.__stdin__.isatty():
do_prompt = self.prompt
if prompt and default is not None:
msg = "%s [%s]: " % (prompt, default)
elif prompt:
msg = "%s: " % prompt
else:
msg = 'input for %s: ' % varname
if confirm:
while True:
result = do_prompt(msg, private)
second = do_prompt("confirm " + msg, private)
if result == second:
break
self.display("***** VALUES ENTERED DO NOT MATCH ****")
else:
result = do_prompt(msg, private)
else:
result = None
self.warning("Not prompting as we are not in interactive mode")
# if result is false and default is not None
if not result and default is not None:
result = default
if encrypt:
# Circular import because encrypt needs a display class
from ansible.utils.encrypt import do_encrypt
result = do_encrypt(result, encrypt, salt_size, salt)
# handle utf-8 chars
result = to_text(result, errors='surrogate_or_strict')
if unsafe:
result = wrap_var(result)
return result
@staticmethod
def _output_encoding(stderr=False):
encoding = locale.getpreferredencoding()
# https://bugs.python.org/issue6202
# Python2 hardcodes an obsolete value on Mac. Use MacOSX defaults
# instead.
if encoding in ('mac-roman',):
encoding = 'utf-8'
return encoding
def _set_column_width(self):
if os.isatty(1):
tty_size = unpack('HHHH', fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))[1]
else:
tty_size = 0
self.columns = max(79, tty_size - 1)
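    # Editor's note (hedged sketch, not part of Ansible): TIOCGWINSZ fills a
    # struct of four unsigned shorts -- (rows, cols, xpixel, ypixel) -- so
    # index [1] above is the column count.  A standalone equivalent, assuming
    # fd 1 really is a tty:
    #
    #   from struct import pack, unpack
    #   import fcntl
    #   from termios import TIOCGWINSZ
    #   rows, cols, _, _ = unpack('HHHH',
    #       fcntl.ioctl(1, TIOCGWINSZ, pack('HHHH', 0, 0, 0, 0)))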
|
ansible/ansible
|
lib/ansible/utils/display.py
|
Python
|
gpl-3.0
| 19,081
|
# This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot.util import netstrings
from twisted.protocols import basic
from twisted.trial import unittest
class NetstringParser(unittest.TestCase):
def test_valid_netstrings(self):
p = netstrings.NetstringParser()
p.feed("5:hello,5:world,")
self.assertEqual(p.strings, ['hello', 'world'])
def test_valid_netstrings_byte_by_byte(self):
# (this is really testing twisted's support, but oh well)
p = netstrings.NetstringParser()
[p.feed(c) for c in "5:hello,5:world,"]
self.assertEqual(p.strings, ['hello', 'world'])
def test_invalid_netstring(self):
p = netstrings.NetstringParser()
self.assertRaises(basic.NetstringParseError,
lambda: p.feed("5-hello!"))
def test_incomplete_netstring(self):
p = netstrings.NetstringParser()
p.feed("11:hello world,6:foob")
# note that the incomplete 'foobar' does not appear here
self.assertEqual(p.strings, ['hello world'])
|
zozo123/buildbot
|
master/buildbot/test/unit/test_util_netstrings.py
|
Python
|
gpl-3.0
| 1,718
|
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import with_statement
import os.path
import datetime
import threading
import re
import glob
import stat
import traceback
import sickbeard
import xml.etree.cElementTree as etree
from name_parser.parser import NameParser, InvalidNameException, InvalidShowException
import subliminal
try:
from send2trash import send2trash
except ImportError:
pass
from imdb import imdb
from sickbeard import db
from sickbeard import helpers, exceptions, logger
from sickbeard.exceptions import ex
from sickbeard import image_cache
from sickbeard import notifiers
from sickbeard import postProcessor
from sickbeard import subtitles
from sickbeard import history
from sickbeard.blackandwhitelist import BlackAndWhiteList
from sickbeard import sbdatetime
from sickbeard import network_timezones
from sickbeard.indexers.indexer_config import INDEXER_TVRAGE
from dateutil.tz import *
from sickbeard import encodingKludge as ek
from common import Quality, Overview, statusStrings
from common import DOWNLOADED, SNATCHED, SNATCHED_PROPER, SNATCHED_BEST, ARCHIVED, IGNORED, UNAIRED, WANTED, SKIPPED, \
UNKNOWN, FAILED
from common import NAMING_DUPLICATE, NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_SEPARATED_REPEAT, \
NAMING_LIMITED_EXTEND_E_PREFIXED
import shutil
import shutil_custom
shutil.copyfile = shutil_custom.copyfile_custom
def dirty_setter(attr_name):
def wrapper(self, val):
if getattr(self, attr_name) != val:
setattr(self, attr_name, val)
self.dirty = True
return wrapper
class TVShow(object):
def __init__(self, indexer, indexerid, lang=""):
self._indexerid = int(indexerid)
self._indexer = int(indexer)
self._name = ""
self._imdbid = ""
self._network = ""
self._genre = ""
self._classification = ""
self._runtime = 0
self._imdb_info = {}
self._quality = int(sickbeard.QUALITY_DEFAULT)
self._flatten_folders = int(sickbeard.FLATTEN_FOLDERS_DEFAULT)
self._status = "Unknown"
self._airs = ""
self._startyear = 0
self._paused = 0
self._air_by_date = 0
self._subtitles = int(sickbeard.SUBTITLES_DEFAULT)
self._dvdorder = 0
self._archive_firstmatch = 0
self._lang = lang
self._last_update_indexer = 1
self._sports = 0
self._anime = 0
self._scene = 0
self._rls_ignore_words = ""
self._rls_require_words = ""
self._default_ep_status = SKIPPED
self.dirty = True
self._location = ""
self.lock = threading.Lock()
self.isDirGood = False
self.episodes = {}
self.nextaired = ""
self.release_groups = None
otherShow = helpers.findCertainShow(sickbeard.showList, self.indexerid)
if otherShow != None:
raise exceptions.MultipleShowObjectsException("Can't create a show if it already exists")
self.loadFromDB()
name = property(lambda self: self._name, dirty_setter("_name"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
# location = property(lambda self: self._location, dirty_setter("_location"))
imdbid = property(lambda self: self._imdbid, dirty_setter("_imdbid"))
network = property(lambda self: self._network, dirty_setter("_network"))
genre = property(lambda self: self._genre, dirty_setter("_genre"))
classification = property(lambda self: self._classification, dirty_setter("_classification"))
runtime = property(lambda self: self._runtime, dirty_setter("_runtime"))
imdb_info = property(lambda self: self._imdb_info, dirty_setter("_imdb_info"))
quality = property(lambda self: self._quality, dirty_setter("_quality"))
flatten_folders = property(lambda self: self._flatten_folders, dirty_setter("_flatten_folders"))
status = property(lambda self: self._status, dirty_setter("_status"))
airs = property(lambda self: self._airs, dirty_setter("_airs"))
startyear = property(lambda self: self._startyear, dirty_setter("_startyear"))
paused = property(lambda self: self._paused, dirty_setter("_paused"))
air_by_date = property(lambda self: self._air_by_date, dirty_setter("_air_by_date"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
dvdorder = property(lambda self: self._dvdorder, dirty_setter("_dvdorder"))
archive_firstmatch = property(lambda self: self._archive_firstmatch, dirty_setter("_archive_firstmatch"))
lang = property(lambda self: self._lang, dirty_setter("_lang"))
last_update_indexer = property(lambda self: self._last_update_indexer, dirty_setter("_last_update_indexer"))
sports = property(lambda self: self._sports, dirty_setter("_sports"))
anime = property(lambda self: self._anime, dirty_setter("_anime"))
scene = property(lambda self: self._scene, dirty_setter("_scene"))
rls_ignore_words = property(lambda self: self._rls_ignore_words, dirty_setter("_rls_ignore_words"))
rls_require_words = property(lambda self: self._rls_require_words, dirty_setter("_rls_require_words"))
default_ep_status = property(lambda self: self._default_ep_status, dirty_setter("_default_ep_status"))
@property
def is_anime(self):
if int(self.anime) > 0:
return True
else:
return False
@property
def is_sports(self):
if int(self.sports) > 0:
return True
else:
return False
@property
def is_scene(self):
if int(self.scene) > 0:
return True
else:
return False
@property
def network_logo_name(self):
return self.network.replace(u'\u00C9', 'e').replace(u'\u00E9', 'e').lower()
def _getLocation(self):
# no dir check needed if missing show dirs are created during post-processing
if sickbeard.CREATE_MISSING_SHOW_DIRS:
return self._location
if ek.ek(os.path.isdir, self._location):
return self._location
else:
raise exceptions.ShowDirNotFoundException("Show folder doesn't exist, you shouldn't be using it")
def _setLocation(self, newLocation):
logger.log(u"Setter sets location to " + newLocation, logger.DEBUG)
# Don't validate dir if user wants to add shows without creating a dir
if sickbeard.ADD_SHOWS_WO_DIR or ek.ek(os.path.isdir, newLocation):
dirty_setter("_location")(self, newLocation)
            self.isDirGood = True
else:
raise exceptions.NoNFOException("Invalid folder for the show!")
location = property(_getLocation, _setLocation)
# delete references to anything that's not in the internal lists
def flushEpisodes(self):
for curSeason in self.episodes:
for curEp in self.episodes[curSeason]:
myEp = self.episodes[curSeason][curEp]
self.episodes[curSeason][curEp] = None
del myEp
def getAllEpisodes(self, season=None, has_location=False):
sql_selection = "SELECT season, episode, "
# subselection to detect multi-episodes early, share_location > 0
sql_selection = sql_selection + " (SELECT COUNT (*) FROM tv_episodes WHERE showid = tve.showid AND season = tve.season AND location != '' AND location = tve.location AND episode != tve.episode) AS share_location "
sql_selection = sql_selection + " FROM tv_episodes tve WHERE showid = " + str(self.indexerid)
if season is not None:
sql_selection = sql_selection + " AND season = " + str(season)
if has_location:
sql_selection = sql_selection + " AND location != '' "
# need ORDER episode ASC to rename multi-episodes in order S01E01-02
sql_selection = sql_selection + " ORDER BY season ASC, episode ASC"
myDB = db.DBConnection()
results = myDB.select(sql_selection)
ep_list = []
for cur_result in results:
cur_ep = self.getEpisode(int(cur_result["season"]), int(cur_result["episode"]))
if not cur_ep:
continue
cur_ep.relatedEps = []
if cur_ep.location:
# if there is a location, check if it's a multi-episode (share_location > 0) and put them in relatedEps
if cur_result["share_location"] > 0:
related_eps_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND location = ? AND episode != ? ORDER BY episode ASC",
[self.indexerid, cur_ep.season, cur_ep.location, cur_ep.episode])
for cur_related_ep in related_eps_result:
related_ep = self.getEpisode(int(cur_related_ep["season"]), int(cur_related_ep["episode"]))
if related_ep and related_ep not in cur_ep.relatedEps:
cur_ep.relatedEps.append(related_ep)
ep_list.append(cur_ep)
return ep_list
def getEpisode(self, season=None, episode=None, file=None, noCreate=False, absolute_number=None, forceUpdate=False):
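"""Return the cached TVEpisode for season/episode, creating it on demand unless noCreate
is set. For anime shows an absolute_number can be given instead and is resolved to a
season and episode via the database."""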
# if we get an anime get the real season and episode
if self.is_anime and absolute_number and not season and not episode:
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ? AND absolute_number = ? AND season != 0"
sqlResults = myDB.select(sql, [self.indexerid, absolute_number])
if len(sqlResults) == 1:
episode = int(sqlResults[0]["episode"])
season = int(sqlResults[0]["season"])
logger.log(
"Found episode by absolute_number %s which is S%02dE%02d" % (absolute_number, season, episode), logger.DEBUG)
elif len(sqlResults) > 1:
logger.log("Multiple entries for absolute number: " + str(
absolute_number) + " in show: " + self.name + " found ", logger.ERROR)
return None
else:
logger.log(
"No entries for absolute number: " + str(absolute_number) + " in show: " + self.name + " found.",
logger.DEBUG)
return None
if season not in self.episodes:
self.episodes[season] = {}
if episode not in self.episodes[season] or self.episodes[season][episode] is None:
if noCreate:
return None
logger.log(str(self.indexerid) + u": An object for episode S%02dE%02d didn't exist in the cache, trying to create it" % (season, episode), logger.DEBUG)
if file:
ep = TVEpisode(self, season, episode, file)
else:
ep = TVEpisode(self, season, episode)
if ep is not None:
self.episodes[season][episode] = ep
return self.episodes[season][episode]
def should_update(self, update_date=None):
# evaluate the default at call time so the comparison date is never stale
if update_date is None:
update_date = datetime.date.today()
# if show is not 'Ended' always update (status 'Continuing')
if self.status == 'Continuing':
return True
# run logic against the current show latest aired and next unaired data to see if we should bypass 'Ended' status
graceperiod = datetime.timedelta(days=30)
last_airdate = datetime.date.fromordinal(1)
# get latest aired episode to compare against today - graceperiod and today + graceperiod
myDB = db.DBConnection()
sql_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status > '1' ORDER BY airdate DESC LIMIT 1",
[self.indexerid])
if sql_result:
last_airdate = datetime.date.fromordinal(sql_result[0]['airdate'])
if last_airdate >= (update_date - graceperiod) and last_airdate <= (update_date + graceperiod):
return True
# get next upcoming UNAIRED episode to compare against today + graceperiod
sql_result = myDB.select(
"SELECT * FROM tv_episodes WHERE showid = ? AND season > '0' AND airdate > '1' AND status = '1' ORDER BY airdate ASC LIMIT 1",
[self.indexerid])
if sql_result:
next_airdate = datetime.date.fromordinal(sql_result[0]['airdate'])
if next_airdate <= (update_date + graceperiod):
return True
last_update_indexer = datetime.date.fromordinal(self.last_update_indexer)
# for roughly the first 15 months (450 days) after the last airdate, update every 30 days
if (update_date - last_airdate) < datetime.timedelta(days=450) and (
update_date - last_update_indexer) > datetime.timedelta(days=30):
return True
return False
def writeShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return False
logger.log(str(self.indexerid) + u": Writing NFOs for show", logger.DEBUG)
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_show_metadata(self) or result
return result
def writeMetadata(self, show_only=False):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.getImages()
self.writeShowNFO()
if not show_only:
self.writeEpisodeNFOs()
def writeEpisodeNFOs(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
logger.log(str(self.indexerid) + u": Writing NFOs for all episodes", logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.indexerid])
for epResult in sqlResults:
logger.log(str(self.indexerid) + u": Retrieving/creating episode S%02dE%02d" % (epResult["season"], epResult["episode"]), logger.DEBUG)
curEp = self.getEpisode(epResult["season"], epResult["episode"])
if not curEp:
continue
curEp.createMetaFiles()
def updateMetadata(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return
self.updateShowNFO()
def updateShowNFO(self):
result = False
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, skipping NFO generation")
return False
logger.log(str(self.indexerid) + u": Updating NFOs for show with new indexer info")
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.update_show_indexer_metadata(self) or result
return result
# find all media files in the show folder and create episodes for as many as possible
def loadEpisodesFromDir(self):
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + u": Show dir doesn't exist, not loading episodes from disk", logger.DEBUG)
return
logger.log(str(self.indexerid) + u": Loading all episodes from the show directory " + self._location, logger.DEBUG)
# get file list
mediaFiles = helpers.listMediaFiles(self._location)
logger.log(u"%s: Found files: %s" %
(self.indexerid, mediaFiles), logger.DEBUG)
# create TVEpisodes from each media file (if possible)
sql_l = []
for mediaFile in mediaFiles:
parse_result = None
curEpisode = None
logger.log(str(self.indexerid) + u": Creating episode from " + mediaFile, logger.DEBUG)
try:
curEpisode = self.makeEpFromFile(ek.ek(os.path.join, self._location, mediaFile))
except (exceptions.ShowNotFoundException, exceptions.EpisodeNotFoundException), e:
logger.log(u"Episode " + mediaFile + " returned an exception: " + ex(e), logger.ERROR)
continue
except exceptions.EpisodeDeletedException:
logger.log(u"The episode deleted itself when I tried making an object for it", logger.DEBUG)
if curEpisode is None:
continue
# see if we should save the release name in the db
ep_file_name = ek.ek(os.path.basename, curEpisode.location)
ep_file_name = ek.ek(os.path.splitext, ep_file_name)[0]
try:
parse_result = None
np = NameParser(False, showObj=self, tryIndexers=True)
parse_result = np.parse(ep_file_name)
except (InvalidNameException, InvalidShowException):
pass
if ' ' not in ep_file_name and parse_result and parse_result.release_group:
logger.log(
u"Name " + ep_file_name + u" gave release group of " + parse_result.release_group + ", seems valid",
logger.DEBUG)
curEpisode.release_name = ep_file_name
# store the reference in the show
if curEpisode is not None:
if self.subtitles:
try:
curEpisode.refreshSubtitles()
except:
logger.log("%s: Could not refresh subtitles" % self.indexerid, logger.ERROR)
logger.log(traceback.format_exc(), logger.DEBUG)
sql_l.append(curEpisode.get_sql())
if sql_l:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def loadEpisodesFromDB(self):
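"""Rebuild TVEpisode objects from every tv_episodes row for this show, refreshing each
one from the indexer (seasons are fetched once and cached). Returns a dict keyed by
season and episode for everything that was scanned."""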
logger.log(u"Loading all episodes from the DB", logger.DEBUG)
myDB = db.DBConnection()
sql = "SELECT * FROM tv_episodes WHERE showid = ?"
sqlResults = myDB.select(sql, [self.indexerid])
scannedEps = {}
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
logger.log(u"Using language: " + str(self.lang), logger.DEBUG)
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
logger.log(u"lINDEXER_API_PARMS: " + str(lINDEXER_API_PARMS), logger.DEBUG)
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
cachedShow = t[self.indexerid]
cachedSeasons = {}
for curResult in sqlResults:
logger.log(u"loadEpisodesFromDB curResult: " + str(curResult), logger.DEBUG)
deleteEp = False
curSeason = int(curResult["season"])
curEpisode = int(curResult["episode"])
if curSeason not in cachedSeasons:
try:
cachedSeasons[curSeason] = cachedShow[curSeason]
except sickbeard.indexer_seasonnotfound, e:
logger.log(u"Error when trying to load the episode from " + sickbeard.indexerApi(
self.indexer).name + ": " + e.message, logger.WARNING)
deleteEp = True
if curSeason not in scannedEps:
logger.log(u"Season %s not in scannedEps yet, adding it" % curSeason, logger.DEBUG)
scannedEps[curSeason] = {}
logger.log(u"Loading episode S%02dE%02d from the DB" % (curSeason, curEpisode), logger.DEBUG)
try:
curEp = self.getEpisode(curSeason, curEpisode)
if not curEp:
raise exceptions.EpisodeNotFoundException
# if we found out that the ep is no longer on TVDB then delete it from our database too
if deleteEp:
curEp.deleteEpisode()
curEp.loadFromDB(curSeason, curEpisode)
curEp.loadFromIndexer(tvapi=t, cachedSeason=cachedSeasons[curSeason])
scannedEps[curSeason][curEpisode] = True
except exceptions.EpisodeDeletedException:
logger.log(u"Tried loading an episode from the DB that should have been deleted, skipping it",
logger.DEBUG)
continue
logger.log(u"Finished loading all episodes from the DB", logger.DEBUG)
return scannedEps
def loadEpisodesFromIndexer(self, cache=True):
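"""Fetch the full episode listing from the indexer and create or update TVEpisode
objects for it, batching the DB writes with mass_action. Returns the scannedEps map,
or None if the indexer could not be reached."""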
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
try:
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
showObj = t[self.indexerid]
except sickbeard.indexer_error:
logger.log(u"" + sickbeard.indexerApi(
self.indexer).name + " timed out, unable to update episodes from " + sickbeard.indexerApi(
self.indexer).name, logger.WARNING)
return None
logger.log(
str(self.indexerid) + u": Loading all episodes from " + sickbeard.indexerApi(self.indexer).name + "..", logger.DEBUG)
scannedEps = {}
sql_l = []
for season in showObj:
scannedEps[season] = {}
for episode in showObj[season]:
# need some examples of what episode 0 actually means before deciding whether we want it or not
if episode == 0:
continue
try:
ep = self.getEpisode(season, episode)
if not ep:
raise exceptions.EpisodeNotFoundException
except exceptions.EpisodeNotFoundException:
logger.log("%s: %s object for S%02dE%02d is incomplete, skipping this episode" % (self.indexerid, sickbeard.indexerApi(self.indexer).name, season, episode))
continue
else:
try:
ep.loadFromIndexer(tvapi=t)
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted, skipping the rest of the load")
continue
with ep.lock:
logger.log("%s: Loading info from %s for episode S%02dE%02d" % (self.indexerid, sickbeard.indexerApi(self.indexer).name, season, episode),logger.DEBUG)
ep.loadFromIndexer(season, episode, tvapi=t)
sql_l.append(ep.get_sql())
scannedEps[season][episode] = True
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
# Done updating save last update date
self.last_update_indexer = datetime.date.today().toordinal()
self.saveToDB()
return scannedEps
def getImages(self, fanart=None, poster=None):
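"""Run the artwork routines of every enabled metadata provider for this show and return
True if any image was created. The fanart and poster arguments appear to be unused here."""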
fanart_result = poster_result = banner_result = False
season_posters_result = season_banners_result = season_all_poster_result = season_all_banner_result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
# FIXME: Needs to not show this message if the option is not enabled?
logger.log(u"Running metadata routines for " + cur_provider.name, logger.DEBUG)
fanart_result = cur_provider.create_fanart(self) or fanart_result
poster_result = cur_provider.create_poster(self) or poster_result
banner_result = cur_provider.create_banner(self) or banner_result
season_posters_result = cur_provider.create_season_posters(self) or season_posters_result
season_banners_result = cur_provider.create_season_banners(self) or season_banners_result
season_all_poster_result = cur_provider.create_season_all_poster(self) or season_all_poster_result
season_all_banner_result = cur_provider.create_season_all_banner(self) or season_all_banner_result
return fanart_result or poster_result or banner_result or season_posters_result or season_banners_result or season_all_poster_result or season_all_banner_result
# make a TVEpisode object from a media file
def makeEpFromFile(self, file):
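"""Create (or update) TVEpisode objects for every episode number parsed from the given
media file, re-checking quality and status when the file replaces a previous one.
Returns the root episode for the file, or None if it could not be parsed."""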
if not ek.ek(os.path.isfile, file):
logger.log(str(self.indexerid) + u": That isn't even a real file dude... " + file)
return None
logger.log(str(self.indexerid) + u": Creating episode object from " + file, logger.DEBUG)
try:
myParser = NameParser(showObj=self, tryIndexers=True)
parse_result = myParser.parse(file)
except InvalidNameException:
logger.log(u"Unable to parse the filename " + file + " into a valid episode", logger.DEBUG)
return None
except InvalidShowException:
logger.log(u"Unable to parse the filename " + file + " into a valid show", logger.DEBUG)
return None
if not len(parse_result.episode_numbers):
logger.log("parse_result: " + str(parse_result))
logger.log(u"No episode number found in " + file + ", ignoring it", logger.WARNING)
return None
# for now lets assume that any episode in the show dir belongs to that show
season = parse_result.season_number if parse_result.season_number is not None else 1
episodes = parse_result.episode_numbers
rootEp = None
sql_l = []
for curEpNum in episodes:
episode = int(curEpNum)
logger.log("%s: %s parsed to %s S%02dE%02d" % (self.indexerid, file, self.name, season, episode), logger.DEBUG)
checkQualityAgain = False
same_file = False
curEp = self.getEpisode(season, episode)
if not curEp:
try:
curEp = self.getEpisode(season, episode, file)
if not curEp:
raise exceptions.EpisodeNotFoundException
except exceptions.EpisodeNotFoundException:
logger.log(str(self.indexerid) + u": Unable to figure out what this file is, skipping",
logger.ERROR)
continue
else:
# if there is a new file associated with this ep then re-check the quality
if curEp.location and ek.ek(os.path.normpath, curEp.location) != ek.ek(os.path.normpath, file):
logger.log(
u"The old episode had a different file associated with it, I will re-check the quality based on the new filename " + file,
logger.DEBUG)
checkQualityAgain = True
with curEp.lock:
old_size = curEp.file_size
curEp.location = file
# if the sizes are the same then it's probably the same file
if old_size and curEp.file_size == old_size:
same_file = True
else:
same_file = False
curEp.checkForMetaFiles()
if rootEp is None:
rootEp = curEp
else:
if curEp not in rootEp.relatedEps:
with rootEp.lock:
rootEp.relatedEps.append(curEp)
# if it's a new file then
if not same_file:
with curEp.lock:
curEp.release_name = ''
# if they replace a file on me I'll make some attempt at re-checking the quality unless I know it's the same file
if checkQualityAgain and not same_file:
newQuality = Quality.nameQuality(file, self.is_anime)
logger.log(u"Since this file has been renamed, I checked " + file + " and found quality " +
Quality.qualityStrings[newQuality], logger.DEBUG)
if newQuality != Quality.UNKNOWN:
with curEp.lock:
curEp.status = Quality.compositeStatus(DOWNLOADED, newQuality)
# check for status/quality changes as long as it's a new file
elif not same_file and sickbeard.helpers.isMediaFile(file) and curEp.status not in Quality.DOWNLOADED + [
ARCHIVED, IGNORED]:
oldStatus, oldQuality = Quality.splitCompositeStatus(curEp.status)
newQuality = Quality.nameQuality(file, self.is_anime)
if newQuality == Quality.UNKNOWN:
newQuality = Quality.assumeQuality(file)
newStatus = None
# if it was snatched and now exists then set the status correctly
if oldStatus == SNATCHED and oldQuality <= newQuality:
logger.log(u"STATUS: this ep used to be snatched with quality " + Quality.qualityStrings[
oldQuality] + u" but a file exists with quality " + Quality.qualityStrings[
newQuality] + u" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
# if it was snatched proper and we found a higher quality one then allow the status change
elif oldStatus == SNATCHED_PROPER and oldQuality < newQuality:
logger.log(u"STATUS: this ep used to be snatched proper with quality " + Quality.qualityStrings[
oldQuality] + u" but a file exists with quality " + Quality.qualityStrings[
newQuality] + u" so I'm setting the status to DOWNLOADED", logger.DEBUG)
newStatus = DOWNLOADED
elif oldStatus not in (SNATCHED, SNATCHED_PROPER):
newStatus = DOWNLOADED
if newStatus is not None:
with curEp.lock:
logger.log(u"STATUS: we have an associated file, so setting the status from " + str(
curEp.status) + u" to DOWNLOADED/" + str(Quality.statusFromName(file, anime=self.is_anime)),
logger.DEBUG)
curEp.status = Quality.compositeStatus(newStatus, newQuality)
with curEp.lock:
sql_l.append(curEp.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
# creating metafiles on the root should be good enough
if rootEp:
with rootEp.lock:
rootEp.createMetaFiles()
return rootEp
def loadFromDB(self, skipNFO=False):
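"""Populate the show's attributes from its tv_shows row, plus the matching imdb_info row.
Returns True only when both rows were found; the skipNFO argument appears to be unused here."""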
logger.log(str(self.indexerid) + u": Loading show info from database", logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_shows WHERE indexer_id = ?", [self.indexerid])
if len(sqlResults) > 1:
raise exceptions.MultipleDBShowsException()
elif len(sqlResults) == 0:
logger.log(str(self.indexerid) + ": Unable to find the show in the database")
return
else:
self.indexer = int(sqlResults[0]["indexer"] or 0)
if not self.name:
self.name = sqlResults[0]["show_name"]
if not self.network:
self.network = sqlResults[0]["network"]
if not self.genre:
self.genre = sqlResults[0]["genre"]
if not self.classification:
self.classification = sqlResults[0]["classification"]
self.runtime = sqlResults[0]["runtime"]
self.status = sqlResults[0]["status"]
if self.status is None:
self.status = "Unknown"
self.airs = sqlResults[0]["airs"]
if self.airs is None:
self.airs = ""
self.startyear = int(sqlResults[0]["startyear"] or 0)
self.air_by_date = int(sqlResults[0]["air_by_date"] or 0)
self.anime = int(sqlResults[0]["anime"] or 0)
self.sports = int(sqlResults[0]["sports"] or 0)
self.scene = int(sqlResults[0]["scene"] or 0)
self.subtitles = int(sqlResults[0]["subtitles"] or 0)
self.dvdorder = int(sqlResults[0]["dvdorder"] or 0)
self.archive_firstmatch = int(sqlResults[0]["archive_firstmatch"] or 0)
self.quality = int(sqlResults[0]["quality"] or UNKNOWN)
self.flatten_folders = int(sqlResults[0]["flatten_folders"] or 0)
self.paused = int(sqlResults[0]["paused"] or 0)
try:
self.location = sqlResults[0]["location"]
except Exception:
dirty_setter("_location")(self, sqlResults[0]["location"])
self._isDirGood = False
if not self.lang:
self.lang = sqlResults[0]["lang"]
self.last_update_indexer = sqlResults[0]["last_update_indexer"]
self.rls_ignore_words = sqlResults[0]["rls_ignore_words"]
self.rls_require_words = sqlResults[0]["rls_require_words"]
self.default_ep_status = int(sqlResults[0]["default_ep_status"] or SKIPPED)
if not self.imdbid:
self.imdbid = sqlResults[0]["imdb_id"]
if self.is_anime:
self.release_groups = BlackAndWhiteList(self.indexerid)
# Get IMDb_info from database
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM imdb_info WHERE indexer_id = ?", [self.indexerid])
if len(sqlResults) == 0:
logger.log(str(self.indexerid) + ": Unable to find IMDb show info in the database")
return
else:
self.imdb_info = dict(zip(sqlResults[0].keys(), sqlResults[0]))
self.dirty = False
return True
def loadFromIndexer(self, cache=True, tvapi=None, cachedSeason=None):
if self.indexer != INDEXER_TVRAGE:
logger.log(str(self.indexerid) + u": Loading show info from " + sickbeard.indexerApi(self.indexer).name, logger.DEBUG)
# There's gotta be a better way of doing this but we don't wanna
# change the cache value elsewhere
if tvapi is None:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if self.lang:
lINDEXER_API_PARMS['language'] = self.lang
if self.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
else:
t = tvapi
myEp = t[self.indexerid]
try:
self.name = myEp['seriesname'].strip()
except AttributeError:
raise sickbeard.indexer_attributenotfound(
"Found %s, but attribute 'seriesname' was empty." % (self.indexerid))
self.classification = getattr(myEp, 'classification', 'Scripted')
self.genre = getattr(myEp, 'genre', '')
self.network = getattr(myEp, 'network', '')
self.runtime = getattr(myEp, 'runtime', '')
self.imdbid = getattr(myEp, 'imdb_id', '')
if getattr(myEp, 'airs_dayofweek', None) is not None and getattr(myEp, 'airs_time', None) is not None:
self.airs = myEp["airs_dayofweek"] + " " + myEp["airs_time"]
if self.airs is None:
self.airs = ''
if getattr(myEp, 'firstaired', None) is not None:
self.startyear = int(str(myEp["firstaired"]).split('-')[0])
self.status = getattr(myEp, 'status', 'Unknown')
else:
logger.log(str(self.indexerid) + u": NOT loading info from " + sickbeard.indexerApi(self.indexer).name + " as it is temporarily disabled.", logger.WARNING)
def loadIMDbInfo(self, imdbapi=None):
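"""Look the show up on IMDb (resolving the IMDb id from the show name when needed) and
normalise the result into self.imdb_info for the DB upsert. The imdbapi argument appears
to be unused; a fresh imdb.IMDb() instance is created instead."""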
imdb_info = {'imdb_id': self.imdbid,
'title': '',
'year': '',
'akas': [],
'runtimes': '',
'genres': [],
'countries': '',
'country_codes': [],
'certificates': [],
'rating': '',
'votes': '',
'last_update': ''
}
i = imdb.IMDb()
if not self.imdbid:
self.imdbid = i.title2imdbID(self.name, kind='tv series')
if self.imdbid:
logger.log(str(self.indexerid) + u": Loading show info from IMDb", logger.DEBUG)
imdbTv = i.get_movie(str(re.sub("[^0-9]", "", self.imdbid)))
for key in filter(lambda x: x.replace('_', ' ') in imdbTv.keys(), imdb_info.keys()):
# Store only the first value for string type
if isinstance(imdb_info[key], basestring) and isinstance(imdbTv.get(key.replace('_', ' ')), list):
imdb_info[key] = imdbTv.get(key.replace('_', ' '))[0]
else:
imdb_info[key] = imdbTv.get(key.replace('_', ' '))
# Filter only the value
if imdb_info['runtimes']:
imdb_info['runtimes'] = re.search(r'\d+', imdb_info['runtimes']).group(0)
else:
imdb_info['runtimes'] = self.runtime
if imdb_info['akas']:
imdb_info['akas'] = '|'.join(imdb_info['akas'])
else:
imdb_info['akas'] = ''
# Join all genres in a string
if imdb_info['genres']:
imdb_info['genres'] = '|'.join(imdb_info['genres'])
else:
imdb_info['genres'] = ''
# Get only the production country certificate if any
if imdb_info['certificates'] and imdb_info['countries']:
dct = {}
try:
for item in imdb_info['certificates']:
dct[item.split(':')[0]] = item.split(':')[1]
imdb_info['certificates'] = dct[imdb_info['countries']]
except:
imdb_info['certificates'] = ''
else:
imdb_info['certificates'] = ''
if imdb_info['country_codes']:
imdb_info['country_codes'] = '|'.join(imdb_info['country_codes'])
else:
imdb_info['country_codes'] = ''
imdb_info['last_update'] = datetime.date.today().toordinal()
# Rename dict keys without spaces for DB upsert
self.imdb_info = dict(
(k.replace(' ', '_'), str(v) if hasattr(v, 'keys') else v) for k, v in imdb_info.iteritems())
logger.log(str(self.indexerid) + u": Obtained info from IMDb ->" + str(self.imdb_info), logger.DEBUG)
def nextEpisode(self):
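"""Return the airdate ordinal of the next UNAIRED or WANTED episode, refreshing the
cached self.nextaired value when it is empty or already in the past."""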
logger.log(str(self.indexerid) + ": Finding the episode which airs next", logger.DEBUG)
curDate = datetime.date.today().toordinal()
if not self.nextaired or curDate > self.nextaired:
myDB = db.DBConnection()
sqlResults = myDB.select(
"SELECT airdate, season, episode FROM tv_episodes WHERE showid = ? AND airdate >= ? AND status IN (?,?) ORDER BY airdate ASC LIMIT 1",
[self.indexerid, datetime.date.today().toordinal(), UNAIRED, WANTED])
if not sqlResults:
logger.log(str(self.indexerid) + u": No episode found... need to implement a show status",
logger.DEBUG)
self.nextaired = ""
else:
logger.log(u"%s: Found episode S%02dE%02d" % (self.indexerid, sqlResults[0]["season"], sqlResults[0]["episode"] ) , logger.DEBUG)
self.nextaired = sqlResults[0]['airdate']
return self.nextaired
def deleteShow(self, full=False):
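"""Remove every DB row belonging to this show, drop it from the running show list,
clear its cached images and, when full is True, delete or trash the show folder.
Also removes the show from the Trakt watchlist when that sync is enabled."""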
sql_l = [["DELETE FROM tv_episodes WHERE showid = ?", [self.indexerid]],
["DELETE FROM tv_shows WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM imdb_info WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM xem_refresh WHERE indexer_id = ?", [self.indexerid]],
["DELETE FROM scene_numbering WHERE indexer_id = ?", [self.indexerid]]]
myDB = db.DBConnection()
myDB.mass_action(sql_l)
action = ('delete', 'trash')[sickbeard.TRASH_REMOVE_SHOW]
# remove self from show list
sickbeard.showList = [x for x in sickbeard.showList if int(x.indexerid) != self.indexerid]
# clear the cache
image_cache_dir = ek.ek(os.path.join, sickbeard.CACHE_DIR, 'images')
for cache_file in ek.ek(glob.glob, ek.ek(os.path.join, image_cache_dir, str(self.indexerid) + '.*')):
logger.log(u'Attempt to %s cache file %s' % (action, cache_file))
try:
if sickbeard.TRASH_REMOVE_SHOW:
send2trash(cache_file)
else:
os.remove(cache_file)
except OSError, e:
logger.log(u'Unable to %s %s: %s / %s' % (action, cache_file, repr(e), str(e)), logger.WARNING)
# remove entire show folder
if full:
try:
logger.log(u'Attempt to %s show folder %s' % (action, self._location))
# check first the read-only attribute
file_attribute = ek.ek(os.stat, self.location)[0]
if (not file_attribute & stat.S_IWRITE):
# File is read-only, so make it writeable
logger.log('Attempting to make writeable the read only folder %s' % self._location, logger.DEBUG)
try:
ek.ek(os.chmod, self.location, stat.S_IWRITE)
except:
logger.log(u'Unable to change permissions of %s' % self._location, logger.WARNING)
if sickbeard.TRASH_REMOVE_SHOW:
send2trash(self.location)
else:
ek.ek(shutil.rmtree, self.location)
logger.log(u'%s show folder %s' %
(('Deleted', 'Trashed')[sickbeard.TRASH_REMOVE_SHOW],
self._location))
except exceptions.ShowDirNotFoundException:
logger.log(u"Show folder does not exist, no need to %s %s" % (action, self._location), logger.WARNING)
except OSError, e:
logger.log(u'Unable to %s %s: %s / %s' % (action, self._location, repr(e), str(e)), logger.WARNING)
if sickbeard.USE_TRAKT and sickbeard.TRAKT_SYNC_WATCHLIST:
logger.log(u"Removing show: indexerid " + str(self.indexerid) + ", Title " + str(self.name) + " from Watchlist", logger.DEBUG)
notifiers.trakt_notifier.update_watchlist(self, update="remove")
def populateCache(self):
cache_inst = image_cache.ImageCache()
logger.log(u"Checking & filling cache for show " + self.name, logger.DEBUG)
cache_inst.fill_cache(self)
def refreshDir(self):
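"""Re-scan the show folder for media files and reconcile the episode locations stored
in the DB with what is actually on disk, resetting episodes whose files have gone
missing unless SKIP_REMOVED_FILES is set."""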
# make sure the show dir is where we think it is unless dirs are created on the fly
if not ek.ek(os.path.isdir, self._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS:
return False
# load from dir
self.loadEpisodesFromDir()
# run through all locations from DB, check that they exist
logger.log(str(self.indexerid) + u": Loading all episodes with a location from the database", logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND location != ''", [self.indexerid])
sql_l = []
for ep in sqlResults:
curLoc = os.path.normpath(ep["location"])
season = int(ep["season"])
episode = int(ep["episode"])
try:
curEp = self.getEpisode(season, episode)
if not curEp:
raise exceptions.EpisodeDeletedException
except exceptions.EpisodeDeletedException:
logger.log(u"The episode was deleted while we were refreshing it, moving on to the next one",
logger.DEBUG)
continue
# if the path doesn't exist or if it's not in our show dir
if not ek.ek(os.path.isfile, curLoc) or not os.path.normpath(curLoc).startswith(
os.path.normpath(self.location)):
# check if downloaded files still exist, update our data if this has changed
if not sickbeard.SKIP_REMOVED_FILES:
with curEp.lock:
# if it used to have a file associated with it and it doesn't anymore then set it to sickbeard.EP_DEFAULT_DELETED_STATUS
if curEp.location and curEp.status in Quality.DOWNLOADED:
logger.log(u"%s: Location for S%02dE%02d doesn't exist, removing it and changing our status to %s" %
(self.indexerid, season, episode, statusStrings[sickbeard.EP_DEFAULT_DELETED_STATUS]) ,logger.DEBUG)
curEp.status = sickbeard.EP_DEFAULT_DELETED_STATUS
curEp.subtitles = list()
curEp.subtitles_searchcount = 0
curEp.subtitles_lastsearch = str(datetime.datetime.min)
curEp.location = ''
curEp.hasnfo = False
curEp.hastbn = False
curEp.release_name = ''
sql_l.append(curEp.get_sql())
else:
# the file exists, set its modify file stamp
if sickbeard.AIRDATE_EPISODES:
with curEp.lock:
curEp.airdateModifyStamp()
if sql_l:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def downloadSubtitles(self, force=False):
# TODO: Add support for force option
if not ek.ek(os.path.isdir, self._location):
logger.log(str(self.indexerid) + ": Show dir doesn't exist, can't download subtitles", logger.DEBUG)
return
logger.log("%s: Downloading subtitles" % self.indexerid, logger.DEBUG)
try:
episodes = self.getAllEpisodes(has_location=True)
if not episodes:
logger.log("%s: No episodes to download subtitles for %s" % (self.indexerid, self.name), logger.DEBUG)
return
for episode in episodes:
episode.downloadSubtitles(force=force)
except Exception:
logger.log("%s: Error occurred when downloading subtitles for %s" % (self.indexerid, self.name), logger.DEBUG)
logger.log(traceback.format_exc(), logger.ERROR)
def saveToDB(self, forceSave=False):
if not self.dirty and not forceSave:
logger.log(str(self.indexerid) + ": Not saving show to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.indexerid) + u": Saving show info to database", logger.DEBUG)
controlValueDict = {"indexer_id": self.indexerid}
newValueDict = {"indexer": self.indexer,
"show_name": self.name,
"location": self._location,
"network": self.network,
"genre": self.genre,
"classification": self.classification,
"runtime": self.runtime,
"quality": self.quality,
"airs": self.airs,
"status": self.status,
"flatten_folders": self.flatten_folders,
"paused": self.paused,
"air_by_date": self.air_by_date,
"anime": self.anime,
"scene": self.scene,
"sports": self.sports,
"subtitles": self.subtitles,
"dvdorder": self.dvdorder,
"archive_firstmatch": self.archive_firstmatch,
"startyear": self.startyear,
"lang": self.lang,
"imdb_id": self.imdbid,
"last_update_indexer": self.last_update_indexer,
"rls_ignore_words": self.rls_ignore_words,
"rls_require_words": self.rls_require_words,
"default_ep_status": self.default_ep_status
}
myDB = db.DBConnection()
myDB.upsert("tv_shows", newValueDict, controlValueDict)
helpers.update_anime_support()
if self.imdbid:
controlValueDict = {"indexer_id": self.indexerid}
newValueDict = self.imdb_info
myDB = db.DBConnection()
myDB.upsert("imdb_info", newValueDict, controlValueDict)
def __str__(self):
toReturn = ""
toReturn += "indexerid: " + str(self.indexerid) + "\n"
toReturn += "indexer: " + str(self.indexer) + "\n"
toReturn += "name: " + self.name + "\n"
toReturn += "location: " + self._location + "\n"
if self.network:
toReturn += "network: " + self.network + "\n"
if self.airs:
toReturn += "airs: " + self.airs + "\n"
toReturn += "status: " + self.status + "\n"
toReturn += "startyear: " + str(self.startyear) + "\n"
if self.genre:
toReturn += "genre: " + self.genre + "\n"
toReturn += "classification: " + self.classification + "\n"
toReturn += "runtime: " + str(self.runtime) + "\n"
toReturn += "quality: " + str(self.quality) + "\n"
toReturn += "scene: " + str(self.is_scene) + "\n"
toReturn += "sports: " + str(self.is_sports) + "\n"
toReturn += "anime: " + str(self.is_anime) + "\n"
return toReturn
def qualitiesToString(self, qualities=[]):
result = u''
for quality in qualities:
if quality in Quality.qualityStrings:
result += Quality.qualityStrings[quality] + u', '
else:
logger.log(u"Bad quality value: " + str(quality))
result = re.sub(', $', '', result)
if not len(result):
result = u'None'
return result
def wantEpisode(self, season, episode, quality, manualSearch=False, downCurQuality=False):
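"""Decide whether a found release at the given quality should be grabbed for this
episode, based on the show's allowed/preferred qualities, the episode's current
status and quality, and whether this is a manual search."""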
logger.log(u"Checking if found episode %s S%02dE%02d is wanted at quality %s" % (self.name, season, episode, Quality.qualityStrings[quality]) , logger.DEBUG)
# if the quality isn't one we want under any circumstances then just say no
anyQualities, bestQualities = Quality.splitQuality(self.quality)
logger.log(u"Any,Best = [ %s ] [ %s ] Found = [ %s ]" % (self.qualitiesToString(anyQualities),
self.qualitiesToString(bestQualities), self.qualitiesToString([quality])), logger.DEBUG)
if quality not in anyQualities + bestQualities:
logger.log(u"Don't want this quality, ignoring found episode", logger.DEBUG)
return False
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT status FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
[self.indexerid, season, episode])
if not sqlResults:
logger.log(u"Unable to find a matching episode in database, ignoring found episode", logger.DEBUG)
return False
epStatus = int(sqlResults[0]["status"])
epStatus_text = statusStrings[epStatus]
logger.log(u"Existing episode status: " + str(epStatus) + " (" + epStatus_text + ")", logger.DEBUG)
# if we know we don't want it then just say no
if epStatus in (UNAIRED, SKIPPED, IGNORED, ARCHIVED) and not manualSearch:
logger.log(u"Existing episode status is unaired/skipped/ignored/archived, ignoring found episode", logger.DEBUG)
return False
curStatus, curQuality = Quality.splitCompositeStatus(epStatus)
# if it's one of these then we want it as long as it's in our allowed initial qualities
if quality in anyQualities + bestQualities:
if epStatus in (WANTED, SKIPPED):
logger.log(u"Existing episode status is wanted or skipped, getting found episode", logger.DEBUG)
return True
elif manualSearch:
if (downCurQuality and quality >= curQuality) or (not downCurQuality and quality > curQuality):
logger.log(
u"Usually ignoring found episode, but forced search allows the quality, getting found episode",
logger.DEBUG)
return True
else:
logger.log(u"Quality is on wanted list, need to check if it's better than existing quality",
logger.DEBUG)
# if we are re-downloading then we only want it if it's in our bestQualities list and better than what we have, or we only have one bestQuality and we do not have that quality yet
if curStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER and quality in bestQualities and (quality > curQuality or curQuality not in bestQualities):
logger.log(u"Episode already exists but the found episode quality is wanted more, getting found episode",
logger.DEBUG)
return True
elif curStatus == Quality.UNKNOWN and manualSearch:
logger.log(u"Episode already exists but quality is Unknown, getting found episode",
logger.DEBUG)
return True
else:
logger.log(u"Episode already exists and the found episode has same/lower quality, ignoring found episode",
logger.DEBUG)
logger.log(u"None of the conditions were met, ignoring found episode", logger.DEBUG)
return False
def getOverview(self, epStatus):
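"""Map a (possibly composite) episode status onto an Overview category: WANTED,
UNAIRED, SKIPPED, SNATCHED, QUAL or GOOD."""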
if epStatus == WANTED:
return Overview.WANTED
elif epStatus in (UNAIRED, UNKNOWN):
return Overview.UNAIRED
elif epStatus in (SKIPPED, IGNORED):
return Overview.SKIPPED
elif epStatus == ARCHIVED:
return Overview.GOOD
elif epStatus in Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER + Quality.FAILED + Quality.SNATCHED_BEST + Quality.ARCHIVED:
anyQualities, bestQualities = Quality.splitQuality(self.quality) # @UnusedVariable
if bestQualities:
maxBestQuality = max(bestQualities)
minBestQuality = min(bestQualities)
else:
maxBestQuality = None
minBestQuality = None
epStatus, curQuality = Quality.splitCompositeStatus(epStatus)
if epStatus == FAILED:
return Overview.WANTED
if epStatus == DOWNLOADED and curQuality == Quality.UNKNOWN:
return Overview.QUAL
elif epStatus in (SNATCHED, SNATCHED_PROPER, SNATCHED_BEST):
return Overview.SNATCHED
# if they don't want re-downloads then we call it good if they have anything
elif maxBestQuality is None:
return Overview.GOOD
# if the want only first match and already have one call it good
elif self.archive_firstmatch and curQuality in bestQualities:
return Overview.GOOD
# if they want only first match and current quality is higher than minimal best quality call it good
elif self.archive_firstmatch and minBestQuality is not None and curQuality > minBestQuality:
return Overview.GOOD
# if they have one but it's not the best they want then mark it as qual
elif curQuality < maxBestQuality:
return Overview.QUAL
# if it's >= maxBestQuality then it's good
else:
return Overview.GOOD
def __getstate__(self):
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
d['lock'] = threading.Lock()
self.__dict__.update(d)
class TVEpisode(object):
def __init__(self, show, season, episode, file=""):
self._name = ""
self._season = season
self._episode = episode
self._absolute_number = 0
self._description = ""
self._subtitles = list()
self._subtitles_searchcount = 0
self._subtitles_lastsearch = str(datetime.datetime.min)
self._airdate = datetime.date.fromordinal(1)
self._hasnfo = False
self._hastbn = False
self._status = UNKNOWN
self._indexerid = 0
self._file_size = 0
self._release_name = ''
self._is_proper = False
self._version = 0
self._release_group = ''
# setting any of the above sets the dirty flag
self.dirty = True
self.show = show
self.scene_season = 0
self.scene_episode = 0
self.scene_absolute_number = 0
self._location = file
self._indexer = int(self.show.indexer)
self.lock = threading.Lock()
self.specifyEpisode(self.season, self.episode)
self.relatedEps = []
self.checkForMetaFiles()
self.wantedQuality = []
name = property(lambda self: self._name, dirty_setter("_name"))
season = property(lambda self: self._season, dirty_setter("_season"))
episode = property(lambda self: self._episode, dirty_setter("_episode"))
absolute_number = property(lambda self: self._absolute_number, dirty_setter("_absolute_number"))
description = property(lambda self: self._description, dirty_setter("_description"))
subtitles = property(lambda self: self._subtitles, dirty_setter("_subtitles"))
subtitles_searchcount = property(lambda self: self._subtitles_searchcount, dirty_setter("_subtitles_searchcount"))
subtitles_lastsearch = property(lambda self: self._subtitles_lastsearch, dirty_setter("_subtitles_lastsearch"))
airdate = property(lambda self: self._airdate, dirty_setter("_airdate"))
hasnfo = property(lambda self: self._hasnfo, dirty_setter("_hasnfo"))
hastbn = property(lambda self: self._hastbn, dirty_setter("_hastbn"))
status = property(lambda self: self._status, dirty_setter("_status"))
indexer = property(lambda self: self._indexer, dirty_setter("_indexer"))
indexerid = property(lambda self: self._indexerid, dirty_setter("_indexerid"))
# location = property(lambda self: self._location, dirty_setter("_location"))
file_size = property(lambda self: self._file_size, dirty_setter("_file_size"))
release_name = property(lambda self: self._release_name, dirty_setter("_release_name"))
is_proper = property(lambda self: self._is_proper, dirty_setter("_is_proper"))
version = property(lambda self: self._version, dirty_setter("_version"))
release_group = property(lambda self: self._release_group, dirty_setter("_release_group"))
def _set_location(self, new_location):
logger.log(u"Setter sets location to " + new_location, logger.DEBUG)
# self._location = newLocation
dirty_setter("_location")(self, new_location)
if new_location and ek.ek(os.path.isfile, new_location):
self.file_size = ek.ek(os.path.getsize, new_location)
else:
self.file_size = 0
location = property(lambda self: self._location, _set_location)
def refreshSubtitles(self):
"""Look for subtitles files and refresh the subtitles property"""
self.subtitles = subtitles.subtitlesLanguages(self.location)
def downloadSubtitles(self, force=False):
if not ek.ek(os.path.isfile, self.location):
logger.log(u"%s: Episode file doesn't exist, can't download subtitles for S%02dE%02d" %
(self.show.indexerid, self.season, self.episode), logger.DEBUG)
return
logger.log(u"%s: Downloading subtitles for S%02dE%02d" % (self.show.indexerid, self.season, self.episode), logger.DEBUG)
previous_subtitles = self.subtitles
#logging.getLogger('subliminal.api').addHandler(logging.StreamHandler())
#logging.getLogger('subliminal.api').setLevel(logging.DEBUG)
#logging.getLogger('subliminal').addHandler(logging.StreamHandler())
#logging.getLogger('subliminal').setLevel(logging.DEBUG)
try:
languages = set()
for language in frozenset(subtitles.wantedLanguages()).difference(self.subtitles):
languages.add(subtitles.fromietf(language))
if not languages:
logger.log(u'%s: No missing subtitles for S%02dE%02d' % (self.show.indexerid, self.season, self.episode), logger.DEBUG)
return
providers = sickbeard.subtitles.getEnabledServiceList()
vname = self.location
video = None
try:
# Never look for subtitles in the same path, as we specify the path later on
video = subliminal.scan_video(vname, subtitles=False, embedded_subtitles=not sickbeard.EMBEDDED_SUBTITLES_ALL or not force)
except Exception:
logger.log(u'%s: Exception caught in subliminal.scan_video for S%02dE%02d' %
(self.show.indexerid, self.season, self.episode), logger.DEBUG)
return
if not video:
return
# TODO: Add gui option for hearing_impaired parameter ?
foundSubs = subliminal.download_best_subtitles([video], languages=languages, providers=providers, single=not sickbeard.SUBTITLES_MULTI, hearing_impaired=False)
if not foundSubs:
logger.log(u'%s: No subtitles found for S%02dE%02d on any provider' % (self.show.indexerid, self.season, self.episode), logger.DEBUG)
return
# Select the correct subtitles path
if sickbeard.SUBTITLES_DIR and ek.ek(os.path.exists, sickbeard.SUBTITLES_DIR):
subs_new_path = sickbeard.SUBTITLES_DIR
elif sickbeard.SUBTITLES_DIR:
subs_new_path = ek.ek(os.path.join, ek.ek(os.path.dirname, self.location), sickbeard.SUBTITLES_DIR)
dir_exists = helpers.makeDir(subs_new_path)
if not dir_exists:
logger.log(u'Unable to create subtitles folder ' + subs_new_path, logger.ERROR)
else:
helpers.chmodAsParent(subs_new_path)
else:
subs_new_path = ek.ek(os.path.join, ek.ek(os.path.dirname, self.location))
subliminal.save_subtitles(foundSubs, directory=subs_new_path, single=not sickbeard.SUBTITLES_MULTI)
for video, subs in foundSubs.iteritems():
for sub in subs:
# Get the file name out of video.name and use the path from above
video_path = subs_new_path + "/" + video.name.rsplit("/", 1)[-1]
subpath = subliminal.subtitle.get_subtitle_path(video_path, sub.language if sickbeard.SUBTITLES_MULTI else None)
helpers.chmodAsParent(subpath)
helpers.fixSetGroupID(subpath)
if not sickbeard.EMBEDDED_SUBTITLES_ALL and sickbeard.SUBTITLES_EXTRA_SCRIPTS and self.location.endswith(('mkv','mp4')):
subtitles.run_subs_extra_scripts(self, foundSubs)
except Exception as e:
logger.log("Error occurred when downloading subtitles for: %s" % self.location)
logger.log(traceback.format_exc(), logger.ERROR)
return
self.refreshSubtitles()
self.subtitles_searchcount = self.subtitles_searchcount + 1 if self.subtitles_searchcount else 1
self.subtitles_lastsearch = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
self.saveToDB()
newSubtitles = frozenset(self.subtitles).difference(previous_subtitles)
if newSubtitles:
subtitleList = ", ".join([subtitles.fromietf(newSub).name for newSub in newSubtitles])
logger.log(u"%s: Downloaded %s subtitles for S%02dE%02d" %
(self.show.indexerid, subtitleList, self.season, self.episode), logger.DEBUG)
notifiers.notify_subtitle_download(self.prettyName(), subtitleList)
else:
logger.log(u"%s: No subtitles downloaded for S%02dE%02d" %
(self.show.indexerid, self.season, self.episode), logger.DEBUG)
if sickbeard.SUBTITLES_HISTORY:
for video, subs in foundSubs.iteritems():
for sub in subs:
logger.log(u'history.logSubtitle %s, %s' % (sub.provider_name, sub.language.opensubtitles), logger.DEBUG)
history.logSubtitle(self.show.indexerid, self.season, self.episode, self.status, sub)
return self.subtitles
def checkForMetaFiles(self):
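"""Ask every metadata provider whether this episode already has metadata and thumbnail
files on disk, update hasnfo/hastbn accordingly, and return True if either flag changed."""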
oldhasnfo = self.hasnfo
oldhastbn = self.hastbn
cur_nfo = False
cur_tbn = False
# check for nfo and tbn
if ek.ek(os.path.isfile, self.location):
for cur_provider in sickbeard.metadata_provider_dict.values():
if cur_provider.episode_metadata:
new_result = cur_provider._has_episode_metadata(self)
else:
new_result = False
cur_nfo = new_result or cur_nfo
if cur_provider.episode_thumbnails:
new_result = cur_provider._has_episode_thumb(self)
else:
new_result = False
cur_tbn = new_result or cur_tbn
self.hasnfo = cur_nfo
self.hastbn = cur_tbn
# if either setting has changed return true, if not return false
return oldhasnfo != self.hasnfo or oldhastbn != self.hastbn
def specifyEpisode(self, season, episode):
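"""Populate this episode from the DB, falling back to the NFO file next to the media
file and finally to the indexer; raises EpisodeNotFoundException if every source fails."""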
sqlResult = self.loadFromDB(season, episode)
if not sqlResult:
# only load from NFO if we didn't load from DB
if ek.ek(os.path.isfile, self.location):
try:
self.loadFromNFO(self.location)
except exceptions.NoNFOException:
logger.log(u"%s: There was an error loading the NFO for episode S%02dE%02d" % (self.show.indexerid, season, episode), logger.ERROR)
# if we tried loading it from NFO and didn't find the NFO, try the Indexers
if not self.hasnfo:
try:
result = self.loadFromIndexer(season, episode)
except exceptions.EpisodeDeletedException:
result = False
# if we failed SQL *and* NFO, Indexers then fail
if not result:
raise exceptions.EpisodeNotFoundException("Couldn't find episode S%02dE%02d" % (season, episode))
def loadFromDB(self, season, episode):
logger.log(u"%s: Loading episode details from DB for episode %s S%02dE%02d" % (self.show.indexerid, self.show.name, season, episode), logger.DEBUG)
myDB = db.DBConnection()
sqlResults = myDB.select("SELECT * FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?",
[self.show.indexerid, season, episode])
if len(sqlResults) > 1:
raise exceptions.MultipleDBEpisodesException("Your DB has two records for the same episode somehow.")
elif len(sqlResults) == 0:
logger.log(u"%s: Episode S%02dE%02d not found in the database" % (self.show.indexerid, self.season, self.episode), logger.DEBUG)
return False
else:
# NAMEIT logger.log(u"AAAAA from" + str(self.season)+"x"+str(self.episode) + " -" + self.name + " to " + str(sqlResults[0]["name"]))
if sqlResults[0]["name"]:
self.name = sqlResults[0]["name"]
self.season = season
self.episode = episode
self.absolute_number = sqlResults[0]["absolute_number"]
self.description = sqlResults[0]["description"]
if not self.description:
self.description = ""
if sqlResults[0]["subtitles"] and sqlResults[0]["subtitles"]:
self.subtitles = sqlResults[0]["subtitles"].split(",")
self.subtitles_searchcount = sqlResults[0]["subtitles_searchcount"]
self.subtitles_lastsearch = sqlResults[0]["subtitles_lastsearch"]
self.airdate = datetime.date.fromordinal(int(sqlResults[0]["airdate"]))
# logger.log(u"1 Status changes from " + str(self.status) + " to " + str(sqlResults[0]["status"]), logger.DEBUG)
self.status = int(sqlResults[0]["status"] or -1)
# don't overwrite my location
if sqlResults[0]["location"] and sqlResults[0]["location"]:
self.location = os.path.normpath(sqlResults[0]["location"])
if sqlResults[0]["file_size"]:
self.file_size = int(sqlResults[0]["file_size"])
else:
self.file_size = 0
self.indexerid = int(sqlResults[0]["indexerid"])
self.indexer = int(sqlResults[0]["indexer"])
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
try:
self.scene_season = int(sqlResults[0]["scene_season"])
except:
self.scene_season = 0
try:
self.scene_episode = int(sqlResults[0]["scene_episode"])
except:
self.scene_episode = 0
try:
self.scene_absolute_number = int(sqlResults[0]["scene_absolute_number"])
except:
self.scene_absolute_number = 0
if self.scene_absolute_number == 0:
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
if self.scene_season == 0 or self.scene_episode == 0:
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
if sqlResults[0]["release_name"] is not None:
self.release_name = sqlResults[0]["release_name"]
if sqlResults[0]["is_proper"]:
self.is_proper = int(sqlResults[0]["is_proper"])
if sqlResults[0]["version"]:
self.version = int(sqlResults[0]["version"])
if sqlResults[0]["release_group"] is not None:
self.release_group = sqlResults[0]["release_group"]
self.dirty = False
return True
def loadFromIndexer(self, season=None, episode=None, cache=True, tvapi=None, cachedSeason=None):
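"""Refresh this episode's details from the indexer (or from a cached season object when
one is supplied), update scene numbering, and derive a sensible status from the airdate
and any file on disk. Returns False, and may delete the episode, when the indexer data
is unusable."""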
if season is None:
season = self.season
if episode is None:
episode = self.episode
logger.log(u"%s: Loading episode details from %s for episode S%02dE%02d" %
(self.show.indexerid, sickbeard.indexerApi(self.show.indexer).name, season, episode) , logger.DEBUG)
indexer_lang = self.show.lang
try:
if cachedSeason is None:
if tvapi is None:
lINDEXER_API_PARMS = sickbeard.indexerApi(self.indexer).api_params.copy()
if not cache:
lINDEXER_API_PARMS['cache'] = False
if indexer_lang:
lINDEXER_API_PARMS['language'] = indexer_lang
if self.show.dvdorder != 0:
lINDEXER_API_PARMS['dvdorder'] = True
t = sickbeard.indexerApi(self.indexer).indexer(**lINDEXER_API_PARMS)
else:
t = tvapi
myEp = t[self.show.indexerid][season][episode]
else:
myEp = cachedSeason[episode]
except (sickbeard.indexer_error, IOError), e:
logger.log(u"" + sickbeard.indexerApi(self.indexer).name + " threw up an error: " + ex(e), logger.DEBUG)
# if the episode is already valid just log it, if not throw it up
if self.name:
logger.log(u"" + sickbeard.indexerApi(
self.indexer).name + " timed out but we have enough info from other sources, allowing the error",
logger.DEBUG)
return
else:
logger.log(u"" + sickbeard.indexerApi(self.indexer).name + " timed out, unable to create the episode",
logger.ERROR)
return False
except (sickbeard.indexer_episodenotfound, sickbeard.indexer_seasonnotfound):
logger.log(u"Unable to find the episode on " + sickbeard.indexerApi(
self.indexer).name + "... has it been removed? Should I delete from db?", logger.DEBUG)
# if I'm no longer on the Indexers but I once was then delete myself from the DB
if self.indexerid != -1:
self.deleteEpisode()
return
if getattr(myEp, 'episodename', None) is None:
logger.log(u"This episode %s - S%02dE%02d has no name on %s" %(self.show.name, season, episode, sickbeard.indexerApi(self.indexer).name))
# if I'm incomplete on TVDB but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.deleteEpisode()
return False
if getattr(myEp, 'absolute_number', None) is None:
logger.log(u"This episode %s - S%02dE%02d has no absolute number on %s" %(self.show.name, season, episode, sickbeard.indexerApi(self.indexer).name), logger.DEBUG)
else:
logger.log(u"%s: The absolute_number for S%02dE%02d is: %s " % (self.show.indexerid, season, episode, myEp["absolute_number"]), logger.DEBUG)
self.absolute_number = int(myEp["absolute_number"])
self.name = getattr(myEp, 'episodename', "")
self.season = season
self.episode = episode
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
self.description = getattr(myEp, 'overview', "")
firstaired = getattr(myEp, 'firstaired', None)
if not firstaired or firstaired == "0000-00-00":
firstaired = str(datetime.date.fromordinal(1))
rawAirdate = [int(x) for x in firstaired.split("-")]
try:
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
except (ValueError, IndexError):
logger.log(u"Malformed air date of %s retrieved from %s for (%s - S%02dE%02d)" % (firstaired, sickbeard.indexerApi(self.indexer).name, self.show.name, season, episode),logger.WARNING)
# if I'm incomplete on the indexer but I once was complete then just delete myself from the DB for now
if self.indexerid != -1:
self.deleteEpisode()
return False
# early conversion to int so that episode doesn't get marked dirty
self.indexerid = getattr(myEp, 'id', None)
if self.indexerid is None:
logger.log(u"Failed to retrieve ID from " + sickbeard.indexerApi(self.indexer).name, logger.ERROR)
if self.indexerid != -1:
self.deleteEpisode()
return False
# don't update show status if show dir is missing, unless it's missing on purpose
if not ek.ek(os.path.isdir,
self.show._location) and not sickbeard.CREATE_MISSING_SHOW_DIRS and not sickbeard.ADD_SHOWS_WO_DIR:
logger.log(u"The show dir %s is missing, not bothering to change the episode statuses since it'd probably be invalid" % self.show._location )
return
if self.location:
logger.log(u"%s: Setting status for S%02dE%02d based on status %s and location %s" %
(self.show.indexerid, season, episode, statusStrings[self.status], self.location), logger.DEBUG)
if not ek.ek(os.path.isfile, self.location):
if self.airdate >= datetime.date.today() or self.airdate == datetime.date.fromordinal(1):
logger.log(u"Episode airs in the future or has no airdate, marking it %s" % statusStrings[UNAIRED], logger.DEBUG)
self.status = UNAIRED
elif self.status in [UNAIRED, UNKNOWN]:
# Only do UNAIRED/UNKNOWN, it could already be snatched/ignored/skipped, or downloaded/archived to disconnected media
logger.log(u"Episode has already aired, marking it %s" % statusStrings[self.show.default_ep_status], logger.DEBUG)
self.status = self.show.default_ep_status if self.season > 0 else SKIPPED # auto-skip specials
else:
logger.log(u"Not touching status [ %s ] It could be skipped/ignored/snatched/archived" % statusStrings[self.status], logger.DEBUG)
# if we have a media file then it's downloaded
elif sickbeard.helpers.isMediaFile(self.location):
# leave propers alone, you have to either post-process them or manually change them back
if self.status not in Quality.SNATCHED_PROPER + Quality.DOWNLOADED + Quality.SNATCHED + [ARCHIVED]:
logger.log(
u"5 Status changes from " + str(self.status) + " to " + str(Quality.statusFromName(self.location)),
logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
# shouldn't get here probably
else:
logger.log(u"6 Status changes from " + str(self.status) + " to " + str(UNKNOWN), logger.DEBUG)
self.status = UNKNOWN
def loadFromNFO(self, location):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(
str(self.show.indexerid) + u": The show dir is missing, not bothering to try loading the episode NFO")
return
logger.log(
str(self.show.indexerid) + u": Loading episode details from the NFO file associated with " + location,
logger.DEBUG)
self.location = location
if self.location != "":
if self.status == UNKNOWN:
if sickbeard.helpers.isMediaFile(self.location):
logger.log(u"7 Status changes from " + str(self.status) + " to " + str(
Quality.statusFromName(self.location, anime=self.show.is_anime)), logger.DEBUG)
self.status = Quality.statusFromName(self.location, anime=self.show.is_anime)
nfoFile = sickbeard.helpers.replaceExtension(self.location, "nfo")
logger.log(str(self.show.indexerid) + u": Using NFO name " + nfoFile, logger.DEBUG)
if ek.ek(os.path.isfile, nfoFile):
try:
showXML = etree.ElementTree(file=nfoFile)
except (SyntaxError, ValueError), e:
logger.log(u"Error loading the NFO, backing up the NFO and skipping for now: " + ex(e),
logger.ERROR) # TODO: figure out what's wrong and fix it
try:
ek.ek(os.rename, nfoFile, nfoFile + ".old")
except Exception, e:
logger.log(
u"Failed to rename your episode's NFO file - you need to delete it or fix it: " + ex(e),
logger.ERROR)
raise exceptions.NoNFOException("Error in NFO format")
for epDetails in showXML.getiterator('episodedetails'):
if epDetails.findtext('season') is None or int(epDetails.findtext('season')) != self.season or \
epDetails.findtext('episode') is None or int(
epDetails.findtext('episode')) != self.episode:
logger.log(u"%s: NFO has an <episodedetails> block for a different episode - wanted S%02dE%02d but got S%02dE%02d" %
(self.show.indexerid, self.season, self.episode, epDetails.findtext('season'), epDetails.findtext('episode') ), logger.DEBUG)
continue
if epDetails.findtext('title') is None or epDetails.findtext('aired') is None:
raise exceptions.NoNFOException("Error in NFO format (missing episode title or airdate)")
self.name = epDetails.findtext('title')
self.episode = int(epDetails.findtext('episode'))
self.season = int(epDetails.findtext('season'))
sickbeard.scene_numbering.xem_refresh(self.show.indexerid, self.show.indexer)
self.scene_absolute_number = sickbeard.scene_numbering.get_scene_absolute_numbering(
self.show.indexerid,
self.show.indexer,
self.absolute_number
)
self.scene_season, self.scene_episode = sickbeard.scene_numbering.get_scene_numbering(
self.show.indexerid,
self.show.indexer,
self.season, self.episode
)
self.description = epDetails.findtext('plot')
if self.description is None:
self.description = ""
if epDetails.findtext('aired'):
rawAirdate = [int(x) for x in epDetails.findtext('aired').split("-")]
self.airdate = datetime.date(rawAirdate[0], rawAirdate[1], rawAirdate[2])
else:
self.airdate = datetime.date.fromordinal(1)
self.hasnfo = True
else:
self.hasnfo = False
if ek.ek(os.path.isfile, sickbeard.helpers.replaceExtension(nfoFile, "tbn")):
self.hastbn = True
else:
self.hastbn = False
def __str__(self):
toReturn = ""
toReturn += "%s - S%02dE%02d - %s " % (self.show.name, self.season, self.episode, self.name ) + "\n"
toReturn += "location: " + str(self.location) + "\n"
toReturn += "description: " + str(self.description) + "\n"
toReturn += "subtitles: " + str(",".join(self.subtitles)) + "\n"
toReturn += "subtitles_searchcount: " + str(self.subtitles_searchcount) + "\n"
toReturn += "subtitles_lastsearch: " + str(self.subtitles_lastsearch) + "\n"
toReturn += "airdate: " + str(self.airdate.toordinal()) + " (" + str(self.airdate) + ")\n"
toReturn += "hasnfo: " + str(self.hasnfo) + "\n"
toReturn += "hastbn: " + str(self.hastbn) + "\n"
toReturn += "status: " + str(self.status) + "\n"
return toReturn
def createMetaFiles(self):
if not ek.ek(os.path.isdir, self.show._location):
logger.log(str(self.show.indexerid) + u": The show dir is missing, not bothering to try to create metadata")
return
self.createNFO()
self.createThumbnail()
if self.checkForMetaFiles():
self.saveToDB()
def createNFO(self):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_metadata(self) or result
return result
def createThumbnail(self):
result = False
for cur_provider in sickbeard.metadata_provider_dict.values():
result = cur_provider.create_episode_thumb(self) or result
return result
def deleteEpisode(self):
logger.log(u"Deleting %s S%02dE%02d from the DB" % (self.show.name, self.season, self.episode), logger.DEBUG)
# remove myself from the show dictionary
if self.show.getEpisode(self.season, self.episode, noCreate=True) == self:
logger.log(u"Removing myself from my show's list", logger.DEBUG)
del self.show.episodes[self.season][self.episode]
# delete myself from the DB
logger.log(u"Deleting myself from the database", logger.DEBUG)
myDB = db.DBConnection()
sql = "DELETE FROM tv_episodes WHERE showid=" + str(self.show.indexerid) + " AND season=" + str(
self.season) + " AND episode=" + str(self.episode)
myDB.action(sql)
raise exceptions.EpisodeDeletedException()
def get_sql(self, forceSave=False):
"""
Creates SQL queue for this episode if any of its data has been changed since the last save.
forceSave: If True it will create SQL queue even if no data has been changed since the
last save (aka if the record is not dirty).
"""
try:
if not self.dirty and not forceSave:
logger.log(str(self.show.indexerid) + u": Not creating SQL queue - record is not dirty", logger.DEBUG)
return
myDB = db.DBConnection()
rows = myDB.select(
'SELECT episode_id, subtitles FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?',
[self.show.indexerid, self.season, self.episode])
epID = None
if rows:
epID = int(rows[0]['episode_id'])
if epID:
# use a custom update method to get the data into the DB for existing records.
# Multi or added subtitle or removed subtitles
if sickbeard.SUBTITLES_MULTI or not rows[0]['subtitles'] or not self.subtitles:
return [
"UPDATE tv_episodes SET indexerid = ?, indexer = ?, name = ?, description = ?, subtitles = ?, "
"subtitles_searchcount = ?, subtitles_lastsearch = ?, airdate = ?, hasnfo = ?, hastbn = ?, status = ?, "
"location = ?, file_size = ?, release_name = ?, is_proper = ?, showid = ?, season = ?, episode = ?, "
"absolute_number = ?, version = ?, release_group = ? WHERE episode_id = ?",
[self.indexerid, self.indexer, self.name, self.description, ",".join(self.subtitles),
self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
self.hastbn,
self.status, self.location, self.file_size, self.release_name, self.is_proper, self.show.indexerid,
self.season, self.episode, self.absolute_number, self.version, self.release_group, epID]]
else:
# Don't update the subtitle language when the srt file doesn't contain the alpha2 code, keep value from subliminal
return [
"UPDATE tv_episodes SET indexerid = ?, indexer = ?, name = ?, description = ?, "
"subtitles_searchcount = ?, subtitles_lastsearch = ?, airdate = ?, hasnfo = ?, hastbn = ?, status = ?, "
"location = ?, file_size = ?, release_name = ?, is_proper = ?, showid = ?, season = ?, episode = ?, "
"absolute_number = ?, version = ?, release_group = ? WHERE episode_id = ?",
[self.indexerid, self.indexer, self.name, self.description,
self.subtitles_searchcount, self.subtitles_lastsearch, self.airdate.toordinal(), self.hasnfo,
self.hastbn,
self.status, self.location, self.file_size, self.release_name, self.is_proper, self.show.indexerid,
self.season, self.episode, self.absolute_number, self.version, self.release_group, epID]]
else:
# use a custom insert method to get the data into the DB.
return [
"INSERT OR IGNORE INTO tv_episodes (episode_id, indexerid, indexer, name, description, subtitles, "
"subtitles_searchcount, subtitles_lastsearch, airdate, hasnfo, hastbn, status, location, file_size, "
"release_name, is_proper, showid, season, episode, absolute_number, version, release_group) VALUES "
"((SELECT episode_id FROM tv_episodes WHERE showid = ? AND season = ? AND episode = ?)"
",?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?,?);",
[self.show.indexerid, self.season, self.episode, self.indexerid, self.indexer, self.name,
self.description, ",".join(self.subtitles), self.subtitles_searchcount, self.subtitles_lastsearch,
self.airdate.toordinal(), self.hasnfo, self.hastbn, self.status, self.location, self.file_size,
self.release_name, self.is_proper, self.show.indexerid, self.season, self.episode,
self.absolute_number, self.version, self.release_group]]
except Exception as e:
logger.log(u"Error while updating database: %s" %
(repr(e)), logger.ERROR)
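# Editor's note (illustrative, not part of the original module): get_sql() hands back a
# [query, params] pair (or None when the record is not dirty), which callers collect and
# pass to DBConnection.mass_action() in one batch, e.g.:
#   sql_l = [ep.get_sql() for ep in episodes if ep.dirty]
#   db.DBConnection().mass_action(sql_l)
# The same batching pattern is used further down in rename().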
def saveToDB(self, forceSave=False):
"""
Saves this episode to the database if any of its data has been changed since the last save.
forceSave: If True it will save to the database even if no data has been changed since the
last save (aka if the record is not dirty).
"""
if not self.dirty and not forceSave:
logger.log(str(self.show.indexerid) + u": Not saving episode to db - record is not dirty", logger.DEBUG)
return
logger.log(str(self.show.indexerid) + u": Saving episode details to database", logger.DEBUG)
logger.log(u"STATUS IS " + str(self.status), logger.DEBUG)
newValueDict = {"indexerid": self.indexerid,
"indexer": self.indexer,
"name": self.name,
"description": self.description,
"subtitles": ",".join(self.subtitles),
"subtitles_searchcount": self.subtitles_searchcount,
"subtitles_lastsearch": self.subtitles_lastsearch,
"airdate": self.airdate.toordinal(),
"hasnfo": self.hasnfo,
"hastbn": self.hastbn,
"status": self.status,
"location": self.location,
"file_size": self.file_size,
"release_name": self.release_name,
"is_proper": self.is_proper,
"absolute_number": self.absolute_number,
"version": self.version,
"release_group": self.release_group
}
controlValueDict = {"showid": self.show.indexerid,
"season": self.season,
"episode": self.episode}
# use a custom update/insert method to get the data into the DB
myDB = db.DBConnection()
myDB.upsert("tv_episodes", newValueDict, controlValueDict)
def fullPath(self):
if self.location == None or self.location == "":
return None
else:
return ek.ek(os.path.join, self.show.location, self.location)
def createStrings(self, pattern=None):
patterns = [
'%S.N.S%SE%0E',
'%S.N.S%0SE%E',
'%S.N.S%SE%E',
'%S.N.S%0SE%0E',
'%SN S%SE%0E',
'%SN S%0SE%E',
'%SN S%SE%E',
'%SN S%0SE%0E'
]
strings = []
if not pattern:
for p in patterns:
strings += [self._format_pattern(p)]
return strings
return self._format_pattern(pattern)
def prettyName(self):
"""
Returns the name of this episode in a "pretty" human-readable format. Used for logging
and notifications and such.
Returns: A string representing the episode's name and season/ep numbers
"""
if self.show.anime and not self.show.scene:
return self._format_pattern('%SN - %AB - %EN')
elif self.show.air_by_date:
return self._format_pattern('%SN - %AD - %EN')
return self._format_pattern('%SN - %Sx%0E - %EN')
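# Editor's note (illustrative, hypothetical values): a regular show renders as something
# like "Show Name - 3x05 - Episode Name", an air-by-date show as
# "Show Name - 2014 03 05 - Episode Name", and a non-scene anime show as
# "Show Name - 042 - Episode Name" (three-digit absolute number).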
def _ep_name(self):
"""
Returns the name of the episode to use during renaming. Combines the names of related episodes.
Eg. "Ep Name (1)" and "Ep Name (2)" becomes "Ep Name"
"Ep Name" and "Other Ep Name" becomes "Ep Name & Other Ep Name"
"""
multiNameRegex = "(.*) \(\d{1,2}\)"
self.relatedEps = sorted(self.relatedEps, key=lambda x: x.episode)
if len(self.relatedEps) == 0:
goodName = self.name
else:
goodName = ''
singleName = True
curGoodName = None
for curName in [self.name] + [x.name for x in self.relatedEps]:
match = re.match(multiNameRegex, curName)
if not match:
singleName = False
break
if curGoodName == None:
curGoodName = match.group(1)
elif curGoodName != match.group(1):
singleName = False
break
if singleName:
goodName = curGoodName
else:
goodName = self.name
for relEp in self.relatedEps:
goodName += " & " + relEp.name
return goodName
def _replace_map(self):
"""
Generates a replacement map for this episode which maps all possible custom naming patterns to the correct
value for this episode.
Returns: A dict with patterns as the keys and their replacement values as the values.
"""
ep_name = self._ep_name()
def dot(name):
return helpers.sanitizeSceneName(name)
def us(name):
return re.sub('[ -]', '_', name)
def release_name(name):
if name:
name = helpers.remove_non_release_groups(helpers.remove_extension(name))
return name
def release_group(show, name):
if name:
name = helpers.remove_non_release_groups(helpers.remove_extension(name))
else:
return ""
try:
np = NameParser(name, showObj=show, naming_pattern=True)
parse_result = np.parse(name)
except (InvalidNameException, InvalidShowException), e:
logger.log(u"Unable to get parse release_group: " + ex(e), logger.DEBUG)
return ''
if not parse_result.release_group:
return ''
return parse_result.release_group
epStatus, epQual = Quality.splitCompositeStatus(self.status) # @UnusedVariable
if sickbeard.NAMING_STRIP_YEAR:
show_name = re.sub("\(\d+\)$", "", self.show.name).rstrip()
else:
show_name = self.show.name
# try to get the release group
rel_grp = {}
rel_grp["SiCKRAGE"] = 'SiCKRAGE'
if hasattr(self, 'location'):  # from the location name
rel_grp['location'] = release_group(self.show, self.location)
if rel_grp['location'] == '': del rel_grp['location']
if hasattr(self, '_release_group'):  # from the release group field in db
rel_grp['database'] = self._release_group
if rel_grp['database'] == '': del rel_grp['database']
if hasattr(self, 'release_name'):  # from the release name field in db
rel_grp['release_name'] = release_group(self.show, self.release_name)
if rel_grp['release_name'] == '': del rel_grp['release_name']
# use release_group, release_name, location in that order
if 'database' in rel_grp: relgrp = 'database'
elif 'release_name' in rel_grp: relgrp = 'release_name'
elif 'location' in rel_grp: relgrp = 'location'
else: relgrp = 'SiCKRAGE'
return {
'%SN': show_name,
'%S.N': dot(show_name),
'%S_N': us(show_name),
'%EN': ep_name,
'%E.N': dot(ep_name),
'%E_N': us(ep_name),
'%QN': Quality.qualityStrings[epQual],
'%Q.N': dot(Quality.qualityStrings[epQual]),
'%Q_N': us(Quality.qualityStrings[epQual]),
'%S': str(self.season),
'%0S': '%02d' % self.season,
'%E': str(self.episode),
'%0E': '%02d' % self.episode,
'%XS': str(self.scene_season),
'%0XS': '%02d' % self.scene_season,
'%XE': str(self.scene_episode),
'%0XE': '%02d' % self.scene_episode,
'%AB': '%(#)03d' % {'#': self.absolute_number},
'%XAB': '%(#)03d' % {'#': self.scene_absolute_number},
'%RN': release_name(self.release_name),
'%RG': rel_grp[relgrp],
'%AD': str(self.airdate).replace('-', ' '),
'%A.D': str(self.airdate).replace('-', '.'),
'%A_D': us(str(self.airdate)),
'%A-D': str(self.airdate),
'%Y': str(self.airdate.year),
'%M': str(self.airdate.month),
'%D': str(self.airdate.day),
'%0M': '%02d' % self.airdate.month,
'%0D': '%02d' % self.airdate.day,
'%RT': "PROPER" if self.is_proper else "",
}
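# Editor's note (illustrative, hypothetical values): for season 3, episode 5 of a show
# called "Show Name" this map would contain roughly '%SN' -> 'Show Name',
# '%S.N' -> 'Show.Name', '%0S' -> '03', '%0E' -> '05', '%AD' -> '2014 03 05'
# (airdate with dashes replaced by spaces) and '%RG' -> the release group resolved
# above, falling back to 'SiCKRAGE'.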
def _format_string(self, pattern, replace_map):
"""
Replaces all template strings with the correct value
"""
result_name = pattern
# do the replacements
for cur_replacement in sorted(replace_map.keys(), reverse=True):
result_name = result_name.replace(cur_replacement, helpers.sanitizeFileName(replace_map[cur_replacement]))
result_name = result_name.replace(cur_replacement.lower(),
helpers.sanitizeFileName(replace_map[cur_replacement].lower()))
return result_name
def _format_pattern(self, pattern=None, multi=None, anime_type=None):
"""
Manipulates an episode naming pattern and then fills the template in
"""
if pattern == None:
pattern = sickbeard.NAMING_PATTERN
if multi == None:
multi = sickbeard.NAMING_MULTI_EP
if sickbeard.NAMING_CUSTOM_ANIME:
if anime_type == None:
anime_type = sickbeard.NAMING_ANIME
else:
anime_type = 3
replace_map = self._replace_map()
result_name = pattern
# if there's no release group in the db, let the user know we replaced it
if not hasattr(self, '_release_group') and replace_map['%RG'] != 'SiCKRAGE':
logger.log(u"Episode has no release group, replacing it with '" + replace_map['%RG'] + "'", logger.DEBUG)
self._release_group = replace_map['%RG']  # if release_group is not in the db, put it there
elif self._release_group == '' and replace_map['%RG'] != 'SiCKRAGE':
logger.log(u"Episode has no release group, replacing it with '" + replace_map['%RG'] + "'", logger.DEBUG)
self._release_group = replace_map['%RG']  # if release_group is not in the db, put it there
# if there's no release name then replace it with a reasonable facsimile
if not replace_map['%RN']:
if self.show.air_by_date or self.show.sports:
result_name = result_name.replace('%RN', '%S.N.%A.D.%E.N-' + replace_map['%RG'])
result_name = result_name.replace('%rn', '%s.n.%A.D.%e.n-' + replace_map['%RG'].lower())
elif anime_type != 3:
result_name = result_name.replace('%RN', '%S.N.%AB.%E.N-' + replace_map['%RG'])
result_name = result_name.replace('%rn', '%s.n.%ab.%e.n-' + replace_map['%RG'].lower())
else:
result_name = result_name.replace('%RN', '%S.N.S%0SE%0E.%E.N-' + replace_map['%RG'])
result_name = result_name.replace('%rn', '%s.n.s%0se%0e.%e.n-' + replace_map['%RG'].lower())
logger.log(u"Episode has no release name, replacing it with a generic one: " + result_name, logger.DEBUG)
if not replace_map['%RT']:
result_name = re.sub('([ _.-]*)%RT([ _.-]*)', r'\2', result_name)
# split off ep name part only
name_groups = re.split(r'[\\/]', result_name)
# figure out the double-ep numbering style for each group, if applicable
for cur_name_group in name_groups:
season_format = sep = ep_sep = ep_format = None
season_ep_regex = '''
(?P<pre_sep>[ _.-]*)
((?:s(?:eason|eries)?\s*)?%0?S(?![._]?N))
(.*?)
(%0?E(?![._]?N))
(?P<post_sep>[ _.-]*)
'''
ep_only_regex = '(E?%0?E(?![._]?N))'
# try the normal way
season_ep_match = re.search(season_ep_regex, cur_name_group, re.I | re.X)
ep_only_match = re.search(ep_only_regex, cur_name_group, re.I | re.X)
# if we have a season and episode then collect the necessary data
if season_ep_match:
season_format = season_ep_match.group(2)
ep_sep = season_ep_match.group(3)
ep_format = season_ep_match.group(4)
sep = season_ep_match.group('pre_sep')
if not sep:
sep = season_ep_match.group('post_sep')
if not sep:
sep = ' '
# force 2-3-4 format if they chose to extend
if multi in (NAMING_EXTEND, NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED):
ep_sep = '-'
regex_used = season_ep_regex
# if there's no season then there's not much choice so we'll just force them to use 03-04-05 style
elif ep_only_match:
season_format = ''
ep_sep = '-'
ep_format = ep_only_match.group(1)
sep = ''
regex_used = ep_only_regex
else:
continue
# we need at least this much info to continue
if not ep_sep or not ep_format:
continue
# start with the ep string, eg. E03
ep_string = self._format_string(ep_format.upper(), replace_map)
for other_ep in self.relatedEps:
# for limited extend we only append the last ep
if multi in (NAMING_LIMITED_EXTEND, NAMING_LIMITED_EXTEND_E_PREFIXED) and other_ep != self.relatedEps[
-1]:
continue
elif multi == NAMING_DUPLICATE:
# add " - S01"
ep_string += sep + season_format
elif multi == NAMING_SEPARATED_REPEAT:
ep_string += sep
# add "E04"
ep_string += ep_sep
if multi == NAMING_LIMITED_EXTEND_E_PREFIXED:
ep_string += 'E'
ep_string += other_ep._format_string(ep_format.upper(), other_ep._replace_map())
if anime_type != 3:
if self.absolute_number == 0:
curAbsolute_number = self.episode
else:
curAbsolute_number = self.absolute_number
if self.season != 0: # dont set absolute numbers if we are on specials !
if anime_type == 1: # this crazy person wants both ! (note: +=)
ep_string += sep + "%(#)03d" % {
"#": curAbsolute_number}
elif anime_type == 2: # total anime freak only need the absolute number ! (note: =)
ep_string = "%(#)03d" % {"#": curAbsolute_number}
for relEp in self.relatedEps:
if relEp.absolute_number != 0:
ep_string += '-' + "%(#)03d" % {"#": relEp.absolute_number}
else:
ep_string += '-' + "%(#)03d" % {"#": relEp.episode}
regex_replacement = None
if anime_type == 2:
regex_replacement = r'\g<pre_sep>' + ep_string + r'\g<post_sep>'
elif season_ep_match:
regex_replacement = r'\g<pre_sep>\g<2>\g<3>' + ep_string + r'\g<post_sep>'
elif ep_only_match:
regex_replacement = ep_string
if regex_replacement:
# fill out the template for this piece and then insert this piece into the actual pattern
cur_name_group_result = re.sub('(?i)(?x)' + regex_used, regex_replacement, cur_name_group)
# cur_name_group_result = cur_name_group.replace(ep_format, ep_string)
# logger.log(u"found "+ep_format+" as the ep pattern using "+regex_used+" and replaced it with "+regex_replacement+" to result in "+cur_name_group_result+" from "+cur_name_group, logger.DEBUG)
result_name = result_name.replace(cur_name_group, cur_name_group_result)
result_name = self._format_string(result_name, replace_map)
logger.log(u"formatting pattern: " + pattern + " -> " + result_name, logger.DEBUG)
return result_name
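# Editor's note (illustrative, hypothetical values): with the default pattern
# '%SN - %Sx%0E - %EN' and one related episode, the extend-style multi-ep settings
# produce something like "Show Name - 3x05-06 - Episode Name", while NAMING_DUPLICATE
# repeats the season token, e.g. "Show Name - 3x05 - 3x06 - Episode Name".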
def proper_path(self):
"""
Figures out the path where this episode SHOULD live according to the renaming rules, relative from the show dir
"""
anime_type = sickbeard.NAMING_ANIME
if not self.show.is_anime:
anime_type = 3
result = self.formatted_filename(anime_type=anime_type)
# if they want us to flatten it and we're allowed to flatten it then we will
if self.show.flatten_folders and not sickbeard.NAMING_FORCE_FOLDERS:
return result
# if not we append the folder on and use that
else:
result = ek.ek(os.path.join, self.formatted_dir(), result)
return result
def formatted_dir(self, pattern=None, multi=None):
"""
Just the folder name of the episode
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
elif self.show.sports and sickbeard.NAMING_CUSTOM_SPORTS and not self.relatedEps:
pattern = sickbeard.NAMING_SPORTS_PATTERN
elif self.show.anime and sickbeard.NAMING_CUSTOM_ANIME:
pattern = sickbeard.NAMING_ANIME_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
if len(name_groups) == 1:
return ''
else:
return self._format_pattern(os.sep.join(name_groups[:-1]), multi)
def formatted_filename(self, pattern=None, multi=None, anime_type=None):
"""
Just the filename of the episode, formatted based on the naming settings
"""
if pattern == None:
# we only use ABD if it's enabled, this is an ABD show, AND this is not a multi-ep
if self.show.air_by_date and sickbeard.NAMING_CUSTOM_ABD and not self.relatedEps:
pattern = sickbeard.NAMING_ABD_PATTERN
elif self.show.sports and sickbeard.NAMING_CUSTOM_SPORTS and not self.relatedEps:
pattern = sickbeard.NAMING_SPORTS_PATTERN
elif self.show.anime and sickbeard.NAMING_CUSTOM_ANIME:
pattern = sickbeard.NAMING_ANIME_PATTERN
else:
pattern = sickbeard.NAMING_PATTERN
# split off the dirs only, if they exist
name_groups = re.split(r'[\\/]', pattern)
return helpers.sanitizeFileName(self._format_pattern(name_groups[-1], multi, anime_type))
def rename(self):
"""
Renames an episode file and all related files to the location and filename as specified
in the naming settings.
"""
if not ek.ek(os.path.isfile, self.location):
logger.log(u"Can't perform rename on " + self.location + " when it doesn't exist, skipping", logger.WARNING)
return
proper_path = self.proper_path()
absolute_proper_path = ek.ek(os.path.join, self.show.location, proper_path)
absolute_current_path_no_ext, file_ext = ek.ek(os.path.splitext, self.location)
absolute_current_path_no_ext_length = len(absolute_current_path_no_ext)
related_subs = []
current_path = absolute_current_path_no_ext
if absolute_current_path_no_ext.startswith(self.show.location):
current_path = absolute_current_path_no_ext[len(self.show.location):]
logger.log(u"Renaming/moving episode from the base path " + self.location + " to " + absolute_proper_path,
logger.DEBUG)
# if it's already named correctly then don't do anything
if proper_path == current_path:
logger.log(str(self.indexerid) + u": File " + self.location + " is already named correctly, skipping",
logger.DEBUG)
return
related_files = postProcessor.PostProcessor(self.location).list_associated_files(
self.location, base_name_only=True, subfolders=True)
# NOTE: this is wrong and is the reason post-processing does not move subtitles
if self.show.subtitles and sickbeard.SUBTITLES_DIR != '':
related_subs = postProcessor.PostProcessor(self.location).list_associated_files(sickbeard.SUBTITLES_DIR,
subtitles_only=True, subfolders=True)
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
logger.log(u"Files associated to " + self.location + ": " + str(related_files), logger.DEBUG)
# move the ep file
result = helpers.rename_ep_file(self.location, absolute_proper_path, absolute_current_path_no_ext_length)
# move related files
for cur_related_file in related_files:
# Related files can live in subfolders, which the original code did not handle at all, so work out the subfolder here
cur_related_dir = ek.ek(os.path.dirname, ek.ek(os.path.abspath, cur_related_file))
subfolder = cur_related_dir.replace(ek.ek(os.path.dirname, ek.ek(os.path.abspath, self.location)), '')
#We now have a subfolder. We need to add that to the absolute_proper_path.
#First get the absolute proper-path dir
proper_related_dir = ek.ek(os.path.dirname, ek.ek(os.path.abspath, absolute_proper_path + file_ext))
proper_related_path = absolute_proper_path.replace(proper_related_dir, proper_related_dir + subfolder)
cur_result = helpers.rename_ep_file(cur_related_file, proper_related_path,
absolute_current_path_no_ext_length + len(subfolder))
if not cur_result:
logger.log(str(self.indexerid) + u": Unable to rename file " + cur_related_file, logger.ERROR)
for cur_related_sub in related_subs:
absolute_proper_subs_path = ek.ek(os.path.join, sickbeard.SUBTITLES_DIR, self.formatted_filename())
cur_result = helpers.rename_ep_file(cur_related_sub, absolute_proper_subs_path,
absolute_current_path_no_ext_length)
if not cur_result:
logger.log(str(self.indexerid) + u": Unable to rename file " + cur_related_sub, logger.ERROR)
# save the ep
with self.lock:
if result:
self.location = absolute_proper_path + file_ext
for relEp in self.relatedEps:
relEp.location = absolute_proper_path + file_ext
# in case something changed with the metadata just do a quick check
for curEp in [self] + self.relatedEps:
curEp.checkForMetaFiles()
# save any changes to the database
sql_l = []
with self.lock:
for relEp in [self] + self.relatedEps:
sql_l.append(relEp.get_sql())
if len(sql_l) > 0:
myDB = db.DBConnection()
myDB.mass_action(sql_l)
def airdateModifyStamp(self):
"""
Make the modify date and time of a file reflect the show air date and time.
Note: Also called from postProcessor
"""
hr = min = 0
airs = re.search('.*?(\d{1,2})(?::\s*?(\d{2}))?\s*(pm)?', self.show.airs, re.I)
if airs:
hr = int(airs.group(1))
hr = (12 + hr, hr)[None is airs.group(3)]
hr = (hr, hr - 12)[0 == hr % 12 and 0 != hr]
min = int((airs.group(2), min)[None is airs.group(2)])
airtime = datetime.time(hr, min)
if sickbeard.TIMEZONE_DISPLAY == 'local':
airdatetime = sbdatetime.sbdatetime.convert_to_setting( network_timezones.parse_date_time(datetime.date.toordinal(self.airdate), self.show.airs, self.show.network))
else:
airdatetime = datetime.datetime.combine(self.airdate, airtime).replace(tzinfo=tzlocal())
filemtime = datetime.datetime.fromtimestamp(os.path.getmtime(self.location)).replace(tzinfo=tzlocal())
if filemtime != airdatetime:
import time
airdatetime = airdatetime.timetuple()
logger.log(str(self.show.indexerid) + u": About to modify date of '" + self.location
+ "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.DEBUG)
try:
if helpers.touchFile(self.location, time.mktime(airdatetime)):
logger.log(str(self.show.indexerid) + u": Changed modify date of " + os.path.basename(self.location)
+ " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime))
else:
logger.log(str(self.show.indexerid) + u": Unable to modify date of " + os.path.basename(self.location)
+ " to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.ERROR)
except:
logger.log(str(self.show.indexerid) + u": Failed to modify date of '" + os.path.basename(self.location)
+ "' to show air date " + time.strftime("%b %d,%Y (%H:%M)", airdatetime), logger.ERROR)
def __getstate__(self):
d = dict(self.__dict__)
del d['lock']
return d
def __setstate__(self, d):
d['lock'] = threading.Lock()
self.__dict__.update(d)
|
dannyboi104/SickRage
|
sickbeard/tv.py
|
Python
|
gpl-3.0
| 117,710
|
from __future__ import absolute_import
import six
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://etetoolkit.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2015).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit may be available in the documentation.
#
# More info at http://etetoolkit.org. Contact: huerta@embl.de
#
#
# #END_LICENSE#############################################################
# Backport of OrderedDict() class that runs on Python 2.4, 2.5, 2.6, 2.7 and pypy.
# Passes Python2.7's test suite and incorporates all the latest updates.
try:
from six.moves._thread import get_ident as _get_ident
except ImportError:
from six.moves._dummy_thread import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
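# Editor's note (illustrative sketch of the structure described above): after od['a'] = 1
# and od['b'] = 2 the circular list looks like
#   root   = [link_b, link_a, None]   # [PREV, NEXT, KEY] sentinel
#   link_a = [root,   link_b, 'a']
#   link_b = [link_a, root,   'b']
# so iteration just follows the NEXT pointers from root until it wraps back around.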
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 argument, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in six.itervalues(self.__map):
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
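# Editor's note (illustrative): given od = OrderedDict([('a', 1), ('b', 2), ('c', 3)]),
# od.popitem() returns ('c', 3) (LIFO) and od.popitem(last=False) returns ('a', 1) (FIFO).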
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in list(other.keys()):
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in list(kwds.items()):
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running=None):
'od.__repr__() <==> repr(od)'
if _repr_running is None:
_repr_running = {}
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, list(self.items()))
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and list(self.items()) == list(other.items())
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
|
Unode/ete
|
ete3/tools/ete_build_lib/ordereddict.py
|
Python
|
gpl-3.0
| 10,439
|
# -*- coding: utf-8 -*-
import operator
import os
import re
import subprocess
import time
import urllib
from xml.dom.minidom import parseString as parse_xml
from module.network.CookieJar import CookieJar
from module.network.HTTPRequest import HTTPRequest
from ..internal.Hoster import Hoster
from ..internal.misc import exists, isexecutable, json, reduce, renice, replace_patterns, which
from ..internal.Plugin import Abort, Skip
class BIGHTTPRequest(HTTPRequest):
"""
Overcome HTTPRequest's load() size limit to allow
loading very big web pages by overriding HTTPRequest's write() function
"""
# @TODO: Add 'limit' parameter to HTTPRequest in v0.4.10
def __init__(self, cookies=None, options=None, limit=2000000):
self.limit = limit
HTTPRequest.__init__(self, cookies=cookies, options=options)
def write(self, buf):
""" writes response """
if (self.limit and self.rep.tell() > self.limit) or self.abort:
rep = self.getResponse()
if self.abort:
raise Abort()
f = open("response.dump", "wb")
f.write(rep)
f.close()
raise Exception("Loaded Url exceeded limit")
self.rep.write(buf)
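# Editor's note (illustrative): setup() below swaps this class in for the stock request
# object, roughly:
#   self.req.http = BIGHTTPRequest(cookies=CookieJar(None),
#                                  options=self.pyload.requestFactory.getOptions(),
#                                  limit=2500000)
# so pages larger than the default cap can still be loaded.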
class Ffmpeg(object):
_RE_DURATION = re.compile(r'Duration: (\d{2}):(\d{2}):(\d{2})\.(\d{2}),')
_RE_TIME = re.compile(r'time=(\d{2}):(\d{2}):(\d{2})\.(\d{2})')
_RE_VERSION = re.compile((r'ffmpeg version (.+?) '))
CMD = None
priority = 0
streams = []
start_time = (0, 0)
output_filename = None
error_message = ""
def __init__(self, priority, plugin=None):
self.plugin = plugin
self.priority = priority
self.streams = []
self.start_time = (0, 0)
self.output_filename = None
self.error_message = ""
self.find()
@classmethod
def find(cls):
"""
Check for ffmpeg
"""
if cls.CMD is not None:
return True
try:
if os.name == "nt":
ffmpeg = os.path.join(pypath, "ffmpeg.exe") if isexecutable(os.path.join(pypath, "ffmpeg.exe")) \
else "ffmpeg.exe"
else:
ffmpeg = "ffmpeg"
cmd = which(ffmpeg) or ffmpeg
p = subprocess.Popen([cmd, "-version"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = (_r.strip() if _r else "" for _r in p.communicate())
except OSError:
return False
m = cls._RE_VERSION.search(out)
if m is not None:
cls.VERSION = m.group(1)
cls.CMD = cmd
return True
@property
def found(self):
return self.CMD is not None
def add_stream(self, streams):
if isinstance(streams, list):
self.streams.extend(streams)
else:
self.streams.append(streams)
def set_start_time(self, start_time):
self.start_time = start_time
def set_output_filename(self, output_filename):
self.output_filename = output_filename
def run(self):
if self.CMD is None or self.output_filename is None:
return False
maps = []
args = []
meta = []
for i, stream in enumerate(self.streams):
args.extend(["-i", stream[1]])
maps.extend(["-map", "%s:%s:0" % (i, stream[0])])
if stream[0] == 's':
meta.extend(["-metadata:s:s:0:%s" % i, "language=%s" % stream[2]])
args.extend(maps)
args.extend(meta)
args.extend(["-y",
"-vcodec", "copy",
"-acodec", "copy",
"-scodec", "copy",
"-ss", "00:%s:%s.00" % (self.start_time[0], self.start_time[1]),
"-sub_charenc", "utf8"])
call = [self.CMD] + args + [self.output_filename]
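# Editor's note (illustrative, hypothetical file names): for one video and one audio
# stream with the default start time, `call` ends up roughly as
#   ffmpeg -i video.mp4 -i audio.m4a -map 0:v:0 -map 1:a:0 -y -vcodec copy -acodec copy
#          -scodec copy -ss 00:0:0.00 -sub_charenc utf8 output.mkv
# i.e. the streams are remuxed (copied, not re-encoded) into the output container.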
p = subprocess.Popen(
call,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
renice(p.pid, self.priority)
duration = self._find_duration(p)
if duration:
last_line = self._progress(p, duration)
else:
last_line = ""
out, err = (_r.strip() if _r else "" for _r in p.communicate())
if err or p.returncode:
self.error_message = last_line
return False
else:
self.error_message = ""
return True
def _find_duration(self, process):
duration = 0
while True:
line = process.stderr.readline() #: ffmpeg writes to stderr
#: Quit loop on eof
if not line:
break
m = self._RE_DURATION.search(line)
if m is not None:
duration = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
for i, v in enumerate(m.groups()))
break
return duration
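# Editor's note (illustrative): the weights above convert HH:MM:SS.cc into centiseconds,
# e.g. "Duration: 00:03:25.50" -> 0*360000 + 3*6000 + 25*100 + 50 = 20550, the same unit
# the time= progress lines are converted to in _progress() below.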
def _progress(self, process, duration):
line = ""
last_line = ""
while True:
c = process.stderr.read(1) #: ffmpeg writes to stderr
#: Quit loop on eof
if not c:
break
elif c == "\r":
last_line = line.strip('\r\n')
line = ""
m = self._RE_TIME.search(last_line)
if m is not None:
current_time = sum(int(v) * [60 * 60 * 100, 60 * 100, 100, 1][i]
for i, v in enumerate(m.groups()))
if self.plugin:
progress = current_time * 100 / duration
self.plugin.pyfile.setProgress(progress)
else:
line += c
continue
return last_line #: Last line may contain error message
class YoutubeCom(Hoster):
__name__ = "YoutubeCom"
__type__ = "hoster"
__version__ = "0.68"
__status__ = "testing"
__pattern__ = r'https?://(?:[^/]*\.)?(?:youtu\.be/|youtube\.com/watch\?(?:.*&)?v=)[\w\-]+'
__config__ = [("activated", "bool", "Activated", True),
("quality", "sd;hd;fullhd;240p;360p;480p;720p;1080p;1440p;2160p;3072p;4320p", "Quality Setting", "hd"),
("vfmt", "int", "Video FMT/ITAG Number (0 for auto)", 0),
("afmt", "int", "Audio FMT/ITAG Number (0 for auto)", 0),
(".mp4", "bool", "Allow .mp4", True),
(".flv", "bool", "Allow .flv", True),
(".webm", "bool", "Allow .webm", True),
(".mkv", "bool", "Allow .mkv", True),
(".3gp", "bool", "Allow .3gp", False),
("aac", "bool", "Allow aac audio (DASH video only)", True),
("vorbis", "bool", "Allow vorbis audio (DASH video only)", True),
("opus", "bool", "Allow opus audio (DASH video only)", True),
("ac3", "bool", "Allow ac3 audio (DASH video only)", True),
("dts", "bool", "Allow dts audio (DASH video only)", True),
("3d", "bool", "Prefer 3D", False),
("subs_dl", "off;all_specified;first_available", "Download subtitles", "off"),
("subs_dl_langs", "str", "Subtitle language codes (ISO639-1) to download (comma separated)", ""),
("subs_embed", "bool", "Embed subtitles inside the output file (.mp4 and .mkv only)", False),
("priority", "int", "ffmpeg process priority", 0)]
__description__ = """Youtube.com hoster plugin"""
__license__ = "GPLv3"
__authors__ = [("spoob", "spoob@pyload.org"),
("zoidberg", "zoidberg@mujmail.cz"),
("GammaC0de", "nitzo2001[AT]yahoo[DOT]com")]
URL_REPLACEMENTS = [(r'youtu\.be/', 'youtube.com/watch?v=')]
#: Invalid characters that must be removed from the file name
invalid_chars = u'\u2605:?><"|\\'
#: name, width, height, quality ranking, 3D, type
formats = {
# 3gp
17: {'ext': ".3gp", 'width': 176, 'height': 144, 'qi': 0, '3d': False, 'type': "av"},
36: {'ext': ".3gp", 'width': 400, 'height': 240, 'qi': 1, '3d': False, 'type': "av"},
# flv
5: {'ext': ".flv", 'width': 400, 'height': 240, 'qi': 1, '3d': False, 'type': "av"},
6: {'ext': ".flv", 'width': 640, 'height': 400, 'qi': 4, '3d': False, 'type': "av"},
34: {'ext': ".flv", 'width': 640, 'height': 360, 'qi': 4, '3d': False, 'type': "av"},
35: {'ext': ".flv", 'width': 854, 'height': 480, 'qi': 6, '3d': False, 'type': "av"},
# mp4
83: {'ext': ".mp4", 'width': 400, 'height': 240, 'qi': 1, '3d': True, 'type': "av"},
18: {'ext': ".mp4", 'width': 480, 'height': 360, 'qi': 2, '3d': False, 'type': "av"},
82: {'ext': ".mp4", 'width': 640, 'height': 360, 'qi': 3, '3d': True, 'type': "av"},
22: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': False, 'type': "av"},
136: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': False, 'type': "v"},
84: {'ext': ".mp4", 'width': 1280, 'height': 720, 'qi': 8, '3d': True, 'type': "av"},
37: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "av"},
137: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "v"},
85: {'ext': ".mp4", 'width': 1920, 'height': 1080, 'qi': 9, '3d': True, 'type': "av"},
264: {'ext': ".mp4", 'width': 2560, 'height': 1440, 'qi': 10, '3d': False, 'type': "v"},
266: {'ext': ".mp4", 'width': 3840, 'height': 2160, 'qi': 11, '3d': False, 'type': "v"},
38: {'ext': ".mp4", 'width': 4096, 'height': 3072, 'qi': 12 , '3d': False, 'type': "av"},
# webm
43: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 3, '3d': False, 'type': "av"},
100: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 3, '3d': True, 'type': "av"},
101: {'ext': ".webm", 'width': 640, 'height': 360, 'qi': 4, '3d': True, 'type': "av"},
44: {'ext': ".webm", 'width': 854, 'height': 480, 'qi': 5, '3d': False, 'type': "av"},
45: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 7, '3d': False, 'type': "av"},
247: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 7, '3d': False, 'type': "v"},
102: {'ext': ".webm", 'width': 1280, 'height': 720, 'qi': 8, '3d': True, 'type': "av"},
46: {'ext': ".webm", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "av"},
248: {'ext': ".webm", 'width': 1920, 'height': 1080, 'qi': 9, '3d': False, 'type': "v"},
271: {'ext': ".webm", 'width': 2560, 'height': 1440, 'qi': 10, '3d': False, 'type': "v"},
313: {'ext': ".webm", 'width': 3840, 'height': 2160, 'qi': 11, '3d': False, 'type': "v"},
272: {'ext': ".webm", 'width': 7680, 'height': 4320, 'qi': 13, '3d': False, 'type': "v"},
# audio
139: {'ext': ".mp4", 'qi': 1, 'acodec': "aac", 'type': "a"},
140: {'ext': ".mp4", 'qi': 2, 'acodec': "aac", 'type': "a"},
141: {'ext': ".mp4", 'qi': 3, 'acodec': "aac", 'type': "a"},
256: {'ext': ".mp4", 'qi': 4, 'acodec': "aac", 'type': "a"},
258: {'ext': ".mp4", 'qi': 5, 'acodec': "aac", 'type': "a"},
325: {'ext': ".mp4", 'qi': 6, 'acodec': "dts", 'type': "a"},
328: {'ext': ".mp4", 'qi': 7, 'acodec': "ac3", 'type': "a"},
171: {'ext': ".webm", 'qi': 1, 'acodec': "vorbis", 'type': 'a'},
172: {'ext': ".webm", 'qi': 2, 'acodec': "vorbis", 'type': 'a'},
249: {'ext': ".webm", 'qi': 3, 'acodec': "opus", 'type': 'a'},
250: {'ext': ".webm", 'qi': 4, 'acodec': "opus", 'type': 'a'},
251: {'ext': ".webm", 'qi': 5, 'acodec': "opus", 'type': 'a'}
}
def _decrypt_signature(self, encrypted_sig):
"""Turn the encrypted 's' field into a working signature"""
# try:
# player_url = json.loads(re.search(r'"assets":.+?"js":\s*("[^"]+")',self.data).group(1))
# except (AttributeError, IndexError):
# self.fail(_("Player URL not found"))
player_url = self.player_config['assets']['js']
if player_url.startswith("//"):
player_url = 'https:' + player_url
if not player_url.endswith(".js"):
self.fail(_("Unsupported player type %s") % player_url)
cache_info = self.db.retrieve("cache")
cache_dirty = False
if cache_info is None or 'version' not in cache_info or cache_info[
'version'] != self.__version__:
cache_info = {'version': self.__version__,
'cache': {}}
cache_dirty = True
if player_url in cache_info['cache'] and time.time() < cache_info['cache'][player_url]['time'] + 24 * 60 * 60:
self.log_debug("Using cached decode function to decrypt the URL")
decrypt_func = lambda s: ''.join(s[_i] for _i in cache_info['cache'][player_url]['decrypt_map'])
decrypted_sig = decrypt_func(encrypted_sig)
else:
player_data = self.load(self.fixurl(player_url))
m = re.search(r'\.sig\|\|(?P<sig>[a-zA-Z0-9$]+)\(', player_data) or \
re.search(r'(["\'])signature\1\s*,\s*(?P<sig>[a-zA-Z0-9$]+)\(', player_data)
try:
function_name = m.group('sig')
except (AttributeError, IndexError):
self.fail(_("Signature decode function name not found"))
try:
jsi = JSInterpreter(player_data)
decrypt_func = lambda s: jsi.extract_function(function_name)([s])
#: Since Youtube just scrambles the order of the characters in the signature
#: and does not change any byte value, we can store just a transformation map as a cached function
decrypt_map = [ord(c) for c in decrypt_func(''.join(map(unichr, range(len(encrypted_sig)))))]
cache_info['cache'][player_url] = {'decrypt_map': decrypt_map,
'time': time.time()}
cache_dirty = True
decrypted_sig = decrypt_func(encrypted_sig)
except (JSInterpreterError, AssertionError), e:
self.log_error(_("Signature decode failed"), e)
self.fail(e.message)
#: Remove old records from cache
for _k in list(cache_info['cache'].keys()):
if time.time() >= cache_info['cache'][_k]['time'] + 24 * 60 * 60:
cache_info['cache'].pop(_k, None)
cache_dirty = True
if cache_dirty:
self.db.store("cache", cache_info)
return decrypted_sig
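# Editor's note (illustrative): because the cipher only reorders characters, the cached
# decrypt_map is simply a permutation of indices. A player function that reversed a
# four-character signature would yield decrypt_map == [3, 2, 1, 0], and applying it as
# ''.join(s[i] for i in decrypt_map) reproduces the scramble for any signature of that
# length without re-running the JS interpreter.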
def _handle_video(self):
use3d = self.config.get('3d')
if use3d:
quality = {'sd': 82, 'hd': 84, 'fullhd': 85, '240p': 83, '360p': 82, '480p': 82, '720p': 84,
'1080p': 85, '1440p': 85, '2160p': 85, '3072p': 85, '4320p': 85}
else:
quality = {'sd': 18, 'hd': 22, 'fullhd': 37, '240p': 5, '360p': 18, '480p': 35, '720p': 22,
'1080p': 37, '1440p': 264, '2160p': 266, '3072p': 38, '4320p': 272}
desired_fmt = self.config.get('vfmt') or quality.get(self.config.get('quality'), 0)
is_video = lambda x: 'v' in self.formats[x]['type']
if desired_fmt not in self.formats or not is_video(desired_fmt):
self.log_warning(_("VIDEO ITAG %d unknown, using default") % desired_fmt)
desired_fmt = 22
#: Build dictionary of supported itags (3D/2D)
allowed_suffix = lambda x: self.config.get(self.formats[x]['ext'])
video_streams = dict([(_s[0], _s[1:]) for _s in self.streams
if _s[0] in self.formats and allowed_suffix(_s[0]) and
is_video(_s[0]) and self.formats[_s[0]]['3d'] == use3d])
if not video_streams:
self.fail(_("No available video stream meets your preferences"))
self.log_debug("DESIRED VIDEO STREAM: ITAG:%d (%s %dx%d Q:%d 3D:%s) %sfound, %sallowed" %
(desired_fmt, self.formats[desired_fmt]['ext'], self.formats[desired_fmt]['width'],
self.formats[desired_fmt]['height'], self.formats[desired_fmt]['qi'],
self.formats[desired_fmt]['3d'], "" if desired_fmt in video_streams else "NOT ",
"" if allowed_suffix(desired_fmt) else "NOT "))
#: Return fmt nearest to quality index
if desired_fmt in video_streams and allowed_suffix(desired_fmt):
chosen_fmt = desired_fmt
else:
quality_index = lambda x: self.formats[x]['qi'] #: Select quality index
quality_distance = lambda x, y: abs(quality_index(x) - quality_index(y))
self.log_debug("Choosing nearest stream: %s" % [(_s, allowed_suffix(_s), quality_distance(_s, desired_fmt))
for _s in video_streams.keys()])
chosen_fmt = reduce(lambda x, y: x if quality_distance(x, desired_fmt) <= quality_distance(y, desired_fmt)
and quality_index(x) > quality_index(y) else y, video_streams.keys())
self.log_debug("CHOSEN VIDEO STREAM: ITAG:%d (%s %dx%d Q:%d 3D:%s)" %
(chosen_fmt, self.formats[chosen_fmt]['ext'], self.formats[chosen_fmt]['width'],
self.formats[chosen_fmt]['height'], self.formats[chosen_fmt]['qi'],
self.formats[chosen_fmt]['3d']))
url = video_streams[chosen_fmt][0]
if video_streams[chosen_fmt][1]:
if video_streams[chosen_fmt][2]:
signature = self._decrypt_signature(video_streams[chosen_fmt][1])
else:
signature = video_streams[chosen_fmt][1]
url += "&signature=" + signature
if "&ratebypass=" not in url:
url += "&ratebypass=yes"
file_suffix = self.formats[chosen_fmt]['ext'] if chosen_fmt in self.formats else ".flv"
if 'a' not in self.formats[chosen_fmt]['type']:
file_suffix = ".video" + file_suffix
self.pyfile.name = self.file_name + file_suffix
try:
filename = self.download(url, disposition=False)
except Skip, e:
filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
self.pyfile.name)
self.log_info(_("Download skipped: %s due to %s") % (self.pyfile.name, e.message))
return filename, chosen_fmt
def _handle_audio(self, video_fmt):
desired_fmt = self.config.get('afmt') or 141
is_audio = lambda x: self.formats[x]['type'] == "a"
if desired_fmt not in self.formats or not is_audio(desired_fmt):
self.log_warning(_("AUDIO ITAG %d unknown, using default") % desired_fmt)
desired_fmt = 141
#: Build dictionary of supported audio itags
allowed_codec = lambda x: self.config.get(self.formats[x]['acodec'])
allowed_suffix = lambda x: self.config.get(".mkv") or \
self.config.get(self.formats[x]['ext']) and \
self.formats[x]['ext'] == self.formats[video_fmt]['ext']
audio_streams = dict([(_s[0], _s[1:]) for _s in self.streams
if _s[0] in self.formats and is_audio(_s[0]) and
allowed_codec(_s[0]) and allowed_suffix(_s[0])])
if not audio_streams:
self.fail(_("No available audio stream meets your preferences"))
if desired_fmt in audio_streams and allowed_suffix(desired_fmt):
chosen_fmt = desired_fmt
else:
quality_index = lambda x: self.formats[x]['qi'] #: Select quality index
quality_distance = lambda x, y: abs(quality_index(x) - quality_index(y))
self.log_debug("Choosing nearest stream: %s" % [(_s, allowed_suffix(_s), quality_distance(_s, desired_fmt))
for _s in audio_streams.keys()])
chosen_fmt = reduce(lambda x, y: x if quality_distance(x, desired_fmt) <= quality_distance(y, desired_fmt)
and quality_index(x) > quality_index(y) else y, audio_streams.keys())
self.log_debug("CHOSEN AUDIO STREAM: ITAG:%d (%s %s Q:%d)" %
(chosen_fmt, self.formats[chosen_fmt]['ext'], self.formats[chosen_fmt]['acodec'],
self.formats[chosen_fmt]['qi']))
url = audio_streams[chosen_fmt][0]
if audio_streams[chosen_fmt][1]:
if audio_streams[chosen_fmt][2]:
signature = self._decrypt_signature(audio_streams[chosen_fmt][1])
else:
signature = audio_streams[chosen_fmt][1]
url += "&signature=" + signature
if "&ratebypass=" not in url:
url += "&ratebypass=yes"
file_suffix = ".audio" + self.formats[chosen_fmt]['ext'] if chosen_fmt in self.formats else ".m4a"
self.pyfile.name = self.file_name + file_suffix
try:
filename = self.download(url, disposition=False)
except Skip, e:
filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
self.pyfile.name)
self.log_info(_("Download skipped: %s due to %s") % (self.pyfile.name, e.message))
return filename, chosen_fmt
def _handle_subtitles(self):
def timedtext_to_srt(timedtext):
def _format_srt_time(millisec):
sec, milli = divmod(millisec, 1000)
m, s = divmod(int(sec), 60)
h, m = divmod(m, 60)
return "%02d:%02d:%02d,%s" % (h, m, s, milli)
i = 1
srt = ""
dom = parse_xml(timedtext)
body = dom.getElementsByTagName("body")[0]
paras = body.getElementsByTagName("p")
for para in paras:
srt += str(i) + "\n"
srt += _format_srt_time(int(para.attributes['t'].value)) + ' --> ' + \
_format_srt_time(int(para.attributes['t'].value) + int(para.attributes['d'].value)) + "\n"
for child in para.childNodes:
if child.nodeName == 'br':
srt += "\n"
elif child.nodeName == '#text':
srt += unicode(child.data)
srt += "\n\n"
i += 1
return srt
srt_files =[]
try:
subs = json.loads(self.player_config['args']['player_response'])['captions']['playerCaptionsTracklistRenderer']['captionTracks']
subtitles_urls = dict([(_subtitle['languageCode'],
urllib.unquote(_subtitle['baseUrl']).decode('unicode-escape') + "&fmt=3")
for _subtitle in subs])
self.log_debug("AVAILABLE SUBTITLES: %s" % subtitles_urls.keys() or "None")
except KeyError:
self.log_debug("AVAILABLE SUBTITLES: None")
return srt_files
subs_dl = self.config.get('subs_dl')
if subs_dl != "off":
subs_dl_langs = [_x.strip() for _x in self.config.get('subs_dl_langs', "").split(',') if _x.strip()]
if subs_dl_langs:
# Download only listed subtitles (`subs_dl_langs` config gives the priority)
for _lang in subs_dl_langs:
if _lang in subtitles_urls:
srt_filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
os.path.splitext(self.file_name)[0] + "." + _lang + ".srt")
if self.pyload.config.get('download', 'skip_existing') and \
exists(srt_filename) and os.stat(srt_filename).st_size != 0:
self.log_info("Download skipped: %s due to File exists" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
continue
timed_text = self.load(subtitles_urls[_lang], decode=False)
srt = timedtext_to_srt(timed_text)
with open(srt_filename, "w") as f:
f.write(srt.encode('utf-8'))
self.set_permissions(srt_filename)
self.log_debug("Saved subtitle: %s" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _lang))
if subs_dl == "first_available":
break
else:
# Download any available subtitle
for _subtitle in subtitles_urls.items():
srt_filename = os.path.join(self.pyload.config.get("general", "download_folder"),
self.pyfile.package().folder,
os.path.splitext(self.file_name)[0] + "." + _subtitle[0] + ".srt")
if self.pyload.config.get('download', 'skip_existing') and \
exists(srt_filename) and os.stat(srt_filename).st_size != 0:
self.log_info("Download skipped: %s due to File exists" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _subtitle[0]))
continue
timed_text = self.load(_subtitle[1], decode=False)
srt = timedtext_to_srt(timed_text)
with open(srt_filename, "w") as f:
f.write(srt.encode('utf-8'))
self.set_permissions(srt_filename)
self.log_debug("Saved subtitle: %s" % os.path.basename(srt_filename))
srt_files.append((srt_filename, _subtitle[0]))
if subs_dl == "first_available":
break
return srt_files
def _postprocess(self, video_filename, audio_filename, subtitles_files):
final_filename = video_filename
subs_embed = self.config.get("subs_embed")
self.pyfile.setCustomStatus("postprocessing")
self.pyfile.setProgress(0)
if self.ffmpeg.found:
if audio_filename is not None:
video_suffix = os.path.splitext(video_filename)[1]
final_filename = os.path.join(os.path.dirname(video_filename),
self.file_name +
(video_suffix if video_suffix == os.path.splitext(audio_filename)[1]
else ".mkv"))
self.ffmpeg.add_stream(('v', video_filename))
self.ffmpeg.add_stream(('a', audio_filename))
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.ffmpeg.add_stream(('s',) + subtitle)
self.ffmpeg.set_start_time(self.start_time)
self.ffmpeg.set_output_filename(final_filename)
self.pyfile.name = os.path.basename(final_filename)
self.pyfile.size = os.path.getsize(video_filename) + \
os.path.getsize(audio_filename) #: Just an estimate
if self.ffmpeg.run():
self.remove(video_filename, trash=False)
self.remove(audio_filename, trash=False)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.remove(subtitle[0])
else:
self.log_warning(_("ffmpeg error"), self.ffmpeg.error_message)
final_filename = video_filename
elif self.start_time[0] != 0 or self.start_time[1] != 0 or subtitles_files and subs_embed:
inputfile = video_filename + "_"
final_filename = video_filename
os.rename(video_filename, inputfile)
self.ffmpeg.add_stream(('v', video_filename))
self.ffmpeg.set_start_time(self.start_time)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.ffmpeg.add_stream(('s', subtitle))
self.pyfile.name = os.path.basename(final_filename)
self.pyfile.size = os.path.getsize(inputfile) #: Just an estimate
if self.ffmpeg.run():
self.remove(inputfile, trash=False)
if subtitles_files and subs_embed:
for subtitle in subtitles_files:
self.remove(subtitle[0])
else:
self.log_warning(_("ffmpeg error"), self.ffmpeg.error_message)
else:
if audio_filename is not None:
self.log_warning("ffmpeg is not installed, video and audio files will not be merged")
if subtitles_files and self.config.get("subs_embed"):
self.log_warning("ffmpeg is not installed, subtitles files will not be embedded")
self.pyfile.setProgress(100)
self.set_permissions(final_filename)
return final_filename
def setup(self):
self.resume_download = True
self.multiDL = True
try:
self.req.http.close()
except Exception:
pass
self.req.http = BIGHTTPRequest(
cookies=CookieJar(None),
options=self.pyload.requestFactory.getOptions(),
limit=2500000)
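        # Swap in a request object that tolerates very large pages (the watch
        # page HTML easily exceeds the default buffer); 'limit' is presumably
        # the maximum response size in bytes.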
def process(self, pyfile):
pyfile.url = replace_patterns(pyfile.url, self.URL_REPLACEMENTS)
self.data = self.load(pyfile.url)
if re.search(r'<div id="player-unavailable" class="\s*player-width player-height\s*(?:player-unavailable\s*)?">',
self.data) or '"playabilityStatus":{"status":"ERROR"' in self.data:
self.offline()
if "We have been receiving a large volume of requests from your network." in self.data:
self.temp_offline()
m = re.search(r'ytplayer.config = ({.+?});', self.data)
if m is None:
self.fail(_("Player config pattern not found"))
self.player_config = json.loads(m.group(1))
        self.ffmpeg = Ffmpeg(self.config.get('priority'), self)
#: Set file name
self.file_name = self.player_config['args']['title']
#: Check for start time
self.start_time = (0, 0)
m = re.search(r't=(?:(\d+)m)?(\d+)s', pyfile.url)
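        # URLs may carry a start offset such as 't=1m30s' or 't=45s'; group 1 is
        # the (optional) minutes part and group 2 the seconds part.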
if self.ffmpeg and m:
self.start_time = tuple(map(lambda _x: 0 if _x is None else int(_x), m.groups()))
self.file_name += " (starting at %sm%ss)" % (self.start_time[0], self.start_time[1])
#: Cleaning invalid characters from the file name
self.file_name = self.file_name.encode('ascii', 'replace')
for c in self.invalid_chars:
self.file_name = self.file_name.replace(c, '_')
#: Parse available streams
streams_keys = ['url_encoded_fmt_stream_map']
if 'adaptive_fmts' in self.player_config['args']:
streams_keys.append('adaptive_fmts')
self.streams = []
for streams_key in streams_keys:
streams = self.player_config['args'][streams_key]
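            # Each stream map is a comma-separated list of streams; every stream
            # is an '&'-separated list of 'key=value' pairs. Keep a tuple of
            # (itag, url, signature, signature_is_encrypted) per stream.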
streams = [_s.split('&') for _s in streams.split(',')]
streams = [dict((_x.split('=', 1)) for _x in _s) for _s in streams]
streams = [(int(_s['itag']),
urllib.unquote(_s['url']),
_s.get('s', _s.get('sig', None)),
True if 's' in _s else False)
for _s in streams]
self.streams += streams
self.log_debug("AVAILABLE STREAMS: %s" % [_s[0] for _s in self.streams])
video_filename, video_itag = self._handle_video()
has_audio = 'a' in self.formats[video_itag]['type']
if not has_audio:
audio_filename, audio_itag = self._handle_audio(video_itag)
else:
audio_filename = None
subtitles_files = self._handle_subtitles()
final_filename = self._postprocess(video_filename,
audio_filename,
subtitles_files)
#: Everything is finished and final name can be set
pyfile.name = os.path.basename(final_filename)
pyfile.size = os.path.getsize(final_filename)
self.last_download = final_filename
"""Credit to this awesome piece of code below goes to the 'youtube_dl' project, kudos!"""
class JSInterpreterError(Exception):
pass
class JSInterpreter(object):
def __init__(self, code, objects=None):
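        # Binary operators ordered from lowest to highest precedence: since an
        # expression is split on the first operator matched from this list, the
        # lowest-precedence operator ends up as the outermost operation.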
self._OPERATORS = [
('|', operator.or_),
('^', operator.xor),
('&', operator.and_),
('>>', operator.rshift),
('<<', operator.lshift),
('-', operator.sub),
('+', operator.add),
('%', operator.mod),
('/', operator.truediv),
('*', operator.mul),
]
self._ASSIGN_OPERATORS = [(op + '=', opfunc)
for op, opfunc in self._OPERATORS]
self._ASSIGN_OPERATORS.append(('=', lambda cur, right: right))
self._VARNAME_PATTERN = r'[a-zA-Z_$][a-zA-Z_$0-9]*'
if objects is None:
objects = {}
self.code = code
self._functions = {}
self._objects = objects
def interpret_statement(self, stmt, local_vars, allow_recursion=100):
if allow_recursion < 0:
raise JSInterpreterError('Recursion limit reached')
should_abort = False
stmt = stmt.lstrip()
stmt_m = re.match(r'var\s', stmt)
if stmt_m:
expr = stmt[len(stmt_m.group(0)):]
else:
return_m = re.match(r'return(?:\s+|$)', stmt)
if return_m:
expr = stmt[len(return_m.group(0)):]
should_abort = True
else:
# Try interpreting it as an expression
expr = stmt
v = self.interpret_expression(expr, local_vars, allow_recursion)
return v, should_abort
def interpret_expression(self, expr, local_vars, allow_recursion):
expr = expr.strip()
if expr == '': # Empty expression
return None
if expr.startswith('('):
parens_count = 0
for m in re.finditer(r'[()]', expr):
if m.group(0) == '(':
parens_count += 1
else:
parens_count -= 1
if parens_count == 0:
sub_expr = expr[1:m.start()]
sub_result = self.interpret_expression(sub_expr, local_vars, allow_recursion)
remaining_expr = expr[m.end():].strip()
if not remaining_expr:
return sub_result
else:
expr = json.dumps(sub_result) + remaining_expr
break
else:
raise JSInterpreterError('Premature end of parens in %r' % expr)
for op, opfunc in self._ASSIGN_OPERATORS:
m = re.match(r'(?x)(?P<out>%s)(?:\[(?P<index>[^\]]+?)\])?\s*%s(?P<expr>.*)$' %
(self._VARNAME_PATTERN, re.escape(op)), expr)
if m is None:
continue
right_val = self.interpret_expression(m.group('expr'), local_vars, allow_recursion - 1)
if m.groupdict().get('index'):
lvar = local_vars[m.group('out')]
idx = self.interpret_expression(m.group('index'), local_vars, allow_recursion)
assert isinstance(idx, int)
cur = lvar[idx]
val = opfunc(cur, right_val)
lvar[idx] = val
return val
else:
cur = local_vars.get(m.group('out'))
val = opfunc(cur, right_val)
local_vars[m.group('out')] = val
return val
if expr.isdigit():
return int(expr)
var_m = re.match(r'(?!if|return|true|false)(?P<name>%s)$' % self._VARNAME_PATTERN, expr)
if var_m:
return local_vars[var_m.group('name')]
try:
return json.loads(expr)
except ValueError:
pass
m = re.match(r'(?P<var>%s)\.(?P<member>[^(]+)(?:\(+(?P<args>[^()]*)\))?$' % self._VARNAME_PATTERN, expr)
if m is not None:
variable = m.group('var')
member = m.group('member')
arg_str = m.group('args')
if variable in local_vars:
obj = local_vars[variable]
else:
if variable not in self._objects:
self._objects[variable] = self.extract_object(variable)
obj = self._objects[variable]
if arg_str is None:
# Member access
if member == 'length':
return len(obj)
return obj[member]
assert expr.endswith(')')
# Function call
if arg_str == '':
argvals = tuple()
else:
argvals = tuple(self.interpret_expression(v, local_vars, allow_recursion) for v in arg_str.split(','))
if member == 'split':
assert argvals == ('',)
return list(obj)
if member == 'join':
assert len(argvals) == 1
return argvals[0].join(obj)
if member == 'reverse':
assert len(argvals) == 0
obj.reverse()
return obj
if member == 'slice':
assert len(argvals) == 1
return obj[argvals[0]:]
if member == 'splice':
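                # Emulate JavaScript's two-argument Array.splice(index, howMany):
                # remove the elements in place and return the removed slice.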
assert isinstance(obj, list)
index, howMany = argvals
res = []
for i in range(index, min(index + howMany, len(obj))):
res.append(obj.pop(index))
return res
return obj[member](argvals)
m = re.match(r'(?P<in>%s)\[(?P<idx>.+)\]$' % self._VARNAME_PATTERN, expr)
if m is not None:
val = local_vars[m.group('in')]
idx = self.interpret_expression(m.group('idx'), local_vars, allow_recursion - 1)
return val[idx]
for op, opfunc in self._OPERATORS:
m = re.match(r'(?P<x>.+?)%s(?P<y>.+)' % re.escape(op), expr)
if m is None:
continue
x, abort = self.interpret_statement(m.group('x'), local_vars, allow_recursion - 1)
if abort:
raise JSInterpreterError('Premature left-side return of %s in %r' % (op, expr))
y, abort = self.interpret_statement(m.group('y'), local_vars, allow_recursion - 1)
if abort:
raise JSInterpreterError('Premature right-side return of %s in %r' % (op, expr))
return opfunc(x, y)
m = re.match(r'^(?P<func>%s)\((?P<args>[a-zA-Z0-9_$,]+)\)$' % self._VARNAME_PATTERN, expr)
if m is not None:
fname = m.group('func')
argvals = tuple(int(v) if v.isdigit() else local_vars[v]
for v in m.group('args').split(','))
if fname not in self._functions:
self._functions[fname] = self.extract_function(fname)
return self._functions[fname](argvals)
raise JSInterpreterError('Unsupported JS expression %r' % expr)
def extract_object(self, objname):
obj = {}
obj_m = re.search(r'(?:var\s+)?%s\s*=\s*\{\s*(?P<fields>([a-zA-Z$0-9]+\s*:\s*function\(.*?\)\s*\{.*?\}(?:,\s*)?)*)\}\s*;'
% re.escape(objname), self.code)
fields = obj_m.group('fields')
# Currently, it only supports function definitions
fields_m = re.finditer(r'(?P<key>[a-zA-Z$0-9]+)\s*:\s*function\((?P<args>[a-z,]+)\){(?P<code>[^}]+)}', fields)
for f in fields_m:
argnames = f.group('args').split(',')
obj[f.group('key')] = self.build_function(argnames, f.group('code'))
return obj
def extract_function(self, function_name):
func_m = re.search(r'(?x)(?:function\s+%s|[{;,]\s*%s\s*=\s*function|var\s+%s\s*=\s*function)\s*\((?P<args>[^)]*)\)\s*\{(?P<code>[^}]+)\}'
% (re.escape(function_name), re.escape(function_name), re.escape(function_name)), self.code)
if func_m is None:
raise JSInterpreterError('Could not find JS function %r' % function_name)
argnames = func_m.group('args').split(',')
return self.build_function(argnames, func_m.group('code'))
def call_function(self, function_name, *args):
f = self.extract_function(function_name)
return f(args)
def build_function(self, argnames, code):
def resf(argvals):
local_vars = dict(zip(argnames, argvals))
for stmt in code.split(';'):
res, abort = self.interpret_statement(stmt, local_vars)
if abort:
break
return res
return resf
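# Illustrative usage only (the names below are hypothetical, not part of the
# plugin): the interpreter is meant for small helper functions found in
# YouTube's player JS, such as the signature-decipher routine, e.g.
#
#   jsi = JSInterpreter(player_js_source)
#   decipher = jsi.extract_function('xy')   # 'xy' stands in for the real name
#   plain_sig = decipher([scrambled_sig])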
|
Arno-Nymous/pyload
|
module/plugins/hoster/YoutubeCom.py
|
Python
|
gpl-3.0
| 42,175
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import django_extensions.db.fields
class Migration(migrations.Migration):
dependencies = [
('talks', '0002_auto_20150808_2108'),
]
operations = [
migrations.AlterField(
model_name='speaker',
name='slug',
field=django_extensions.db.fields.AutoSlugField(verbose_name='Slug', blank=True, populate_from='get_name', editable=False),
),
migrations.AlterField(
model_name='talk',
name='speaker',
field=models.ForeignKey(null=True, blank=True, related_name='talks', to='talks.Speaker'),
),
]
|
jgmize/tulsawebdevs.org
|
talks/migrations/0003_auto_20150816_2148.py
|
Python
|
gpl-3.0
| 722
|
from django.test.testcases import SimpleTestCase
from publicweb.extra_models import (NotificationSettings, OrganizationSettings,
NO_NOTIFICATIONS, FEEDBACK_MAJOR_CHANGES)
from django.contrib.auth.models import User, AnonymousUser
from organizations.models import Organization
from django.db.models.fields.related import OneToOneField
from publicweb.tests.factories import UserFactory, OrganizationFactory
from mock import patch, MagicMock
from django.test.client import RequestFactory
from publicweb.views import UserNotificationSettings
from publicweb.forms import NotificationSettingsForm
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
def create_fake_organization(**kwargs):
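    # Build (but never save) an Organization instance so the tests stay off the database.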
return OrganizationFactory.build(**kwargs)
class SettingsTest(SimpleTestCase):
def test_notification_settings_have_user_field(self):
self.assertTrue(hasattr(NotificationSettings, 'user'))
def test_notification_settings_are_linked_to_user(self):
self.assertEqual(NotificationSettings.user.field.rel.to, User)
def test_notification_settings_have_organization_field(self):
self.assertTrue(hasattr(NotificationSettings, 'organization'))
def test_notification_settings_are_linked_to_organization(self):
self.assertEqual(
NotificationSettings.organization.field.rel.to, Organization)
def test_organization_settings_have_organization_field(self):
self.assertTrue(hasattr(OrganizationSettings, 'organization'))
def test_organization_settings_are_linked_to_organization(self):
self.assertEqual(
OrganizationSettings.organization.field.rel.to, Organization)
def test_each_organization_has_only_one_set_of_settings(self):
self.assertIsInstance(
OrganizationSettings.organization.field, OneToOneField)
def test_notification_settings_are_unique_for_an_organization_and_user(self):
self.assertEqual((('user', 'organization'),),
NotificationSettings()._meta.unique_together)
    def test_notification_settings_default_value_is_main_items_only(self):
the_settings = NotificationSettings()
self.assertEqual(FEEDBACK_MAJOR_CHANGES,
the_settings.notification_level)
@patch('publicweb.views.Organization.objects',
new=MagicMock(
spec=Organization.objects,
get=create_fake_organization,
filter=create_fake_organization
)
)
def test_notification_settings_view_uses_a_form(self):
user = UserFactory.build(id=1)
organization = create_fake_organization(id=2, slug='test')
request = RequestFactory().get('/')
request.user = user
context = UserNotificationSettings.as_view()(
request,
org_slug=organization.slug
).context_data
self.assertIn('form', context)
    def test_notification_settings_view_redirects_to_organization_list(self):
notification_settings_view = UserNotificationSettings()
self.assertEqual(reverse('organization_list'),
notification_settings_view.get_success_url())
def test_user_notification_settings_view_context_contains_organisation(self):
notification_settings_view = UserNotificationSettings()
notification_settings_view.object = MagicMock(spec=NotificationSettings)
notification_settings_view.organization = create_fake_organization(id=2)
context = notification_settings_view.get_context_data()
self.assertIn('organization', context)
self.assertTrue(context['organization'])
@patch('publicweb.views.Organization.objects',
new=MagicMock(
spec=Organization.objects,
get=create_fake_organization,
filter=create_fake_organization
)
)
def test_notification_settings_view_uses_notification_settings_form(self):
user = UserFactory.build(id=1)
organization = create_fake_organization(id=2, slug='test')
request = RequestFactory().get('/')
request.user = user
context = UserNotificationSettings.as_view()(
request,
org_slug=organization.slug
).context_data
self.assertIsInstance(context['form'], NotificationSettingsForm)
def test_notification_settings_view_requires_login(self):
request = RequestFactory().get('/')
user = AnonymousUser()
organization = create_fake_organization(id=2)
request.user = user
response = UserNotificationSettings.as_view()(request,
organization=organization.id)
self.assertIsInstance(response, HttpResponseRedirect)
@patch('publicweb.views.Organization.objects',
new=MagicMock(
spec=Organization.objects,
get=create_fake_organization,
filter=create_fake_organization
)
)
@patch('publicweb.views.UserNotificationSettings.model',
return_value=MagicMock(
spec=NotificationSettings,
_meta=MagicMock(fields=[], many_to_many=[]),
root_id=None
)
)
def test_posting_valid_data_saves_settings(self, settings_obj):
organization = create_fake_organization(id=2, slug='test')
request = RequestFactory().post(
reverse('notification_settings', args=[organization.slug]),
{'notification_level': unicode(NO_NOTIFICATIONS)}
)
user = UserFactory.build(id=1)
request.user = user
        # This patch depends on the UserNotificationSettings.model patch
# It needs to return the object created by that patch, which is passed
# in as a parameter.
# The only way I've found to handle the dependency is to do this patch
# here
with patch('publicweb.views.UserNotificationSettings.model.objects',
get=lambda organization, user: settings_obj):
UserNotificationSettings.as_view()(
request,
org_slug=organization.slug
)
self.assertTrue(settings_obj.save.called)
@patch('publicweb.views.Organization.objects',
new=MagicMock(
spec=Organization.objects,
get=create_fake_organization,
filter=create_fake_organization
)
)
@patch('publicweb.views.UserNotificationSettings.model',
return_value=MagicMock(
spec=NotificationSettings,
_meta=MagicMock(fields=[], many_to_many=[]),
root_id=None
)
)
def test_posting_invalid_data_returns_form_with_errors(self, settings_obj):
user = UserFactory.build(id=1)
organization = create_fake_organization(id=2, slug='test')
request = RequestFactory().post(
reverse('notification_settings', args=[organization.id]))
request.user = user
        # This patch depends on the UserNotificationSettings.model patch
# It needs to return the object created by that patch, which is passed
# in as a parameter.
# The only way I've found to handle the dependency is to do this patch
# here
with patch('publicweb.views.UserNotificationSettings.model.objects',
get=lambda organization, user: settings_obj):
response = UserNotificationSettings.as_view()(
request,
org_slug=organization.slug
)
self.assertIn('form', response.context_data)
self.assertTrue(response.context_data['form'].errors)
@patch('publicweb.views.Organization.objects',
new=MagicMock(
spec=Organization.objects,
get=create_fake_organization,
filter=create_fake_organization
)
)
@patch('publicweb.views.UserNotificationSettings.model',
return_value=MagicMock(
spec=NotificationSettings,
_meta=MagicMock(fields=[], many_to_many=[]),
root_id=None
)
)
def test_cancel_doesnt_save_settings(self, settings_obj):
user = UserFactory.build(id=1)
organization = create_fake_organization(id=2, slug='test')
request = RequestFactory().post(
reverse('notification_settings', args=[organization.id]),
{
'notification_level': unicode(NO_NOTIFICATIONS),
'submit': "Cancel"
}
)
request.user = user
        # This patch depends on the UserNotificationSettings.model patch
# It needs to return the object created by that patch, which is passed
# in as a parameter.
# The only way I've found to handle the dependency is to do this patch
# here
with patch('publicweb.views.UserNotificationSettings.model.objects',
get=lambda organization, user: settings_obj):
UserNotificationSettings.as_view()(
request, org_slug=organization.slug
)
self.assertFalse(settings_obj.save.called)
|
aptivate/econsensus
|
django/econsensus/publicweb/tests/settings_test.py
|
Python
|
gpl-3.0
| 9,245
|
# Copyright (c) 2018, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
class TestCourseTopic(unittest.TestCase):
pass
|
frappe/erpnext
|
erpnext/education/doctype/course_topic/test_course_topic.py
|
Python
|
gpl-3.0
| 154
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import django_mysql.models
class Migration(migrations.Migration):
dependencies = [
('base', '0008_auto_20160331_1405'),
]
operations = [
migrations.AddField(
model_name='snippet',
name='client_options',
field=django_mysql.models.DynamicField(default=None),
),
]
|
akatsoulas/snippets-service
|
snippets/base/migrations/0009_snippet_client_options.py
|
Python
|
mpl-2.0
| 448
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-09 22:56
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('judge', '0038_profile_problem_count'),
]
operations = [
migrations.RemoveField(
model_name='contest',
name='is_external',
),
]
|
monouno/site
|
judge/migrations/0039_remove_contest_is_external.py
|
Python
|
agpl-3.0
| 399
|
"""Points and related utilities
"""
from ctypes import c_double
from ctypes import cast, POINTER
from shapely.coords import required
from shapely.geos import lgeos, DimensionError
from shapely.geometry.base import BaseGeometry
from shapely.geometry.proxy import CachingGeometryProxy
__all__ = ['Point', 'asPoint']
class Point(BaseGeometry):
"""
A zero dimensional feature
A point has zero length and zero area.
Attributes
----------
x, y, z : float
Coordinate values
Example
-------
>>> p = Point(1.0, -1.0)
>>> print p
POINT (1.0000000000000000 -1.0000000000000000)
>>> p.y
-1.0
>>> p.x
1.0
"""
def __init__(self, *args):
"""
Parameters
----------
There are 2 cases:
1) 1 parameter: this must satisfy the numpy array protocol.
2) 2 or more parameters: x, y, z : float
Easting, northing, and elevation.
"""
BaseGeometry.__init__(self)
if len(args) > 0:
self._set_coords(*args)
# Coordinate getters and setters
@property
def x(self):
"""Return x coordinate."""
return self.coords[0][0]
@property
def y(self):
"""Return y coordinate."""
return self.coords[0][1]
@property
def z(self):
"""Return z coordinate."""
if self._ndim != 3:
raise DimensionError("This point has no z coordinate.")
return self.coords[0][2]
@property
def __geo_interface__(self):
return {
'type': 'Point',
'coordinates': self.coords[0]
}
@property
def ctypes(self):
if not self._ctypes_data:
array_type = c_double * self._ndim
array = array_type()
xy = self.coords[0]
array[0] = xy[0]
array[1] = xy[1]
if self._ndim == 3:
array[2] = xy[2]
self._ctypes_data = array
return self._ctypes_data
def array_interface(self):
"""Provide the Numpy array protocol."""
ai = self.array_interface_base
ai.update({'shape': (self._ndim,)})
return ai
__array_interface__ = property(array_interface)
@property
def bounds(self):
xy = self.coords[0]
return (xy[0], xy[1], xy[0], xy[1])
# Coordinate access
def _set_coords(self, *args):
self.empty()
if len(args) == 1:
self._geom, self._ndim = geos_point_from_py(args[0])
else:
self._geom, self._ndim = geos_point_from_py(tuple(args))
coords = property(BaseGeometry._get_coords, _set_coords)
@property
def xy(self):
"""Separate arrays of X and Y coordinate values
Example:
>>> x, y = Point(0, 0).xy
>>> list(x)
[0.0]
>>> list(y)
[0.0]
"""
return self.coords.xy
class PointAdapter(CachingGeometryProxy, Point):
_owned = False
def __init__(self, context):
self.context = context
self.factory = geos_point_from_py
@property
def _ndim(self):
try:
# From array protocol
array = self.context.__array_interface__
n = array['shape'][0]
assert n == 2 or n == 3
return n
except AttributeError:
# Fall back on list
return len(self.context)
@property
def __array_interface__(self):
"""Provide the Numpy array protocol."""
try:
return self.context.__array_interface__
except AttributeError:
return self.array_interface()
_get_coords = BaseGeometry._get_coords
def _set_coords(self, ob):
raise NotImplementedError("Adapters can not modify their sources")
coords = property(_get_coords)
def asPoint(context):
"""Adapt an object to the Point interface"""
return PointAdapter(context)
def geos_point_from_py(ob, update_geom=None, update_ndim=0):
"""Create a GEOS geom from an object that is a coordinate sequence
or that provides the array interface.
Returns the GEOS geometry and the number of its dimensions.
"""
# If numpy is present, we use numpy.require to ensure that we have a
    # C-contiguous array that owns its data. View data will be copied.
ob = required(ob)
try:
# From array protocol
array = ob.__array_interface__
assert len(array['shape']) == 1
n = array['shape'][0]
assert n == 2 or n == 3
dz = None
da = array['data']
if type(da) == type((0,)):
cdata = da[0]
# If we had numpy, we would do
# from numpy.ctypeslib import as_ctypes
# cp = as_ctypes(ob) - check that code?
cp = cast(cdata, POINTER(c_double))
dx = c_double(cp[0])
dy = c_double(cp[1])
if n == 3:
dz = c_double(cp[2])
ndim = 3
else:
dx, dy = da[0:2]
if n == 3:
dz = da[2]
ndim = 3
except AttributeError:
# Fall back on the case of Python sequence data
# Accept either (x, y) or [(x, y)]
if type(ob[0]) == type(tuple()):
coords = ob[0]
else:
coords = ob
n = len(coords)
dx = c_double(coords[0])
dy = c_double(coords[1])
dz = None
if n == 3:
dz = c_double(coords[2])
if update_geom:
cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
if n != update_ndim:
raise ValueError(
"Wrong coordinate dimensions; this geometry has dimensions: %d" \
% update_ndim)
else:
cs = lgeos.GEOSCoordSeq_create(1, n)
# Because of a bug in the GEOS C API, always set X before Y
lgeos.GEOSCoordSeq_setX(cs, 0, dx)
lgeos.GEOSCoordSeq_setY(cs, 0, dy)
if n == 3:
lgeos.GEOSCoordSeq_setZ(cs, 0, dz)
if update_geom:
return None
else:
return lgeos.GEOSGeom_createPoint(cs), n
def update_point_from_py(geom, ob):
geos_point_from_py(ob, geom._geom, geom._ndim)
# Test runner
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
|
GbalsaC/bitnamiP
|
venv/lib/python2.7/site-packages/shapely/geometry/point.py
|
Python
|
agpl-3.0
| 6,390
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.2 on 2018-06-01 12:58
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('base', '0276_professional_integration'),
]
operations = [
migrations.AlterField(
model_name='learningunit',
name='end_year',
field=models.IntegerField(blank=True, null=True, verbose_name='end_year_title'),
),
migrations.AlterField(
model_name='learningunit',
name='start_year',
field=models.IntegerField(verbose_name='start_year'),
),
]
|
uclouvain/OSIS-Louvain
|
base/migrations/0277_auto_20180601_1458.py
|
Python
|
agpl-3.0
| 682
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2017, Shoop Commerce Ltd. All rights reserved.
#
# This source code is licensed under the OSL-3.0 license found in the
# LICENSE file in the root directory of this source tree.
from django.conf.urls import url
urlpatterns = [
url('test/(?P<arg>.+)/$', (lambda: 0), name="test"),
]
|
suutari-ai/shoop
|
shuup_tests/notify/notification_test_urls.py
|
Python
|
agpl-3.0
| 364
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2020, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import os
from torchvision import transforms
from torchvision.transforms import RandomResizedCrop
import nupic.research.frameworks.pytorch.dataset_utils.auto_augment as aa
from nupic.research.frameworks.pytorch.dataset_utils import HDF5Dataset
from nupic.research.frameworks.pytorch.datasets.imagenet_factory import (
IMAGENET_NUM_CLASSES as IMAGENET_CLASS_SUBSETS,
)
class ImageNet100(object):
def __init__(self, use_auto_augment=False):
self.use_auto_augment = use_auto_augment
self.train_dataset = None
self.test_dataset = None
def get_train_dataset(self, iteration):
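        # Build the training set lazily and cache it; the auto-augment variant
        # only adds ImageNet AutoAugment policies on top of the standard
        # random-resized-crop / horizontal-flip pipeline.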
if self.train_dataset is None:
if self.use_auto_augment:
transform = transforms.Compose(
transforms=[
RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
aa.ImageNetPolicy(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
inplace=True
),
],
)
else:
transform = transforms.Compose(
transforms=[
RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
inplace=True
),
],
)
self.train_dataset = HDF5Dataset(
hdf5_file=os.path.expanduser("~/nta/data/imagenet/imagenet.hdf5"),
root="train",
classes=IMAGENET_CLASS_SUBSETS[100],
transform=transform)
return self.train_dataset
def get_test_dataset(self, noise_level=0.0):
assert noise_level == 0.0
if self.test_dataset is None:
transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225],
inplace=True
),
]
)
self.test_dataset = HDF5Dataset(
hdf5_file=os.path.expanduser("~/nta/data/imagenet/imagenet.hdf5"),
root="val",
classes=IMAGENET_CLASS_SUBSETS[100],
transform=transform)
return self.test_dataset
|
numenta/nupic.research
|
packages/backprop_structure/src/nupic/research/frameworks/backprop_structure/dataset_managers/imagenet.py
|
Python
|
agpl-3.0
| 3,774
|