Dataset columns (one row per source file):

| column | type | observed range / cardinality |
|---|---|---|
| id | int64 | 0 to 458k |
| file_name | string | length 4 to 119 |
| file_path | string | length 14 to 227 |
| content | string | length 24 to 9.96M |
| size | int64 | 24 to 9.96M |
| language | string | 1 class |
| extension | string | 14 classes |
| total_lines | int64 | 1 to 219k |
| avg_line_length | float64 | 2.52 to 4.63M |
| max_line_length | int64 | 5 to 9.91M |
| alphanum_fraction | float64 | 0 to 1 |
| repo_name | string | length 7 to 101 |
| repo_stars | int64 | 100 to 139k |
| repo_forks | int64 | 0 to 26.4k |
| repo_open_issues | int64 | 0 to 2.27k |
| repo_license | string | 12 classes |
| repo_extraction_date | string | 433 classes |
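A minimal sketch of querying rows with this schema once the export is available locally (the parquet file name below is hypothetical, and pandas is just one possible reader):

import pandas as pd

# Load the export and keep well-populated Python files from popular repositories.
df = pd.read_parquet("python_files.parquet")
subset = df[(df["repo_stars"] >= 1000) & (df["alphanum_fraction"] > 0.5)]
print(subset[["file_path", "total_lines", "repo_name"]].head())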
id: 7,300 | file_name: session.py | file_path: CouchPotato_CouchPotatoServer/libs/xmpp/session.py
##
## XMPP server
##
## Copyright (C) 2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
__version__="$Id"
"""
When your handler is called it receives the session instance as the first argument.
This is the difference from xmpppy 0.1, where you got the "Client" instance.
With the Session class you can have a "multi-session" client instead of having
one client for each connection. This is especially important when you are
writing a server. (See the minimal handler sketch after the import below.)
"""
from protocol import *
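# A minimal handler sketch (hedged): with this Session-based API the first
# argument a handler receives is the Session instance, not the Client as in
# xmpppy 0.1. The RegisterHandler call shown in the comment mirrors stock
# xmpppy's Dispatcher API and is an assumption about how the owning server
# wires handlers up; adapt it to your setup.
def echo_message_handler(session, stanza):
    body = stanza.getBody()
    if body:
        # enqueue() holds the reply until the stream is authenticated.
        session.enqueue(Message(to=stanza.getFrom(), body='echo: ' + body))
# owner.Dispatcher.RegisterHandler('message', echo_message_handler)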
# Transport-level flags
SOCKET_UNCONNECTED =0
SOCKET_ALIVE =1
SOCKET_DEAD =2
# XML-level flags
STREAM__NOT_OPENED =1
STREAM__OPENED =2
STREAM__CLOSING =3
STREAM__CLOSED =4
# XMPP-session flags
SESSION_NOT_AUTHED =1
SESSION_AUTHED =2
SESSION_BOUND =3
SESSION_OPENED =4
SESSION_CLOSED =5
class Session:
"""
The Session class instance is used for storing all session-related info like
credentials, socket/xml stream/session state flags, roster items (in case of
client type connection) etc.
A Session object has no means of discovering whether any info is ready to be read.
Instead you should use the poll() (recommended) or select() methods for this purpose.
A Session can be one of two types: 'server' or 'client'. A 'server' session handles
an inbound connection and a 'client' one is used to create an outbound one.
A Session instance has a multitude of internal attributes. The most important is 'peer'.
It is set once the peer is authenticated (client).
"""
def __init__(self,socket,owner,xmlns=None,peer=None):
""" When the session is created it's type (client/server) is determined from the beginning.
socket argument is the pre-created socket-like object.
It must have the following methods: send, recv, fileno, close.
owner is the 'master' instance that have Dispatcher plugged into it and generally
will take care about all session events.
xmlns is the stream namespace that will be used. Client must set this argument
If server sets this argument than stream will be dropped if opened with some another namespace.
peer is the name of peer instance. This is the flag that differentiates client session from
server session. Client must set it to the name of the server that will be connected, server must
leave this argument alone.
"""
self.xmlns=xmlns
if peer:
self.TYP='client'
self.peer=peer
self._socket_state=SOCKET_UNCONNECTED
else:
self.TYP='server'
self.peer=None
self._socket_state=SOCKET_ALIVE
self._sock=socket
self._send=socket.send
self._recv=socket.recv
self.fileno=socket.fileno
self._registered=0
self.Dispatcher=owner.Dispatcher
self.DBG_LINE='session'
self.DEBUG=owner.Dispatcher.DEBUG
self._expected={}
self._owner=owner
if self.TYP=='server': self.ID=`random.random()`[2:]
else: self.ID=None
self.sendbuffer=''
self._stream_pos_queued=None
self._stream_pos_sent=0
self.deliver_key_queue=[]
self.deliver_queue_map={}
self.stanza_queue=[]
self._session_state=SESSION_NOT_AUTHED
self.waiting_features=[]
for feature in [NS_TLS,NS_SASL,NS_BIND,NS_SESSION]:
if feature in owner.features: self.waiting_features.append(feature)
self.features=[]
self.feature_in_process=None
self.slave_session=None
self.StartStream()
def StartStream(self):
""" This method is used to initialise the internal xml expat parser
and to send initial stream header (in case of client connection).
Should be used after initial connection and after every stream restart."""
self._stream_state=STREAM__NOT_OPENED
self.Stream=simplexml.NodeBuilder()
self.Stream._dispatch_depth=2
self.Stream.dispatch=self._dispatch
self.Parse=self.Stream.Parse
self.Stream.stream_footer_received=self._stream_close
if self.TYP=='client':
self.Stream.stream_header_received=self._catch_stream_id
self._stream_open()
else:
self.Stream.stream_header_received=self._stream_open
def receive(self):
""" Reads all pending incoming data.
Raises IOError on disconnection.
Blocks until at least one byte is read."""
try: received = self._recv(10240)
except: received = ''
if len(received): # length of 0 means disconnect
self.DEBUG(`self.fileno()`+' '+received,'got')
else:
self.DEBUG('Socket error while receiving data','error')
self.set_socket_state(SOCKET_DEAD)
raise IOError("Peer disconnected")
return received
def sendnow(self,chunk):
""" Put chunk into "immidiatedly send" queue.
Should only be used for auth/TLS stuff and like.
If you just want to shedule regular stanza for delivery use enqueue method.
"""
if isinstance(chunk,Node): chunk = chunk.__str__().encode('utf-8')
elif type(chunk)==type(u''): chunk = chunk.encode('utf-8')
self.enqueue(chunk)
def enqueue(self,stanza):
""" Takes Protocol instance as argument.
Puts stanza into "send" fifo queue. Items into the send queue are hold until
stream authenticated. After that this method is effectively the same as "sendnow" method."""
if isinstance(stanza,Protocol):
self.stanza_queue.append(stanza)
else: self.sendbuffer+=stanza
if self._socket_state>=SOCKET_ALIVE: self.push_queue()
def push_queue(self,failreason=ERR_RECIPIENT_UNAVAILABLE):
""" If stream is authenticated than move items from "send" queue to "immidiatedly send" queue.
Else if the stream is failed then return all queued stanzas with error passed as argument.
Otherwise do nothing."""
# If the stream authed - convert stanza_queue into sendbuffer and set the checkpoints
if self._stream_state>=STREAM__CLOSED or self._socket_state>=SOCKET_DEAD: # the stream failed. Return all stanzas that are still waiting for delivery.
self._owner.deactivatesession(self)
for key in self.deliver_key_queue: # Not sure. May be I
self._dispatch(Error(self.deliver_queue_map[key],failreason),trusted=1) # should simply re-dispatch it?
for stanza in self.stanza_queue: # But such action can invoke
self._dispatch(Error(stanza,failreason),trusted=1) # Infinite loops in case of S2S connection...
self.deliver_queue_map,self.deliver_key_queue,self.stanza_queue={},[],[]
return
elif self._session_state>=SESSION_AUTHED: # FIXME! There should be some other flag for this.
#### LOCK_QUEUE
for stanza in self.stanza_queue:
txt=stanza.__str__().encode('utf-8')
self.sendbuffer+=txt
self._stream_pos_queued+=len(txt) # should be re-evaluated for SSL connection.
self.deliver_queue_map[self._stream_pos_queued]=stanza # position of the stream when stanza will be successfully and fully sent
self.deliver_key_queue.append(self._stream_pos_queued)
self.stanza_queue=[]
#### UNLOCK_QUEUE
def flush_queue(self):
""" Put the "immidiatedly send" queue content on the wire. Blocks until at least one byte sent."""
if self.sendbuffer:
try:
# LOCK_QUEUE
sent=self._send(self.sendbuffer) # This is a blocking call!
except:
# UNLOCK_QUEUE
self.set_socket_state(SOCKET_DEAD)
self.DEBUG("Socket error while sending data",'error')
return self.terminate_stream()
self.DEBUG(`self.fileno()`+' '+self.sendbuffer[:sent],'sent')
self._stream_pos_sent+=sent
self.sendbuffer=self.sendbuffer[sent:]
self._stream_pos_delivered=self._stream_pos_sent # Should be acquired from socket somehow. Take SSL into account.
while self.deliver_key_queue and self._stream_pos_delivered>self.deliver_key_queue[0]:
del self.deliver_queue_map[self.deliver_key_queue[0]]
self.deliver_key_queue.remove(self.deliver_key_queue[0])
# UNLOCK_QUEUE
def _dispatch(self,stanza,trusted=0):
""" This is callback that is used to pass the received stanza forth to owner's dispatcher
_if_ the stream is authorised. Otherwise the stanza is just dropped.
The 'trusted' argument is used to emulate stanza receive.
This method is used internally.
"""
self._owner.packets+=1
if self._stream_state==STREAM__OPENED or trusted: # should the server really reject all stanzas after it has closed the stream (itself)?
self.DEBUG(stanza.__str__(),'dispatch')
stanza.trusted=trusted
return self.Dispatcher.dispatch(stanza,self)
def _catch_stream_id(self,ns=None,tag='stream',attrs={}):
""" This callback is used to detect the stream namespace of incoming stream. Used internally. """
if not attrs.has_key('id') or not attrs['id']:
return self.terminate_stream(STREAM_INVALID_XML)
self.ID=attrs['id']
if not attrs.has_key('version'): self._owner.Dialback(self)
def _stream_open(self,ns=None,tag='stream',attrs={}):
""" This callback is used to handle opening stream tag of the incoming stream.
In the case of client session it just make some validation.
Server session also sends server headers and if the stream valid the features node.
Used internally. """
text='<?xml version="1.0" encoding="utf-8"?>\n<stream:stream'
if self.TYP=='client':
text+=' to="%s"'%self.peer
else:
text+=' id="%s"'%self.ID
if not attrs.has_key('to'): text+=' from="%s"'%self._owner.servernames[0]
else: text+=' from="%s"'%attrs['to']
if attrs.has_key('xml:lang'): text+=' xml:lang="%s"'%attrs['xml:lang']
if self.xmlns: xmlns=self.xmlns
else: xmlns=NS_SERVER
text+=' xmlns:db="%s" xmlns:stream="%s" xmlns="%s"'%(NS_DIALBACK,NS_STREAMS,xmlns)
if attrs.has_key('version') or self.TYP=='client': text+=' version="1.0"'
self.sendnow(text+'>')
self.set_stream_state(STREAM__OPENED)
if self.TYP=='client': return
if tag<>'stream': return self.terminate_stream(STREAM_INVALID_XML)
if ns<>NS_STREAMS: return self.terminate_stream(STREAM_INVALID_NAMESPACE)
if self.Stream.xmlns<>self.xmlns: return self.terminate_stream(STREAM_BAD_NAMESPACE_PREFIX)
if not attrs.has_key('to'): return self.terminate_stream(STREAM_IMPROPER_ADDRESSING)
if attrs['to'] not in self._owner.servernames: return self.terminate_stream(STREAM_HOST_UNKNOWN)
self.ourname=attrs['to'].lower()
if self.TYP=='server' and attrs.has_key('version'):
# send features
features=Node('stream:features')
if NS_TLS in self.waiting_features:
features.NT.starttls.setNamespace(NS_TLS)
features.T.starttls.NT.required
if NS_SASL in self.waiting_features:
features.NT.mechanisms.setNamespace(NS_SASL)
for mec in self._owner.SASL.mechanisms:
features.T.mechanisms.NT.mechanism=mec
else:
if NS_BIND in self.waiting_features: features.NT.bind.setNamespace(NS_BIND)
if NS_SESSION in self.waiting_features: features.NT.session.setNamespace(NS_SESSION)
self.sendnow(features)
def feature(self,feature):
""" Declare some stream feature as activated one. """
if feature not in self.features: self.features.append(feature)
self.unfeature(feature)
def unfeature(self,feature):
""" Declare some feature as illegal. Illegal features can not be used.
Example: BIND feature becomes illegal after Non-SASL auth. """
if feature in self.waiting_features: self.waiting_features.remove(feature)
def _stream_close(self,unregister=1):
""" Write the closing stream tag and destroy the underlaying socket. Used internally. """
if self._stream_state>=STREAM__CLOSED: return
self.set_stream_state(STREAM__CLOSING)
self.sendnow('</stream:stream>')
self.set_stream_state(STREAM__CLOSED)
self.push_queue() # decompose queue really since STREAM__CLOSED
self._owner.flush_queues()
if unregister: self._owner.unregistersession(self)
self._destroy_socket()
def terminate_stream(self,error=None,unregister=1):
""" Notify the peer about stream closure.
Ensure that xmlstream is not brokes - i.e. if the stream isn't opened yet -
open it before closure.
If the error condition is specified than create a stream error and send it along with
closing stream tag.
Emulate receiving 'unavailable' type presence just before stream closure.
"""
if self._stream_state>=STREAM__CLOSING: return
if self._stream_state<STREAM__OPENED:
self.set_stream_state(STREAM__CLOSING)
self._stream_open()
else:
self.set_stream_state(STREAM__CLOSING)
p=Presence(typ='unavailable')
p.setNamespace(NS_CLIENT)
self._dispatch(p,trusted=1)
if error:
if isinstance(error,Node): self.sendnow(error)
else: self.sendnow(ErrorNode(error))
self._stream_close(unregister=unregister)
if self.slave_session:
self.slave_session.terminate_stream(STREAM_REMOTE_CONNECTION_FAILED)
def _destroy_socket(self):
""" Break cyclic dependancies to let python's GC free memory right now."""
self.Stream.dispatch=None
self.Stream.stream_footer_received=None
self.Stream.stream_header_received=None
self.Stream.destroy()
self._sock.close()
self.set_socket_state(SOCKET_DEAD)
def start_feature(self,f):
""" Declare some feature as "negotiating now" to prevent other features from start negotiating. """
if self.feature_in_process: raise "Starting feature %s over %s !"%(f,self.feature_in_process)
self.feature_in_process=f
def stop_feature(self,f):
""" Declare some feature as "negotiated" to allow other features start negotiating. """
if self.feature_in_process<>f: raise "Stopping feature %s instead of %s !"%(f,self.feature_in_process)
self.feature_in_process=None
def set_socket_state(self,newstate):
""" Change the underlaying socket state.
Socket starts with SOCKET_UNCONNECTED state
and then proceeds (possibly) to SOCKET_ALIVE
and then to SOCKET_DEAD """
if self._socket_state<newstate: self._socket_state=newstate
def set_session_state(self,newstate):
""" Change the session state.
Session starts with SESSION_NOT_AUTHED state
and then comes through
SESSION_AUTHED, SESSION_BOUND, SESSION_OPENED and SESSION_CLOSED states.
"""
if self._session_state<newstate:
if self._session_state<SESSION_AUTHED and \
newstate>=SESSION_AUTHED: self._stream_pos_queued=self._stream_pos_sent
self._session_state=newstate
def set_stream_state(self,newstate):
""" Change the underlaying XML stream state
Stream starts with STREAM__NOT_OPENED and then proceeds with
STREAM__OPENED, STREAM__CLOSING and STREAM__CLOSED states.
Note that some features (like TLS and SASL)
requires stream re-start so this state can have non-linear changes. """
if self._stream_state<newstate: self._stream_state=newstate
size: 16,899 | language: Python | extension: .py | total_lines: 320 | avg_line_length: 43.253125 | max_line_length: 158 | alphanum_fraction: 0.646828 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,301 | file_name: commands.py | file_path: CouchPotato_CouchPotatoServer/libs/xmpp/commands.py
## $Id: commands.py,v 1.17 2007/08/28 09:54:15 normanr Exp $
## Ad-Hoc Command manager
## Mike Albon (c) 5th January 2005
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
"""This module is a ad-hoc command processor for xmpppy. It uses the plug-in mechanism like most of the core library. It depends on a DISCO browser manager.
There are 3 classes here, a command processor Commands like the Browser, and a command template plugin Command, and an example command.
To use this module:
Instansiate the module with the parent transport and disco browser manager as parameters.
'Plug in' commands using the command template.
The command feature must be added to existing disco replies where neccessary.
What it supplies:
Automatic command registration with the disco browser manager.
Automatic listing of commands in the public command list.
A means of handling requests, by redirection though the command manager.
"""
from protocol import *
from client import PlugIn
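# A minimal usage sketch (hedged), as referenced in the module docstring above.
# It assumes an already connected xmpppy client object ('client') and that the
# disco Browser class lives in the 'browser' module, as in stock xmpppy.
def setup_adhoc_commands(client):
    from browser import Browser          # disco browser manager (assumed import path)
    browser = Browser()
    browser.PlugIn(client)               # register service discovery handlers
    commands = Commands(browser)
    commands.PlugIn(client)              # register iq handlers for NS_COMMANDS
    TestCommand().PlugIn(commands)       # expose the example command via disco
    return commands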
class Commands(PlugIn):
"""Commands is an ancestor of PlugIn and can be attached to any session.
The commands class provides a lookup and browse mechnism. It follows the same priciple of the Browser class, for Service Discovery to provide the list of commands, it adds the 'list' disco type to your existing disco handler function.
How it works:
The commands are added into the existing Browser on the correct nodes. When the command list is built the supplied discovery handler function needs to have a 'list' option in type. This then gets enumerated, all results returned as None are ignored.
The command executed is then called using it's Execute method. All session management is handled by the command itself.
"""
def __init__(self, browser):
"""Initialises class and sets up local variables"""
PlugIn.__init__(self)
DBG_LINE='commands'
self._exported_methods=[]
self._handlers={'':{}}
self._browser = browser
def plugin(self, owner):
"""Makes handlers within the session"""
# Plug into the session and the disco manager
# We only need get and set, results are not needed by a service provider, only a service user.
owner.RegisterHandler('iq',self._CommandHandler,typ='set',ns=NS_COMMANDS)
owner.RegisterHandler('iq',self._CommandHandler,typ='get',ns=NS_COMMANDS)
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid='')
def plugout(self):
"""Removes handlers from the session"""
# unPlug from the session and the disco manager
self._owner.UnregisterHandler('iq',self._CommandHandler,ns=NS_COMMANDS)
for jid in self._handlers:
self._browser.delDiscoHandler(self._DiscoHandler,node=NS_COMMANDS)
def _CommandHandler(self,conn,request):
"""The internal method to process the routing of command execution requests"""
# This is the command handler itself.
# We must:
# Pass on command execution to command handler
# (Do we need to keep session details here, or can that be done in the command?)
jid = str(request.getTo())
try:
node = request.getTagAttr('command','node')
except:
conn.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
if self._handlers.has_key(jid):
if self._handlers[jid].has_key(node):
self._handlers[jid][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif self._handlers[''].has_key(node):
self._handlers[''][node]['execute'](conn,request)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
def _DiscoHandler(self,conn,request,typ):
"""The internal method to process service discovery requests"""
# This is the disco manager handler.
if typ == 'items':
# We must:
# Generate a list of commands and return the list
# * This handler does not handle individual commands disco requests.
# Pseudo:
# Enumerate the 'item' disco of each command for the specified jid
# Build response and send
# To make this code easy to write we add a 'list' disco type; it returns a tuple, or None if not advertised
list = []
items = []
jid = str(request.getTo())
# Get specific jid based results
if self._handlers.has_key(jid):
for each in self._handlers[jid].keys():
items.append((jid,each))
else:
# Get generic results
for each in self._handlers[''].keys():
items.append(('',each))
if items != []:
for each in items:
i = self._handlers[each[0]][each[1]]['disco'](conn,request,'list')
if i != None:
list.append(Node(tag='item',attrs={'jid':i[0],'node':i[1],'name':i[2]}))
iq = request.buildReply('result')
if request.getQuerynode(): iq.setQuerynode(request.getQuerynode())
iq.setQueryPayload(list)
conn.send(iq)
else:
conn.send(Error(request,ERR_ITEM_NOT_FOUND))
raise NodeProcessed
elif typ == 'info':
return {'ids':[{'category':'automation','type':'command-list'}],'features':[]}
def addCommand(self,name,cmddisco,cmdexecute,jid=''):
"""The method to call if adding a new command to the session, the requred parameters of cmddisco and cmdexecute are the methods to enable that command to be executed"""
# This command takes a command object and the name of the command for registration
# We must:
# Add item into disco
# Add item into command list
if not self._handlers.has_key(jid):
self._handlers[jid]={}
self._browser.setDiscoHandler(self._DiscoHandler,node=NS_COMMANDS,jid=jid)
if self._handlers[jid].has_key(name):
raise NameError,'Command Exists'
else:
self._handlers[jid][name]={'disco':cmddisco,'execute':cmdexecute}
# Need to add disco stuff here
self._browser.setDiscoHandler(cmddisco,node=name,jid=jid)
def delCommand(self,name,jid=''):
"""Removed command from the session"""
# This command takes a command object and the name used for registration
# We must:
# Remove item from disco
# Remove item from command list
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
if not self._handlers[jid].has_key(name):
raise NameError, 'Command not found'
else:
#Do disco removal here
command = self.getCommand(name,jid)['disco']
del self._handlers[jid][name]
self._browser.delDiscoHandler(command,node=name,jid=jid)
def getCommand(self,name,jid=''):
"""Returns the command tuple"""
# This gets the command object with name
# We must:
# Return item that matches this name
if not self._handlers.has_key(jid):
raise NameError,'Jid not found'
elif not self._handlers[jid].has_key(name):
raise NameError,'Command not found'
else:
return self._handlers[jid][name]
class Command_Handler_Prototype(PlugIn):
"""This is a prototype command handler, as each command uses a disco method
and execute method you can implement it any way you like, however this is
my first attempt at making a generic handler that you can hang process
stages on too. There is an example command below.
The parameters are as follows:
name : the name of the command within the jabber environment
description : the natural language description
discofeatures : the features supported by the command
initial : the initial command in the form of {'execute':commandname}
All stages set the 'actions' dictionary for each session to represent the possible options available.
"""
name = 'examplecommand'
count = 0
description = 'an example command'
discofeatures = [NS_COMMANDS,NS_DATA]
# This is the command template
def __init__(self,jid=''):
"""Set up the class"""
PlugIn.__init__(self)
DBG_LINE='command'
self.sessioncount = 0
self.sessions = {}
# Disco information for command list pre-formatted as a tuple
self.discoinfo = {'ids':[{'category':'automation','type':'command-node','name':self.description}],'features': self.discofeatures}
self._jid = jid
def plugin(self,owner):
"""Plug command into the commands class"""
# The owner in this instance is the Command Processor
self._commands = owner
self._owner = owner._owner
self._commands.addCommand(self.name,self._DiscoHandler,self.Execute,jid=self._jid)
def plugout(self):
"""Remove command from the commands class"""
self._commands.delCommand(self.name,self._jid)
def getSessionID(self):
"""Returns an id for the command session"""
self.count = self.count+1
return 'cmd-%s-%d'%(self.name,self.count)
def Execute(self,conn,request):
"""The method that handles all the commands, and routes them to the correct method for that stage."""
# New request or old?
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
try:
action = request.getTagAttr('command','action')
except:
action = None
if action == None: action = 'execute'
# Check session is in session list
if self.sessions.has_key(session):
if self.sessions[session]['jid']==request.getFrom():
# Check action is valid
if self.sessions[session]['actions'].has_key(action):
# Execute next action
self.sessions[session]['actions'][action](conn,request)
else:
# Stage not presented as an option
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# Jid and session don't match. Go away imposter
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
elif session != None:
# Not on this sessionid you won't.
self._owner.send(Error(request,ERR_BAD_REQUEST))
raise NodeProcessed
else:
# New session
self.initial[action](conn,request)
def _DiscoHandler(self,conn,request,type):
"""The handler for discovery events"""
if type == 'list':
return (request.getTo(),self.name,self.description)
elif type == 'items':
return []
elif type == 'info':
return self.discoinfo
class TestCommand(Command_Handler_Prototype):
""" Example class. You should read source if you wish to understate how it works.
Generally, it presents a "master" that giudes user through to calculate something.
"""
name = 'testcommand'
description = 'a noddy example command'
def __init__(self,jid=''):
""" Init internal constants. """
Command_Handler_Prototype.__init__(self,jid)
self.initial = {'execute':self.cmdFirstStage}
def cmdFirstStage(self,conn,request):
""" Determine """
# This is the only place this should be repeated as all other stages should have SessionIDs
try:
session = request.getTagAttr('command','sessionid')
except:
session = None
if session == None:
session = self.getSessionID()
self.sessions[session]={'jid':request.getFrom(),'actions':{'cancel':self.cmdCancel,'next':self.cmdSecondStage,'execute':self.cmdSecondStage},'data':{'type':None}}
# As this is the first stage we only send a form
reply = request.buildReply('result')
form = DataForm(title='Select type of operation',data=['Use the combobox to select the type of calculation you would like to do, then click Next',DataField(name='calctype',desc='Calculation Type',value=self.sessions[session]['data']['type'],options=[['circlediameter','Calculate the Diameter of a circle'],['circlearea','Calculate the area of a circle']],typ='list-single',required=1)])
replypayload = [Node('actions',attrs={'execute':'next'},payload=[Node('next')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':session,'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdSecondStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
self.sessions[request.getTagAttr('command','sessionid')]['data']['type']=form.getField('calctype').getValue()
self.sessions[request.getTagAttr('command','sessionid')]['actions']={'cancel':self.cmdCancel,None:self.cmdThirdStage,'previous':self.cmdFirstStage,'execute':self.cmdThirdStage,'next':self.cmdThirdStage}
# The form generation is split out to another method as it may be called by cmdThirdStage
self.cmdSecondStageReply(conn,request)
def cmdSecondStageReply(self,conn,request):
reply = request.buildReply('result')
form = DataForm(title = 'Enter the radius', data=['Enter the radius of the circle (numbers only)',DataField(desc='Radius',name='radius',typ='text-single')])
replypayload = [Node('actions',attrs={'execute':'complete'},payload=[Node('complete'),Node('prev')]),form]
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'executing'},payload=replypayload)
self._owner.send(reply)
raise NodeProcessed
def cmdThirdStage(self,conn,request):
form = DataForm(node = request.getTag(name='command').getTag(name='x',namespace=NS_DATA))
try:
num = float(form.getField('radius').getValue())
except:
self.cmdSecondStageReply(conn,request)
from math import pi
if self.sessions[request.getTagAttr('command','sessionid')]['data']['type'] == 'circlearea':
result = (num**2)*pi
else:
result = num*2*pi
reply = request.buildReply('result')
form = DataForm(typ='result',data=[DataField(desc='result',name='result',value=result)])
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'completed'},payload=[form])
self._owner.send(reply)
raise NodeProcessed
def cmdCancel(self,conn,request):
reply = request.buildReply('result')
reply.addChild(name='command',namespace=NS_COMMANDS,attrs={'node':request.getTagAttr('command','node'),'sessionid':request.getTagAttr('command','sessionid'),'status':'cancelled'})
self._owner.send(reply)
del self.sessions[request.getTagAttr('command','sessionid')]
size: 16,116 | language: Python | extension: .py | total_lines: 294 | avg_line_length: 45.054422 | max_line_length: 394 | alphanum_fraction: 0.648743 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,302 | file_name: filetransfer.py | file_path: CouchPotato_CouchPotatoServer/libs/xmpp/filetransfer.py
## filetransfer.py
##
## Copyright (C) 2004 Alexey "Snake" Nezhdanov
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2, or (at your option)
## any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
# $Id: filetransfer.py,v 1.6 2004/12/25 20:06:59 snakeru Exp $
"""
This module contains the IBB class, a simple implementation of JEP-0047.
Note that this is just a transport for data. You have to negotiate the data transfer beforehand
(most probably via Stream Initiation). Unfortunately SI is not implemented yet.
"""
from protocol import *
from dispatcher import PlugIn
import base64
class IBB(PlugIn):
""" IBB used to transfer small-sized data chunk over estabilished xmpp connection.
Data is split into small blocks (by default 3000 bytes each), encoded as base 64
and sent to another entity that compiles these blocks back into the data chunk.
This is very inefficiend but should work under any circumstances. Note that
using IBB normally should be the last resort.
"""
def __init__(self):
""" Initialise internal variables. """
PlugIn.__init__(self)
self.DBG_LINE='ibb'
self._exported_methods=[self.OpenStream]
self._streams={}
self._ampnode=Node(NS_AMP+' amp',payload=[Node('rule',{'condition':'deliver-at','value':'stored','action':'error'}),Node('rule',{'condition':'match-resource','value':'exact','action':'error'})])
def plugin(self,owner):
""" Register handlers for receiving incoming datastreams. Used internally. """
self._owner.RegisterHandlerOnce('iq',self.StreamOpenReplyHandler) # Move to StreamOpen and specify stanza id
self._owner.RegisterHandler('iq',self.IqHandler,ns=NS_IBB)
self._owner.RegisterHandler('message',self.ReceiveHandler,ns=NS_IBB)
def IqHandler(self,conn,stanza):
""" Handles streams state change. Used internally. """
typ=stanza.getType()
self.DEBUG('IqHandler called typ->%s'%typ,'info')
if typ=='set' and stanza.getTag('open',namespace=NS_IBB): self.StreamOpenHandler(conn,stanza)
elif typ=='set' and stanza.getTag('close',namespace=NS_IBB): self.StreamCloseHandler(conn,stanza)
elif typ=='result': self.StreamCommitHandler(conn,stanza)
elif typ=='error': self.StreamOpenReplyHandler(conn,stanza)
else: conn.send(Error(stanza,ERR_BAD_REQUEST))
raise NodeProcessed
def StreamOpenHandler(self,conn,stanza):
""" Handles opening of new incoming stream. Used internally. """
"""
<iq type='set'
from='romeo@montague.net/orchard'
to='juliet@capulet.com/balcony'
id='inband_1'>
<open sid='mySID'
block-size='4096'
xmlns='http://jabber.org/protocol/ibb'/>
</iq>
"""
err=None
sid,blocksize=stanza.getTagAttr('open','sid'),stanza.getTagAttr('open','block-size')
self.DEBUG('StreamOpenHandler called sid->%s blocksize->%s'%(sid,blocksize),'info')
try: blocksize=int(blocksize)
except: err=ERR_BAD_REQUEST
if not sid or not blocksize: err=ERR_BAD_REQUEST
elif sid in self._streams.keys(): err=ERR_UNEXPECTED_REQUEST
if err: rep=Error(stanza,err)
else:
self.DEBUG("Opening stream: id %s, block-size %s"%(sid,blocksize),'info')
rep=Protocol('iq',stanza.getFrom(),'result',stanza.getTo(),{'id':stanza.getID()})
self._streams[sid]={'direction':'<'+str(stanza.getFrom()),'block-size':blocksize,'fp':open('/tmp/xmpp_file_'+sid,'w'),'seq':0,'syn_id':stanza.getID()}
conn.send(rep)
def OpenStream(self,sid,to,fp,blocksize=3000):
""" Start new stream. You should provide stream id 'sid', the endpoind jid 'to',
the file object containing info for send 'fp'. Also the desired blocksize can be specified.
Take into account that recommended stanza size is 4k and IBB uses base64 encoding
that increases size of data by 1/3."""
if sid in self._streams.keys(): return
if not JID(to).getResource(): return
self._streams[sid]={'direction':'|>'+to,'block-size':blocksize,'fp':fp,'seq':0}
self._owner.RegisterCycleHandler(self.SendHandler)
syn=Protocol('iq',to,'set',payload=[Node(NS_IBB+' open',{'sid':sid,'block-size':blocksize})])
self._owner.send(syn)
self._streams[sid]['syn_id']=syn.getID()
return self._streams[sid]
def SendHandler(self,conn):
""" Send next portion of data if it is time to do it. Used internally. """
self.DEBUG('SendHandler called','info')
for sid in self._streams.keys():
stream=self._streams[sid]
if stream['direction'][:2]=='|>': cont=1
elif stream['direction'][0]=='>':
chunk=stream['fp'].read(stream['block-size'])
if chunk:
datanode=Node(NS_IBB+' data',{'sid':sid,'seq':stream['seq']},base64.encodestring(chunk))
stream['seq']+=1
if stream['seq']==65536: stream['seq']=0
conn.send(Protocol('message',stream['direction'][1:],payload=[datanode,self._ampnode]))
else:
""" notify the other side about stream closing
notify the local user about the successful send
delete the local stream"""
conn.send(Protocol('iq',stream['direction'][1:],'set',payload=[Node(NS_IBB+' close',{'sid':sid})]))
conn.Event(self.DBG_LINE,'SUCCESSFULL SEND',stream)
del self._streams[sid]
self._owner.UnregisterCycleHandler(self.SendHandler)
"""
<message from='romeo@montague.net/orchard' to='juliet@capulet.com/balcony' id='msg1'>
<data xmlns='http://jabber.org/protocol/ibb' sid='mySID' seq='0'>
qANQR1DBwU4DX7jmYZnncmUQB/9KuKBddzQH+tZ1ZywKK0yHKnq57kWq+RFtQdCJ
WpdWpR0uQsuJe7+vh3NWn59/gTc5MDlX8dS9p0ovStmNcyLhxVgmqS8ZKhsblVeu
IpQ0JgavABqibJolc3BKrVtVV1igKiX/N7Pi8RtY1K18toaMDhdEfhBRzO/XB0+P
AQhYlRjNacGcslkhXqNjK5Va4tuOAPy2n1Q8UUrHbUd0g+xJ9Bm0G0LZXyvCWyKH
kuNEHFQiLuCY6Iv0myq6iX6tjuHehZlFSh80b5BVV9tNLwNR5Eqz1klxMhoghJOA
</data>
<amp xmlns='http://jabber.org/protocol/amp'>
<rule condition='deliver-at' value='stored' action='error'/>
<rule condition='match-resource' value='exact' action='error'/>
</amp>
</message>
"""
def ReceiveHandler(self,conn,stanza):
""" Receive next portion of incoming datastream and store it write
it to temporary file. Used internally.
"""
sid,seq,data=stanza.getTagAttr('data','sid'),stanza.getTagAttr('data','seq'),stanza.getTagData('data')
self.DEBUG('ReceiveHandler called sid->%s seq->%s'%(sid,seq),'info')
try: seq=int(seq); data=base64.decodestring(data)
except: seq=''; data=''
err=None
if not sid in self._streams.keys(): err=ERR_ITEM_NOT_FOUND
else:
stream=self._streams[sid]
if not data: err=ERR_BAD_REQUEST
elif seq<>stream['seq']: err=ERR_UNEXPECTED_REQUEST
else:
self.DEBUG('Successfull receive sid->%s %s+%s bytes'%(sid,stream['fp'].tell(),len(data)),'ok')
stream['seq']+=1
stream['fp'].write(data)
if err:
self.DEBUG('Error on receive: %s'%err,'error')
conn.send(Error(Iq(to=stanza.getFrom(),frm=stanza.getTo(),payload=[Node(NS_IBB+' close')]),err,reply=0))
def StreamCloseHandler(self,conn,stanza):
""" Handle stream closure due to all data transmitted.
Raise xmpppy event specifying successfull data receive. """
sid=stanza.getTagAttr('close','sid')
self.DEBUG('StreamCloseHandler called sid->%s'%sid,'info')
if sid in self._streams.keys():
conn.send(stanza.buildReply('result'))
conn.Event(self.DBG_LINE,'SUCCESSFULL RECEIVE',self._streams[sid])
del self._streams[sid]
else: conn.send(Error(stanza,ERR_ITEM_NOT_FOUND))
def StreamBrokenHandler(self,conn,stanza):
""" Handle stream closure due to all some error while receiving data.
Raise xmpppy event specifying unsuccessfull data receive. """
syn_id=stanza.getID()
self.DEBUG('StreamBrokenHandler called syn_id->%s'%syn_id,'info')
for sid in self._streams.keys():
stream=self._streams[sid]
if stream['syn_id']==syn_id:
if stream['direction'][0]=='<': conn.Event(self.DBG_LINE,'ERROR ON RECEIVE',stream)
else: conn.Event(self.DBG_LINE,'ERROR ON SEND',stream)
del self._streams[sid]
def StreamOpenReplyHandler(self,conn,stanza):
""" Handle remote side reply about is it agree or not to receive our datastream.
Used internally. Raises xmpppy event specfiying if the data transfer
is agreed upon."""
syn_id=stanza.getID()
self.DEBUG('StreamOpenReplyHandler called syn_id->%s'%syn_id,'info')
for sid in self._streams.keys():
stream=self._streams[sid]
if stream['syn_id']==syn_id:
if stanza.getType()=='error':
if stream['direction'][0]=='<': conn.Event(self.DBG_LINE,'ERROR ON RECEIVE',stream)
else: conn.Event(self.DBG_LINE,'ERROR ON SEND',stream)
del self._streams[sid]
elif stanza.getType()=='result':
if stream['direction'][0]=='|':
stream['direction']=stream['direction'][1:]
conn.Event(self.DBG_LINE,'STREAM COMMITTED',stream)
else: conn.send(Error(stanza,ERR_UNEXPECTED_REQUEST))
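# A minimal usage sketch (hedged): 'client' is assumed to be a connected xmpppy
# client owning a Dispatcher; the stream id and file path are illustrative values.
def send_file_over_ibb(client, path, to_jid):
    ibb = IBB()
    ibb.PlugIn(client)                    # registers the iq/message handlers above
    fp = open(path, 'rb')
    # OpenStream returns the internal stream record, or None if to_jid lacks a resource.
    return ibb.OpenStream('ibb-stream-1', to_jid, fp)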
size: 10,157 | language: Python | extension: .py | total_lines: 185 | avg_line_length: 45.318919 | max_line_length: 202 | alphanum_fraction: 0.639888 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,303 | file_name: __init__.py | file_path: CouchPotato_CouchPotatoServer/libs/tus/__init__.py
import os
import base64
import logging
import argparse
import requests
LOG_LEVEL = logging.INFO
DEFAULT_CHUNK_SIZE = 4 * 1024 * 1024
TUS_VERSION = '1.0.0'
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
logger.addHandler(logging.NullHandler())
class TusError(Exception):
pass
def _init():
fmt = "[%(asctime)s] %(levelname)s %(message)s"
h = logging.StreamHandler()
h.setLevel(LOG_LEVEL)
h.setFormatter(logging.Formatter(fmt))
logger.addHandler(h)
def _create_parser():
parser = argparse.ArgumentParser()
parser.add_argument('file', type=argparse.FileType('rb'))
parser.add_argument('--chunk-size', type=int, default=DEFAULT_CHUNK_SIZE)
parser.add_argument(
'--header',
action='append',
help="A single key/value pair"
" to be sent with all requests as HTTP header."
" Can be specified multiple times to send more then one header."
" Key and value must be separated with \":\".")
return parser
def _cmd_upload():
_init()
parser = _create_parser()
parser.add_argument('tus_endpoint')
parser.add_argument('--file_name')
parser.add_argument(
'--metadata',
action='append',
help="A single key/value pair to be sent in Upload-Metadata header."
" Can be specified multiple times to send more than one pair."
" Key and value must be separated with space.")
args = parser.parse_args()
headers = dict([x.split(':') for x in args.header])
metadata = dict([x.split(' ') for x in args.metadata])
upload(
args.file,
args.tus_endpoint,
chunk_size=args.chunk_size,
file_name=args.file_name,
headers=headers,
metadata=metadata)
def _cmd_resume():
_init()
parser = _create_parser()
parser.add_argument('file_endpoint')
args = parser.parse_args()
headers = dict([x.split(':') for x in args.header])
resume(
args.file,
args.file_endpoint,
chunk_size=args.chunk_size,
headers=headers)
def upload(file_obj,
tus_endpoint,
chunk_size=DEFAULT_CHUNK_SIZE,
file_name=None,
headers=None,
metadata=None):
if file_name is None: file_name = os.path.basename(file_obj.name)  # respect an explicitly passed file_name
file_size = _get_file_size(file_obj)
location = _create_file(
tus_endpoint,
file_name,
file_size,
extra_headers=headers,
metadata=metadata)
resume(
file_obj, location, chunk_size=chunk_size, headers=headers, offset=0)
def _get_file_size(f):
pos = f.tell()
f.seek(0, 2)
size = f.tell()
f.seek(pos)
return size
def _create_file(tus_endpoint,
file_name,
file_size,
extra_headers=None,
metadata=None):
logger.info("Creating file endpoint")
headers = {
"Tus-Resumable": TUS_VERSION,
"Upload-Length": str(file_size),
}
if extra_headers:
headers.update(extra_headers)
if metadata:
l = [k + ' ' + base64.b64encode(v) for k, v in metadata.items()]
headers["Upload-Metadata"] = ','.join(l)
response = requests.post(tus_endpoint, headers=headers)
if response.status_code != 201:
raise TusError("Create failed: %s" % response)
location = response.headers["Location"]
logger.info("Created: %s", location)
return location
def resume(file_obj,
file_endpoint,
chunk_size=DEFAULT_CHUNK_SIZE,
headers=None,
offset=None):
if offset is None:
offset = _get_offset(file_endpoint, extra_headers=headers)
total_sent = 0
file_size = _get_file_size(file_obj)
while offset < file_size:
file_obj.seek(offset)
data = file_obj.read(chunk_size)
offset = _upload_chunk(
data, offset, file_endpoint, extra_headers=headers)
total_sent += len(data)
logger.info("Total bytes sent: %i", total_sent)
def _get_offset(file_endpoint, extra_headers=None):
logger.info("Getting offset")
headers = {"Tus-Resumable": TUS_VERSION}
if extra_headers:
headers.update(extra_headers)
response = requests.head(file_endpoint, headers=headers)
response.raise_for_status()
offset = int(response.headers["Upload-Offset"])
logger.info("offset=%i", offset)
return offset
def _upload_chunk(data, offset, file_endpoint, extra_headers=None):
logger.info("Uploading chunk from offset: %i", offset)
headers = {
'Content-Type': 'application/offset+octet-stream',
'Upload-Offset': str(offset),
'Tus-Resumable': TUS_VERSION,
}
if extra_headers:
headers.update(extra_headers)
response = requests.patch(file_endpoint, headers=headers, data=data)
if response.status_code != 204:
raise TusError("Upload chunk failed: %s" % response)
return int(response.headers["Upload-Offset"])
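# A minimal usage sketch (hedged): the endpoint URL, file path and metadata
# below are illustrative values, not part of this module.
def _example_upload():
    with open('video.mp4', 'rb') as f:
        upload(
            f,
            'https://tus.example.com/files/',
            chunk_size=DEFAULT_CHUNK_SIZE,
            metadata={'filename': 'video.mp4'})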
size: 4,988 | language: Python | extension: .py | total_lines: 145 | avg_line_length: 27.606897 | max_line_length: 77 | alphanum_fraction: 0.640058 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam)
id: 7,304 | file_name: element.py | file_path: CouchPotato_CouchPotatoServer/libs/bs4/element.py
import collections
import re
import sys
import warnings
from bs4.dammit import EntitySubstitution
DEFAULT_OUTPUT_ENCODING = "utf-8"
PY3K = (sys.version_info[0] > 2)
whitespace_re = re.compile("\s+")
def _alias(attr):
"""Alias one attribute name to another for backward compatibility"""
@property
def alias(self):
return getattr(self, attr)
@alias.setter
def alias(self, value):  # the property setter must accept the assigned value
return setattr(self, attr, value)
return alias
class NamespacedAttribute(unicode):
def __new__(cls, prefix, name, namespace=None):
if name is None:
obj = unicode.__new__(cls, prefix)
elif prefix is None:
# Not really namespaced.
obj = unicode.__new__(cls, name)
else:
obj = unicode.__new__(cls, prefix + ":" + name)
obj.prefix = prefix
obj.name = name
obj.namespace = namespace
return obj
class AttributeValueWithCharsetSubstitution(unicode):
"""A stand-in object for a character encoding specified in HTML."""
class CharsetMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'charset' attribute.
When Beautiful Soup parses the markup '<meta charset="utf8">', the
value of the 'charset' attribute will be one of these objects.
"""
def __new__(cls, original_value):
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
return encoding
class ContentMetaAttributeValue(AttributeValueWithCharsetSubstitution):
"""A generic stand-in for the value of a meta tag's 'content' attribute.
When Beautiful Soup parses the markup:
<meta http-equiv="content-type" content="text/html; charset=utf8">
The value of the 'content' attribute will be one of these objects.
"""
CHARSET_RE = re.compile("((^|;)\s*charset=)([^;]*)", re.M)
def __new__(cls, original_value):
match = cls.CHARSET_RE.search(original_value)
if match is None:
# No substitution necessary.
return unicode.__new__(unicode, original_value)
obj = unicode.__new__(cls, original_value)
obj.original_value = original_value
return obj
def encode(self, encoding):
def rewrite(match):
return match.group(1) + encoding
return self.CHARSET_RE.sub(rewrite, self.original_value)
class HTMLAwareEntitySubstitution(EntitySubstitution):
"""Entity substitution rules that are aware of some HTML quirks.
Specifically, the contents of <script> and <style> tags should not
undergo entity substitution.
Incoming NavigableString objects are checked to see if they're the
direct children of a <script> or <style> tag.
"""
cdata_containing_tags = set(["script", "style"])
preformatted_tags = set(["pre"])
@classmethod
def _substitute_if_appropriate(cls, ns, f):
if (isinstance(ns, NavigableString)
and ns.parent is not None
and ns.parent.name in cls.cdata_containing_tags):
# Do nothing.
return ns
# Substitute.
return f(ns)
@classmethod
def substitute_html(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_html)
@classmethod
def substitute_xml(cls, ns):
return cls._substitute_if_appropriate(
ns, EntitySubstitution.substitute_xml)
class PageElement(object):
"""Contains the navigational information for some part of the page
(either a tag or a piece of text)"""
# There are five possible values for the "formatter" argument passed in
# to methods like encode() and prettify():
#
# "html" - All Unicode characters with corresponding HTML entities
# are converted to those entities on output.
# "minimal" - Bare ampersands and angle brackets are converted to
# XML entities: & < >
# None - The null formatter. Unicode characters are never
# converted to entities. This is not recommended, but it's
# faster than "minimal".
# A function - This function will be called on every string that
# needs to undergo entity substitution.
#
# In an HTML document, the default "html" and "minimal" functions
# will leave the contents of <script> and <style> tags alone. For
# an XML document, all tags will be given the same treatment.
HTML_FORMATTERS = {
"html" : HTMLAwareEntitySubstitution.substitute_html,
"minimal" : HTMLAwareEntitySubstitution.substitute_xml,
None : None
}
XML_FORMATTERS = {
"html" : EntitySubstitution.substitute_html,
"minimal" : EntitySubstitution.substitute_xml,
None : None
}
def format_string(self, s, formatter='minimal'):
"""Format the given string using the given formatter."""
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
if formatter is None:
output = s
else:
output = formatter(s)
return output
@property
def _is_xml(self):
"""Is this element part of an XML tree or an HTML tree?
This is used when mapping a formatter name ("minimal") to an
appropriate function (one that performs entity-substitution on
the contents of <script> and <style> tags, or not). It's
inefficient, but it should be called very rarely.
"""
if self.parent is None:
# This is the top-level object. It should have .is_xml set
# from tree creation. If not, take a guess--BS is usually
# used on HTML markup.
return getattr(self, 'is_xml', False)
return self.parent._is_xml
def _formatter_for_name(self, name):
"Look up a formatter function based on its name and the tree."
if self._is_xml:
return self.XML_FORMATTERS.get(
name, EntitySubstitution.substitute_xml)
else:
return self.HTML_FORMATTERS.get(
name, HTMLAwareEntitySubstitution.substitute_xml)
def setup(self, parent=None, previous_element=None):
"""Sets up the initial relations between this element and
other elements."""
self.parent = parent
self.previous_element = previous_element
if previous_element is not None:
self.previous_element.next_element = self
self.next_element = None
self.previous_sibling = None
self.next_sibling = None
if self.parent is not None and self.parent.contents:
self.previous_sibling = self.parent.contents[-1]
self.previous_sibling.next_sibling = self
nextSibling = _alias("next_sibling") # BS3
previousSibling = _alias("previous_sibling") # BS3
def replace_with(self, replace_with):
if replace_with is self:
return
if replace_with is self.parent:
raise ValueError("Cannot replace a Tag with its parent.")
old_parent = self.parent
my_index = self.parent.index(self)
self.extract()
old_parent.insert(my_index, replace_with)
return self
replaceWith = replace_with # BS3
def unwrap(self):
my_parent = self.parent
my_index = self.parent.index(self)
self.extract()
for child in reversed(self.contents[:]):
my_parent.insert(my_index, child)
return self
replace_with_children = unwrap
replaceWithChildren = unwrap # BS3
def wrap(self, wrap_inside):
me = self.replace_with(wrap_inside)
wrap_inside.append(me)
return wrap_inside
def extract(self):
"""Destructively rips this element out of the tree."""
if self.parent is not None:
del self.parent.contents[self.parent.index(self)]
#Find the two elements that would be next to each other if
#this element (and any children) hadn't been parsed. Connect
#the two.
last_child = self._last_descendant()
next_element = last_child.next_element
if self.previous_element is not None:
self.previous_element.next_element = next_element
if next_element is not None:
next_element.previous_element = self.previous_element
self.previous_element = None
last_child.next_element = None
self.parent = None
if self.previous_sibling is not None:
self.previous_sibling.next_sibling = self.next_sibling
if self.next_sibling is not None:
self.next_sibling.previous_sibling = self.previous_sibling
self.previous_sibling = self.next_sibling = None
return self
def _last_descendant(self, is_initialized=True, accept_self=True):
"Finds the last element beneath this object to be parsed."
if is_initialized and self.next_sibling:
last_child = self.next_sibling.previous_element
else:
last_child = self
while isinstance(last_child, Tag) and last_child.contents:
last_child = last_child.contents[-1]
if not accept_self and last_child == self:
last_child = None
return last_child
# BS3: Not part of the API!
_lastRecursiveChild = _last_descendant
def insert(self, position, new_child):
if new_child is self:
raise ValueError("Cannot insert a tag into itself.")
if (isinstance(new_child, basestring)
and not isinstance(new_child, NavigableString)):
new_child = NavigableString(new_child)
position = min(position, len(self.contents))
if hasattr(new_child, 'parent') and new_child.parent is not None:
# We're 'inserting' an element that's already one
# of this object's children.
if new_child.parent is self:
current_index = self.index(new_child)
if current_index < position:
# We're moving this element further down the list
# of this object's children. That means that when
# we extract this element, our target index will
# jump down one.
position -= 1
new_child.extract()
new_child.parent = self
previous_child = None
if position == 0:
new_child.previous_sibling = None
new_child.previous_element = self
else:
previous_child = self.contents[position - 1]
new_child.previous_sibling = previous_child
new_child.previous_sibling.next_sibling = new_child
new_child.previous_element = previous_child._last_descendant(False)
if new_child.previous_element is not None:
new_child.previous_element.next_element = new_child
new_childs_last_element = new_child._last_descendant(False)
if position >= len(self.contents):
new_child.next_sibling = None
parent = self
parents_next_sibling = None
while parents_next_sibling is None and parent is not None:
parents_next_sibling = parent.next_sibling
parent = parent.parent
if parents_next_sibling is not None:
# We found the element that comes next in the document.
break
if parents_next_sibling is not None:
new_childs_last_element.next_element = parents_next_sibling
else:
# The last element of this tag is the last element in
# the document.
new_childs_last_element.next_element = None
else:
next_child = self.contents[position]
new_child.next_sibling = next_child
if new_child.next_sibling is not None:
new_child.next_sibling.previous_sibling = new_child
new_childs_last_element.next_element = next_child
if new_childs_last_element.next_element is not None:
new_childs_last_element.next_element.previous_element = new_childs_last_element
self.contents.insert(position, new_child)
def append(self, tag):
"""Appends the given tag to the contents of this tag."""
self.insert(len(self.contents), tag)
def insert_before(self, predecessor):
"""Makes the given element the immediate predecessor of this one.
The two elements will have the same parent, and the given element
will be immediately before this one.
"""
if self is predecessor:
raise ValueError("Can't insert an element before itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'before' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(predecessor, PageElement):
predecessor.extract()
index = parent.index(self)
parent.insert(index, predecessor)
def insert_after(self, successor):
"""Makes the given element the immediate successor of this one.
The two elements will have the same parent, and the given element
will be immediately after this one.
"""
if self is successor:
raise ValueError("Can't insert an element after itself.")
parent = self.parent
if parent is None:
raise ValueError(
"Element has no parent, so 'after' has no meaning.")
# Extract first so that the index won't be screwed up if they
# are siblings.
if isinstance(successor, PageElement):
successor.extract()
index = parent.index(self)
parent.insert(index+1, successor)
def find_next(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears after this Tag in the document."""
return self._find_one(self.find_all_next, name, attrs, text, **kwargs)
findNext = find_next # BS3
def find_all_next(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
after this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.next_elements,
**kwargs)
findAllNext = find_all_next # BS3
def find_next_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears after this Tag in the document."""
return self._find_one(self.find_next_siblings, name, attrs, text,
**kwargs)
findNextSibling = find_next_sibling # BS3
def find_next_siblings(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear after this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.next_siblings, **kwargs)
findNextSiblings = find_next_siblings # BS3
fetchNextSiblings = find_next_siblings # BS2
def find_previous(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the first item that matches the given criteria and
appears before this Tag in the document."""
return self._find_one(
self.find_all_previous, name, attrs, text, **kwargs)
findPrevious = find_previous # BS3
def find_all_previous(self, name=None, attrs={}, text=None, limit=None,
**kwargs):
"""Returns all items that match the given criteria and appear
before this Tag in the document."""
return self._find_all(name, attrs, text, limit, self.previous_elements,
**kwargs)
findAllPrevious = find_all_previous # BS3
fetchPrevious = find_all_previous # BS2
def find_previous_sibling(self, name=None, attrs={}, text=None, **kwargs):
"""Returns the closest sibling to this Tag that matches the
given criteria and appears before this Tag in the document."""
return self._find_one(self.find_previous_siblings, name, attrs, text,
**kwargs)
findPreviousSibling = find_previous_sibling # BS3
def find_previous_siblings(self, name=None, attrs={}, text=None,
limit=None, **kwargs):
"""Returns the siblings of this Tag that match the given
criteria and appear before this Tag in the document."""
return self._find_all(name, attrs, text, limit,
self.previous_siblings, **kwargs)
findPreviousSiblings = find_previous_siblings # BS3
fetchPreviousSiblings = find_previous_siblings # BS2
def find_parent(self, name=None, attrs={}, **kwargs):
"""Returns the closest parent of this Tag that matches the given
criteria."""
# NOTE: We can't use _find_one because findParents takes a different
# set of arguments.
r = None
l = self.find_parents(name, attrs, 1, **kwargs)
if l:
r = l[0]
return r
findParent = find_parent # BS3
def find_parents(self, name=None, attrs={}, limit=None, **kwargs):
"""Returns the parents of this Tag that match the given
criteria."""
return self._find_all(name, attrs, None, limit, self.parents,
**kwargs)
findParents = find_parents # BS3
fetchParents = find_parents # BS2
@property
def next(self):
return self.next_element
@property
def previous(self):
return self.previous_element
#These methods do the real heavy lifting.
def _find_one(self, method, name, attrs, text, **kwargs):
r = None
l = method(name, attrs, text, 1, **kwargs)
if l:
r = l[0]
return r
def _find_all(self, name, attrs, text, limit, generator, **kwargs):
"Iterates over a generator looking for things that match."
if isinstance(name, SoupStrainer):
strainer = name
else:
strainer = SoupStrainer(name, attrs, text, **kwargs)
if text is None and not limit and not attrs and not kwargs:
if name is True or name is None:
# Optimization to find all tags.
result = (element for element in generator
if isinstance(element, Tag))
return ResultSet(strainer, result)
elif isinstance(name, basestring):
# Optimization to find all tags with a given name.
result = (element for element in generator
if isinstance(element, Tag)
and element.name == name)
return ResultSet(strainer, result)
results = ResultSet(strainer)
while True:
try:
i = next(generator)
except StopIteration:
break
if i:
found = strainer.search(i)
if found:
results.append(found)
if limit and len(results) >= limit:
break
return results
#These generators can be used to navigate starting from both
#NavigableStrings and Tags.
@property
def next_elements(self):
i = self.next_element
while i is not None:
yield i
i = i.next_element
@property
def next_siblings(self):
i = self.next_sibling
while i is not None:
yield i
i = i.next_sibling
@property
def previous_elements(self):
i = self.previous_element
while i is not None:
yield i
i = i.previous_element
@property
def previous_siblings(self):
i = self.previous_sibling
while i is not None:
yield i
i = i.previous_sibling
@property
def parents(self):
i = self.parent
while i is not None:
yield i
i = i.parent
# Methods for supporting CSS selectors.
tag_name_re = re.compile('^[a-z0-9]+$')
# /^(\w+)\[(\w+)([=~\|\^\$\*]?)=?"?([^\]"]*)"?\]$/
#   \---/  \---/\-------------/    \--------/
#     |      |         |               |
#     |      |         |           The value
#     |      |    ~,|,^,$,* or =
#     |   Attribute
#    Tag
attribselect_re = re.compile(
r'^(?P<tag>\w+)?\[(?P<attribute>\w+)(?P<operator>[=~\|\^\$\*]?)' +
r'=?"?(?P<value>[^\]"]*)"?\]$'
)
def _attr_value_as_string(self, value, default=None):
"""Force an attribute value into a string representation.
A multi-valued attribute will be converted into a
space-separated string.
"""
value = self.get(value, default)
if isinstance(value, list) or isinstance(value, tuple):
value =" ".join(value)
return value
def _tag_name_matches_and(self, function, tag_name):
if not tag_name:
return function
else:
def _match(tag):
return tag.name == tag_name and function(tag)
return _match
def _attribute_checker(self, operator, attribute, value=''):
"""Create a function that performs a CSS selector operation.
Takes an operator, attribute and optional value. Returns a
function that will return True for elements that match that
combination.
"""
if operator == '=':
# string representation of `attribute` is equal to `value`
return lambda el: el._attr_value_as_string(attribute) == value
elif operator == '~':
# space-separated list representation of `attribute`
# contains `value`
def _includes_value(element):
attribute_value = element.get(attribute, [])
if not isinstance(attribute_value, list):
attribute_value = attribute_value.split()
return value in attribute_value
return _includes_value
elif operator == '^':
# string representation of `attribute` starts with `value`
return lambda el: el._attr_value_as_string(
attribute, '').startswith(value)
elif operator == '$':
# string representation of `attribute` ends with `value`
return lambda el: el._attr_value_as_string(
attribute, '').endswith(value)
elif operator == '*':
# string representation of `attribute` contains `value`
return lambda el: value in el._attr_value_as_string(attribute, '')
elif operator == '|':
# string representation of `attribute` is either exactly
# `value` or starts with `value` and then a dash.
def _is_or_starts_with_dash(element):
attribute_value = element._attr_value_as_string(attribute, '')
return (attribute_value == value or attribute_value.startswith(
value + '-'))
return _is_or_starts_with_dash
else:
return lambda el: el.has_attr(attribute)
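# Illustrative sketch (not part of the library): how the checker functions
# built above behave when reached through Tag.select(). The markup and the
# soup variable are hypothetical.
#   >>> soup = BeautifulSoup('<a href="http://example.com/page">x</a>', 'html.parser')
#   >>> soup.select('a[href^="http"]')    # '^': value starts with "http"
#   >>> soup.select('a[href$="page"]')    # '$': value ends with "page"
#   >>> soup.select('a[href*="example"]') # '*': value contains "example"
#   >>> soup.select('a[href]')            # no operator: attribute merely present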
# Old non-property versions of the generators, for backwards
# compatibility with BS3.
def nextGenerator(self):
return self.next_elements
def nextSiblingGenerator(self):
return self.next_siblings
def previousGenerator(self):
return self.previous_elements
def previousSiblingGenerator(self):
return self.previous_siblings
def parentGenerator(self):
return self.parents
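# Usage sketch (illustrative, not part of the module): the find_* methods and
# generators above navigate outward from an existing element. Assuming a
# hypothetical parsed document soup = BeautifulSoup('<p><b>one</b><b>two</b></p>', 'html.parser'):
#   >>> first = soup.b                   # the first <b> tag
#   >>> first.find_next_sibling('b')     # the <b>two</b> tag
#   >>> first.find_parent('p')           # the enclosing <p> tag
#   >>> list(first.next_elements)        # everything after <b>one</b> in parse order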
class NavigableString(unicode, PageElement):
PREFIX = ''
SUFFIX = ''
def __new__(cls, value):
"""Create a new NavigableString.
When unpickling a NavigableString, this method is called with
the string in DEFAULT_OUTPUT_ENCODING. That encoding needs to be
passed in to the superclass's __new__ or the superclass won't know
how to handle non-ASCII characters.
"""
if isinstance(value, unicode):
return unicode.__new__(cls, value)
return unicode.__new__(cls, value, DEFAULT_OUTPUT_ENCODING)
def __copy__(self):
return self
def __getnewargs__(self):
return (unicode(self),)
def __getattr__(self, attr):
"""text.string gives you text. This is for backwards
compatibility for Navigable*String, but for CData* it lets you
get the string without the CData wrapper."""
if attr == 'string':
return self
else:
raise AttributeError(
"'%s' object has no attribute '%s'" % (
self.__class__.__name__, attr))
def output_ready(self, formatter="minimal"):
output = self.format_string(self, formatter)
return self.PREFIX + output + self.SUFFIX
@property
def name(self):
return None
@name.setter
def name(self, name):
raise AttributeError("A NavigableString cannot be given a name.")
class PreformattedString(NavigableString):
"""A NavigableString not subject to the normal formatting rules.
The string will be passed into the formatter (to trigger side effects),
but the return value will be ignored.
"""
def output_ready(self, formatter="minimal"):
"""CData strings are passed into the formatter.
But the return value is ignored."""
self.format_string(self, formatter)
return self.PREFIX + self + self.SUFFIX
class CData(PreformattedString):
PREFIX = u'<![CDATA['
SUFFIX = u']]>'
class ProcessingInstruction(PreformattedString):
PREFIX = u'<?'
SUFFIX = u'?>'
class Comment(PreformattedString):
PREFIX = u'<!--'
SUFFIX = u'-->'
class Declaration(PreformattedString):
PREFIX = u'<!'
SUFFIX = u'>'
class Doctype(PreformattedString):
@classmethod
def for_name_and_ids(cls, name, pub_id, system_id):
value = name or ''
if pub_id is not None:
value += ' PUBLIC "%s"' % pub_id
if system_id is not None:
value += ' "%s"' % system_id
elif system_id is not None:
value += ' SYSTEM "%s"' % system_id
return Doctype(value)
PREFIX = u'<!DOCTYPE '
SUFFIX = u'>\n'
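# Rough sketch of what the for_name_and_ids() helper above produces
# (illustrative values; output shown is approximate):
#   >>> Doctype.for_name_and_ids('html', None, None).output_ready()
#   u'<!DOCTYPE html>\n'
#   >>> Doctype.for_name_and_ids('html', '-//W3C//DTD XHTML 1.0//EN', None).output_ready()
#   u'<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0//EN">\n'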
class Tag(PageElement):
"""Represents a found HTML tag with its attributes and contents."""
def __init__(self, parser=None, builder=None, name=None, namespace=None,
prefix=None, attrs=None, parent=None, previous=None):
"Basic constructor."
if parser is None:
self.parser_class = None
else:
# We don't actually store the parser object: that lets extracted
# chunks be garbage-collected.
self.parser_class = parser.__class__
if name is None:
raise ValueError("No value provided for new tag's name.")
self.name = name
self.namespace = namespace
self.prefix = prefix
if attrs is None:
attrs = {}
elif attrs and builder.cdata_list_attributes:
attrs = builder._replace_cdata_list_attribute_values(
self.name, attrs)
else:
attrs = dict(attrs)
self.attrs = attrs
self.contents = []
self.setup(parent, previous)
self.hidden = False
# Set up any substitutions, such as the charset in a META tag.
if builder is not None:
builder.set_up_substitutions(self)
self.can_be_empty_element = builder.can_be_empty_element(name)
else:
self.can_be_empty_element = False
parserClass = _alias("parser_class") # BS3
@property
def is_empty_element(self):
"""Is this tag an empty-element tag? (aka a self-closing tag)
A tag that has contents is never an empty-element tag.
A tag that has no contents may or may not be an empty-element
tag. It depends on the builder used to create the tag. If the
builder has a designated list of empty-element tags, then only
a tag whose name shows up in that list is considered an
empty-element tag.
If the builder has no designated list of empty-element tags,
then any tag with no contents is an empty-element tag.
"""
return len(self.contents) == 0 and self.can_be_empty_element
isSelfClosing = is_empty_element # BS3
@property
def string(self):
"""Convenience property to get the single string within this tag.
:Return: If this tag has a single string child, return value
is that string. If this tag has no children, or more than one
child, return value is None. If this tag has one child tag,
return value is the 'string' attribute of the child tag,
recursively.
"""
if len(self.contents) != 1:
return None
child = self.contents[0]
if isinstance(child, NavigableString):
return child
return child.string
@string.setter
def string(self, string):
self.clear()
self.append(string.__class__(string))
def _all_strings(self, strip=False, types=(NavigableString, CData)):
"""Yield all strings of certain classes, possibly stripping them.
By default, yields only NavigableString and CData objects. So
no comments, processing instructions, etc.
"""
for descendant in self.descendants:
if (
(types is None and not isinstance(descendant, NavigableString))
or
(types is not None and type(descendant) not in types)):
continue
if strip:
descendant = descendant.strip()
if len(descendant) == 0:
continue
yield descendant
strings = property(_all_strings)
@property
def stripped_strings(self):
for string in self._all_strings(True):
yield string
def get_text(self, separator=u"", strip=False,
types=(NavigableString, CData)):
"""
Get all child strings, concatenated using the given separator.
"""
return separator.join([s for s in self._all_strings(
strip, types=types)])
getText = get_text
text = property(get_text)
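# Illustrative sketch of .string versus get_text() (markup is made up):
#   >>> soup = BeautifulSoup('<p>one <b>two</b></p>', 'html.parser')
#   >>> soup.b.string       # single string child -> u'two'
#   >>> print soup.p.string # <p> has two children -> None
#   >>> soup.p.get_text()   # all descendant strings joined -> u'one two'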
def decompose(self):
"""Recursively destroys the contents of this tree."""
self.extract()
i = self
while i is not None:
next = i.next_element
i.__dict__.clear()
i.contents = []
i = next
def clear(self, decompose=False):
"""
Extract all children. If decompose is True, decompose instead.
"""
if decompose:
for element in self.contents[:]:
if isinstance(element, Tag):
element.decompose()
else:
element.extract()
else:
for element in self.contents[:]:
element.extract()
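# Sketch contrasting extract(), clear() and decompose() (illustrative markup):
#   >>> soup = BeautifulSoup('<div><p>a</p><p>b</p></div>', 'html.parser')
#   >>> p = soup.p.extract()   # detaches the first <p> and returns it intact
#   >>> soup.div.clear()       # empties the <div> but keeps it in the tree
#   >>> soup.div.decompose()   # removes the <div> and destroys its contents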
def index(self, element):
"""
Find the index of a child by identity, not value. Avoids issues with
tag.contents.index(element) getting the index of equal elements.
"""
for i, child in enumerate(self.contents):
if child is element:
return i
raise ValueError("Tag.index: element not in tag")
def get(self, key, default=None):
"""Returns the value of the 'key' attribute for the tag, or
the value given for 'default' if it doesn't have that
attribute."""
return self.attrs.get(key, default)
def has_attr(self, key):
return key in self.attrs
def __hash__(self):
return str(self).__hash__()
def __getitem__(self, key):
"""tag[key] returns the value of the 'key' attribute for the tag,
and throws an exception if it's not there."""
return self.attrs[key]
def __iter__(self):
"Iterating over a tag iterates over its contents."
return iter(self.contents)
def __len__(self):
"The length of a tag is the length of its list of contents."
return len(self.contents)
def __contains__(self, x):
return x in self.contents
def __nonzero__(self):
"A tag is non-None even if it has no contents."
return True
def __setitem__(self, key, value):
"""Setting tag[key] sets the value of the 'key' attribute for the
tag."""
self.attrs[key] = value
def __delitem__(self, key):
"Deleting tag[key] deletes all 'key' attributes for the tag."
self.attrs.pop(key, None)
def __call__(self, *args, **kwargs):
"""Calling a tag like a function is the same as calling its
find_all() method. Eg. tag('a') returns a list of all the A tags
found within this tag."""
return self.find_all(*args, **kwargs)
def __getattr__(self, tag):
#print "Getattr %s.%s" % (self.__class__, tag)
if len(tag) > 3 and tag.endswith('Tag'):
# BS3: soup.aTag -> "soup.find("a")
tag_name = tag[:-3]
warnings.warn(
'.%sTag is deprecated, use .find("%s") instead.' % (
tag_name, tag_name))
return self.find(tag_name)
# We special case contents to avoid recursion.
elif not tag.startswith("__") and not tag=="contents":
return self.find(tag)
raise AttributeError(
"'%s' object has no attribute '%s'" % (self.__class__, tag))
def __eq__(self, other):
"""Returns true iff this tag has the same name, the same attributes,
and the same contents (recursively) as the given tag."""
if self is other:
return True
if (not hasattr(other, 'name') or
not hasattr(other, 'attrs') or
not hasattr(other, 'contents') or
self.name != other.name or
self.attrs != other.attrs or
len(self) != len(other)):
return False
for i, my_child in enumerate(self.contents):
if my_child != other.contents[i]:
return False
return True
def __ne__(self, other):
"""Returns true iff this tag is not identical to the other tag,
as defined in __eq__."""
return not self == other
def __repr__(self, encoding=DEFAULT_OUTPUT_ENCODING):
"""Renders this tag as a string."""
return self.encode(encoding)
def __unicode__(self):
return self.decode()
def __str__(self):
return self.encode()
if PY3K:
__str__ = __repr__ = __unicode__
def encode(self, encoding=DEFAULT_OUTPUT_ENCODING,
indent_level=None, formatter="minimal",
errors="xmlcharrefreplace"):
# Turn the data structure into Unicode, then encode the
# Unicode.
u = self.decode(indent_level, encoding, formatter)
return u.encode(encoding, errors)
def _should_pretty_print(self, indent_level):
"""Should this tag be pretty-printed?"""
return (
indent_level is not None and
(self.name not in HTMLAwareEntitySubstitution.preformatted_tags
or self._is_xml))
def decode(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a Unicode representation of this tag and its contents.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
attrs = []
if self.attrs:
for key, val in sorted(self.attrs.items()):
if val is None:
decoded = key
else:
if isinstance(val, list) or isinstance(val, tuple):
val = ' '.join(val)
elif not isinstance(val, basestring):
val = unicode(val)
elif (
isinstance(val, AttributeValueWithCharsetSubstitution)
and eventual_encoding is not None):
val = val.encode(eventual_encoding)
text = self.format_string(val, formatter)
decoded = (
unicode(key) + '='
+ EntitySubstitution.quoted_attribute_value(text))
attrs.append(decoded)
close = ''
closeTag = ''
prefix = ''
if self.prefix:
prefix = self.prefix + ":"
if self.is_empty_element:
close = '/'
else:
closeTag = '</%s%s>' % (prefix, self.name)
pretty_print = self._should_pretty_print(indent_level)
space = ''
indent_space = ''
if indent_level is not None:
indent_space = (' ' * (indent_level - 1))
if pretty_print:
space = indent_space
indent_contents = indent_level + 1
else:
indent_contents = None
contents = self.decode_contents(
indent_contents, eventual_encoding, formatter)
if self.hidden:
# This is the 'document root' object.
s = contents
else:
s = []
attribute_string = ''
if attrs:
attribute_string = ' ' + ' '.join(attrs)
if indent_level is not None:
# Even if this particular tag is not pretty-printed,
# we should indent up to the start of the tag.
s.append(indent_space)
s.append('<%s%s%s%s>' % (
prefix, self.name, attribute_string, close))
if pretty_print:
s.append("\n")
s.append(contents)
if pretty_print and contents and contents[-1] != "\n":
s.append("\n")
if pretty_print and closeTag:
s.append(space)
s.append(closeTag)
if indent_level is not None and closeTag and self.next_sibling:
# Even if this particular tag is not pretty-printed,
# we're now done with the tag, and we should add a
# newline if appropriate.
s.append("\n")
s = ''.join(s)
return s
def prettify(self, encoding=None, formatter="minimal"):
if encoding is None:
return self.decode(True, formatter=formatter)
else:
return self.encode(encoding, True, formatter=formatter)
def decode_contents(self, indent_level=None,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a Unicode string.
:param eventual_encoding: The tag is destined to be
encoded into this encoding. This method is _not_
responsible for performing that encoding. This information
is passed in so that it can be substituted in if the
document contains a <META> tag that mentions the document's
encoding.
"""
# First off, turn a string formatter into a function. This
# will stop the lookup from happening over and over again.
if not callable(formatter):
formatter = self._formatter_for_name(formatter)
pretty_print = (indent_level is not None)
s = []
for c in self:
text = None
if isinstance(c, NavigableString):
text = c.output_ready(formatter)
elif isinstance(c, Tag):
s.append(c.decode(indent_level, eventual_encoding,
formatter))
if text and indent_level and not self.name == 'pre':
text = text.strip()
if text:
if pretty_print and not self.name == 'pre':
s.append(" " * (indent_level - 1))
s.append(text)
if pretty_print and not self.name == 'pre':
s.append("\n")
return ''.join(s)
def encode_contents(
self, indent_level=None, encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Renders the contents of this tag as a bytestring."""
contents = self.decode_contents(indent_level, encoding, formatter)
return contents.encode(encoding)
# Old method for BS3 compatibility
def renderContents(self, encoding=DEFAULT_OUTPUT_ENCODING,
prettyPrint=False, indentLevel=0):
if not prettyPrint:
indentLevel = None
return self.encode_contents(
indent_level=indentLevel, encoding=encoding)
#Soup methods
def find(self, name=None, attrs={}, recursive=True, text=None,
**kwargs):
"""Return only the first child of this Tag matching the given
criteria."""
r = None
l = self.find_all(name, attrs, recursive, text, 1, **kwargs)
if l:
r = l[0]
return r
findChild = find
def find_all(self, name=None, attrs={}, recursive=True, text=None,
limit=None, **kwargs):
"""Extracts a list of Tag objects that match the given
criteria. You can specify the name of the Tag and any
attributes you want the Tag to have.
The value of a key-value pair in the 'attrs' map can be a
string, a list of strings, a regular expression object, or a
callable that takes a string and returns whether or not the
string matches for some custom definition of 'matches'. The
same is true of the tag name."""
generator = self.descendants
if not recursive:
generator = self.children
return self._find_all(name, attrs, text, limit, generator, **kwargs)
findAll = find_all # BS3
findChildren = find_all # BS2
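# Usage sketch for find()/find_all() (markup and values are hypothetical):
#   >>> soup = BeautifulSoup('<a class="x" id="1">a</a><a class="y" id="2">b</a>', 'html.parser')
#   >>> soup.find('a')                        # first match, or None
#   >>> soup.find_all('a', class_='y')        # keyword arguments match attributes
#   >>> soup.find_all('a', attrs={'id': '2'}) # an explicit attrs dict works too
#   >>> soup.find_all(re.compile('^a$'), limit=1)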
#Generator methods
@property
def children(self):
# return iter() to make the purpose of the method clear
return iter(self.contents) # XXX This seems to be untested.
@property
def descendants(self):
if not len(self.contents):
return
stopNode = self._last_descendant().next_element
current = self.contents[0]
while current is not stopNode:
yield current
current = current.next_element
# CSS selector code
_selector_combinators = ['>', '+', '~']
_select_debug = False
def select(self, selector, _candidate_generator=None):
"""Perform a CSS selection operation on the current element."""
tokens = selector.split()
current_context = [self]
if tokens[-1] in self._selector_combinators:
raise ValueError(
'Final combinator "%s" is missing an argument.' % tokens[-1])
if self._select_debug:
print 'Running CSS selector "%s"' % selector
for index, token in enumerate(tokens):
if self._select_debug:
print ' Considering token "%s"' % token
recursive_candidate_generator = None
tag_name = None
if tokens[index-1] in self._selector_combinators:
# This token was consumed by the previous combinator. Skip it.
if self._select_debug:
print ' Token was consumed by the previous combinator.'
continue
# Each operation corresponds to a checker function, a rule
# for determining whether a candidate matches the
# selector. Candidates are generated by the active
# iterator.
checker = None
m = self.attribselect_re.match(token)
if m is not None:
# Attribute selector
tag_name, attribute, operator, value = m.groups()
checker = self._attribute_checker(operator, attribute, value)
elif '#' in token:
# ID selector
tag_name, tag_id = token.split('#', 1)
def id_matches(tag):
return tag.get('id', None) == tag_id
checker = id_matches
elif '.' in token:
# Class selector
tag_name, klass = token.split('.', 1)
classes = set(klass.split('.'))
def classes_match(candidate):
return classes.issubset(candidate.get('class', []))
checker = classes_match
elif ':' in token:
# Pseudo-class
tag_name, pseudo = token.split(':', 1)
if tag_name == '':
raise ValueError(
"A pseudo-class must be prefixed with a tag name.")
pseudo_attributes = re.match('([a-zA-Z\d-]+)\(([a-zA-Z\d]+)\)', pseudo)
found = []
if pseudo_attributes is not None:
pseudo_type, pseudo_value = pseudo_attributes.groups()
if pseudo_type == 'nth-of-type':
try:
pseudo_value = int(pseudo_value)
except:
raise NotImplementedError(
'Only numeric values are currently supported for the nth-of-type pseudo-class.')
if pseudo_value < 1:
raise ValueError(
'nth-of-type pseudo-class value must be at least 1.')
class Counter(object):
def __init__(self, destination):
self.count = 0
self.destination = destination
def nth_child_of_type(self, tag):
self.count += 1
if self.count == self.destination:
return True
if self.count > self.destination:
# Stop the generator that's sending us
# these things.
raise StopIteration()
return False
checker = Counter(pseudo_value).nth_child_of_type
else:
raise NotImplementedError(
'Only the following pseudo-classes are implemented: nth-of-type.')
elif token == '*':
# Star selector -- matches everything
pass
elif token == '>':
# Run the next token as a CSS selector against the
# direct children of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.children
elif token == '~':
# Run the next token as a CSS selector against the
# siblings of each tag in the current context.
recursive_candidate_generator = lambda tag: tag.next_siblings
elif token == '+':
# For each tag in the current context, run the next
# token as a CSS selector against the tag's next
# sibling that's a tag.
def next_tag_sibling(tag):
yield tag.find_next_sibling(True)
recursive_candidate_generator = next_tag_sibling
elif self.tag_name_re.match(token):
# Just a tag name.
tag_name = token
else:
raise ValueError(
'Unsupported or invalid CSS selector: "%s"' % token)
if recursive_candidate_generator:
# This happens when the selector looks like "> foo".
#
# The generator calls select() recursively on every
# member of the current context, passing in a different
# candidate generator and a different selector.
#
# In the case of "> foo", the candidate generator is
# one that yields a tag's direct children (">"), and
# the selector is "foo".
next_token = tokens[index+1]
def recursive_select(tag):
if self._select_debug:
print ' Calling select("%s") recursively on %s %s' % (next_token, tag.name, tag.attrs)
print '-' * 40
for i in tag.select(next_token, recursive_candidate_generator):
if self._select_debug:
print '(Recursive select picked up candidate %s %s)' % (i.name, i.attrs)
yield i
if self._select_debug:
print '-' * 40
_use_candidate_generator = recursive_select
elif _candidate_generator is None:
# By default, a tag's candidates are all of its
# children. If tag_name is defined, only yield tags
# with that name.
if self._select_debug:
if tag_name:
check = tag_name
else:
check = "[any]"
print ' Default candidate generator, tag name="%s"' % check
if self._select_debug:
# This is redundant with later code, but it stops
# a bunch of bogus tags from cluttering up the
# debug log.
def default_candidate_generator(tag):
for child in tag.descendants:
if not isinstance(child, Tag):
continue
if tag_name and not child.name == tag_name:
continue
yield child
_use_candidate_generator = default_candidate_generator
else:
_use_candidate_generator = lambda tag: tag.descendants
else:
_use_candidate_generator = _candidate_generator
new_context = []
new_context_ids = set([])
for tag in current_context:
if self._select_debug:
print " Running candidate generator on %s %s" % (
tag.name, repr(tag.attrs))
for candidate in _use_candidate_generator(tag):
if not isinstance(candidate, Tag):
continue
if tag_name and candidate.name != tag_name:
continue
if checker is not None:
try:
result = checker(candidate)
except StopIteration:
# The checker has decided we should no longer
# run the generator.
break
if checker is None or result:
if self._select_debug:
print " SUCCESS %s %s" % (candidate.name, repr(candidate.attrs))
if id(candidate) not in new_context_ids:
# If a tag matches a selector more than once,
# don't include it in the context more than once.
new_context.append(candidate)
new_context_ids.add(id(candidate))
elif self._select_debug:
print " FAILURE %s %s" % (candidate.name, repr(candidate.attrs))
current_context = new_context
if self._select_debug:
print "Final verdict:"
for i in current_context:
print " %s %s" % (i.name, i.attrs)
return current_context
# Old names for backwards compatibility
def childGenerator(self):
return self.children
def recursiveChildGenerator(self):
return self.descendants
def has_key(self, key):
"""This was kind of misleading because has_key() (attributes)
was different from __in__ (contents). has_key() is gone in
Python 3, anyway."""
warnings.warn('has_key is deprecated. Use has_attr("%s") instead.' % (
key))
return self.has_attr(key)
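# Usage sketch for the select() method above; the selectors shown are among
# the forms this implementation understands (markup is made up):
#   >>> soup = BeautifulSoup('<div id="main"><p class="story">x <a href="http://e.com">y</a></p></div>', 'html.parser')
#   >>> soup.select('div p')             # descendant combinator
#   >>> soup.select('p.story > a')       # direct-child combinator
#   >>> soup.select('#main a[href]')     # id selector plus attribute selector
#   >>> soup.select('p:nth-of-type(1)')  # the only pseudo-class implemented here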
# Next, a couple classes to represent queries and their results.
class SoupStrainer(object):
"""Encapsulates a number of ways of matching a markup element (tag or
text)."""
def __init__(self, name=None, attrs={}, text=None, **kwargs):
self.name = self._normalize_search_value(name)
if not isinstance(attrs, dict):
# Treat a non-dict value for attrs as a search for the 'class'
# attribute.
kwargs['class'] = attrs
attrs = None
if 'class_' in kwargs:
# Treat class_="foo" as a search for the 'class'
# attribute, overriding any non-dict value for attrs.
kwargs['class'] = kwargs['class_']
del kwargs['class_']
if kwargs:
if attrs:
attrs = attrs.copy()
attrs.update(kwargs)
else:
attrs = kwargs
normalized_attrs = {}
for key, value in attrs.items():
normalized_attrs[key] = self._normalize_search_value(value)
self.attrs = normalized_attrs
self.text = self._normalize_search_value(text)
def _normalize_search_value(self, value):
# Leave it alone if it's a Unicode string, a callable, a
# regular expression, a boolean, or None.
if (isinstance(value, unicode) or callable(value) or hasattr(value, 'match')
or isinstance(value, bool) or value is None):
return value
# If it's a bytestring, convert it to Unicode, treating it as UTF-8.
if isinstance(value, bytes):
return value.decode("utf8")
# If it's listlike, convert it into a list of strings.
if hasattr(value, '__iter__'):
new_value = []
for v in value:
if (hasattr(v, '__iter__') and not isinstance(v, bytes)
and not isinstance(v, unicode)):
# This is almost certainly the user's mistake. In the
# interests of avoiding infinite loops, we'll let
# it through as-is rather than doing a recursive call.
new_value.append(v)
else:
new_value.append(self._normalize_search_value(v))
return new_value
# Otherwise, convert it into a Unicode string.
# The unicode(str()) thing is so this will do the same thing on Python 2
# and Python 3.
return unicode(str(value))
def __str__(self):
if self.text:
return self.text
else:
return "%s|%s" % (self.name, self.attrs)
def search_tag(self, markup_name=None, markup_attrs={}):
found = None
markup = None
if isinstance(markup_name, Tag):
markup = markup_name
markup_attrs = markup
call_function_with_tag_data = (
isinstance(self.name, collections.Callable)
and not isinstance(markup_name, Tag))
if ((not self.name)
or call_function_with_tag_data
or (markup and self._matches(markup, self.name))
or (not markup and self._matches(markup_name, self.name))):
if call_function_with_tag_data:
match = self.name(markup_name, markup_attrs)
else:
match = True
markup_attr_map = None
for attr, match_against in list(self.attrs.items()):
if not markup_attr_map:
if hasattr(markup_attrs, 'get'):
markup_attr_map = markup_attrs
else:
markup_attr_map = {}
for k, v in markup_attrs:
markup_attr_map[k] = v
attr_value = markup_attr_map.get(attr)
if not self._matches(attr_value, match_against):
match = False
break
if match:
if markup:
found = markup
else:
found = markup_name
if found and self.text and not self._matches(found.string, self.text):
found = None
return found
searchTag = search_tag
def search(self, markup):
# print 'looking for %s in %s' % (self, markup)
found = None
# If given a list of items, scan it for a text element that
# matches.
if hasattr(markup, '__iter__') and not isinstance(markup, (Tag, basestring)):
for element in markup:
if isinstance(element, NavigableString) \
and self.search(element):
found = element
break
# If it's a Tag, make sure its name or attributes match.
# Don't bother with Tags if we're searching for text.
elif isinstance(markup, Tag):
if not self.text or self.name or self.attrs:
found = self.search_tag(markup)
# If it's text, make sure the text matches.
elif isinstance(markup, NavigableString) or \
isinstance(markup, basestring):
if not self.name and not self.attrs and self._matches(markup, self.text):
found = markup
else:
raise Exception(
"I don't know how to match against a %s" % markup.__class__)
return found
def _matches(self, markup, match_against):
# print u"Matching %s against %s" % (markup, match_against)
result = False
if isinstance(markup, list) or isinstance(markup, tuple):
# This should only happen when searching a multi-valued attribute
# like 'class'.
if (isinstance(match_against, unicode)
and ' ' in match_against):
# A bit of a special case. If they try to match "foo
# bar" on a multivalue attribute's value, only accept
# the literal value "foo bar"
#
# XXX This is going to be pretty slow because we keep
# splitting match_against. But it shouldn't come up
# too often.
return (whitespace_re.split(match_against) == markup)
else:
for item in markup:
if self._matches(item, match_against):
return True
return False
if match_against is True:
# True matches any non-None value.
return markup is not None
if isinstance(match_against, collections.Callable):
return match_against(markup)
# Custom callables take the tag as an argument, but all
# other ways of matching match the tag name as a string.
if isinstance(markup, Tag):
markup = markup.name
# Ensure that `markup` is either a Unicode string, or None.
markup = self._normalize_search_value(markup)
if markup is None:
# None matches None, False, an empty string, an empty list, and so on.
return not match_against
if isinstance(match_against, unicode):
# Exact string match
return markup == match_against
if hasattr(match_against, 'match'):
# Regexp match
return match_against.search(markup)
if hasattr(match_against, '__iter__'):
# The markup must be an exact match against something
# in the iterable.
return markup in match_against
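# Sketch of using SoupStrainer directly, typically through the parse_only
# argument of the BeautifulSoup constructor (markup and names are illustrative):
#   >>> only_links = SoupStrainer('a', href=True)
#   >>> soup = BeautifulSoup('<p>x</p><a href="/y">y</a>', 'html.parser', parse_only=only_links)
#   >>> soup.find_all(True)   # only the <a> tag made it into the tree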
class ResultSet(list):
"""A ResultSet is just a list that keeps track of the SoupStrainer
that created it."""
def __init__(self, source, result=()):
super(ResultSet, self).__init__(result)
self.source = source
| 61,538 | Python | .py | 1,383 | 32.560376 | 113 | 0.573965 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,305 | diagnose.py | CouchPotato_CouchPotatoServer/libs/bs4/diagnose.py |
"""Diagnostic functions, mainly for use when doing tech support."""
import cProfile
from StringIO import StringIO
from HTMLParser import HTMLParser
import bs4
from bs4 import BeautifulSoup, __version__
from bs4.builder import builder_registry
import os
import pstats
import random
import tempfile
import time
import traceback
import sys
import cProfile
def diagnose(data):
"""Diagnostic suite for isolating common problems."""
print "Diagnostic running on Beautiful Soup %s" % __version__
print "Python version %s" % sys.version
basic_parsers = ["html.parser", "html5lib", "lxml"]
# Iterate over a copy, since the loop body may remove entries from basic_parsers.
for name in list(basic_parsers):
for builder in builder_registry.builders:
if name in builder.features:
break
else:
basic_parsers.remove(name)
print (
"I noticed that %s is not installed. Installing it may help." %
name)
if 'lxml' in basic_parsers:
basic_parsers.append(["lxml", "xml"])
from lxml import etree
print "Found lxml version %s" % ".".join(map(str,etree.LXML_VERSION))
if 'html5lib' in basic_parsers:
import html5lib
print "Found html5lib version %s" % html5lib.__version__
if hasattr(data, 'read'):
data = data.read()
elif os.path.exists(data):
print '"%s" looks like a filename. Reading data from the file.' % data
data = open(data).read()
elif data.startswith("http:") or data.startswith("https:"):
print '"%s" looks like a URL. Beautiful Soup is not an HTTP client.' % data
print "You need to use some other library to get the document behind the URL, and feed that document to Beautiful Soup."
return
print
for parser in basic_parsers:
print "Trying to parse your markup with %s" % parser
success = False
try:
soup = BeautifulSoup(data, parser)
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "Here's what %s did with the markup:" % parser
print soup.prettify()
print "-" * 80
def lxml_trace(data, html=True, **kwargs):
"""Print out the lxml events that occur during parsing.
This lets you see how lxml parses a document when no Beautiful
Soup code is running.
"""
from lxml import etree
for event, element in etree.iterparse(StringIO(data), html=html, **kwargs):
print("%s, %4s, %s" % (event, element.tag, element.text))
class AnnouncingParser(HTMLParser):
"""Announces HTMLParser parse events, without doing anything else."""
def _p(self, s):
print(s)
def handle_starttag(self, name, attrs):
self._p("%s START" % name)
def handle_endtag(self, name):
self._p("%s END" % name)
def handle_data(self, data):
self._p("%s DATA" % data)
def handle_charref(self, name):
self._p("%s CHARREF" % name)
def handle_entityref(self, name):
self._p("%s ENTITYREF" % name)
def handle_comment(self, data):
self._p("%s COMMENT" % data)
def handle_decl(self, data):
self._p("%s DECL" % data)
def unknown_decl(self, data):
self._p("%s UNKNOWN-DECL" % data)
def handle_pi(self, data):
self._p("%s PI" % data)
def htmlparser_trace(data):
"""Print out the HTMLParser events that occur during parsing.
This lets you see how HTMLParser parses a document when no
Beautiful Soup code is running.
"""
parser = AnnouncingParser()
parser.feed(data)
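# Quick usage sketch for the helpers above (run interactively; the markup
# string is made up):
#   >>> from bs4.diagnose import diagnose, lxml_trace, htmlparser_trace
#   >>> diagnose('<p>Some <b>bad<p>markup')          # tries every installed parser
#   >>> htmlparser_trace('<p>Some <b>bad<p>markup')  # raw HTMLParser events
#   >>> lxml_trace('<p>Some <b>bad<p>markup')        # raw lxml events (requires lxml)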
_vowels = "aeiou"
_consonants = "bcdfghjklmnpqrstvwxyz"
def rword(length=5):
"Generate a random word-like string."
s = ''
for i in range(length):
if i % 2 == 0:
t = _consonants
else:
t = _vowels
s += random.choice(t)
return s
def rsentence(length=4):
"Generate a random sentence-like string."
return " ".join(rword(random.randint(4,9)) for i in range(length))
def rdoc(num_elements=1000):
"""Randomly generate an invalid HTML document."""
tag_names = ['p', 'div', 'span', 'i', 'b', 'script', 'table']
elements = []
for i in range(num_elements):
choice = random.randint(0,3)
if choice == 0:
# New tag.
tag_name = random.choice(tag_names)
elements.append("<%s>" % tag_name)
elif choice == 1:
elements.append(rsentence(random.randint(1,4)))
elif choice == 2:
# Close a tag.
tag_name = random.choice(tag_names)
elements.append("</%s>" % tag_name)
return "<html>" + "\n".join(elements) + "</html>"
def benchmark_parsers(num_elements=100000):
"""Very basic head-to-head performance benchmark."""
print "Comparative parser benchmark on Beautiful Soup %s" % __version__
data = rdoc(num_elements)
print "Generated a large invalid HTML document (%d bytes)." % len(data)
for parser in ["lxml", ["lxml", "html"], "html5lib", "html.parser"]:
success = False
try:
a = time.time()
soup = BeautifulSoup(data, parser)
b = time.time()
success = True
except Exception, e:
print "%s could not parse the markup." % parser
traceback.print_exc()
if success:
print "BS4+%s parsed the markup in %.2fs." % (parser, b-a)
from lxml import etree
a = time.time()
etree.HTML(data)
b = time.time()
print "Raw lxml parsed the markup in %.2fs." % (b-a)
import html5lib
parser = html5lib.HTMLParser()
a = time.time()
parser.parse(data)
b = time.time()
print "Raw html5lib parsed the markup in %.2fs." % (b-a)
def profile(num_elements=100000, parser="lxml"):
filehandle = tempfile.NamedTemporaryFile()
filename = filehandle.name
data = rdoc(num_elements)
vars = dict(bs4=bs4, data=data, parser=parser)
cProfile.runctx('bs4.BeautifulSoup(data, parser)' , vars, vars, filename)
stats = pstats.Stats(filename)
# stats.strip_dirs()
stats.sort_stats("cumulative")
stats.print_stats('_html5lib|bs4', 50)
if __name__ == '__main__':
diagnose(sys.stdin.read())
| 6,315 | Python | .py | 168 | 30.565476 | 128 | 0.624201 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,306 | __init__.py | CouchPotato_CouchPotatoServer/libs/bs4/__init__.py |
"""Beautiful Soup
Elixir and Tonic
"The Screen-Scraper's Friend"
http://www.crummy.com/software/BeautifulSoup/
Beautiful Soup uses a pluggable XML or HTML parser to parse a
(possibly invalid) document into a tree representation. Beautiful Soup
provides methods and Pythonic idioms that make it easy to
navigate, search, and modify the parse tree.
Beautiful Soup works with Python 2.6 and up. It works better if lxml
and/or html5lib is installed.
For more than you ever wanted to know about Beautiful Soup, see the
documentation:
http://www.crummy.com/software/BeautifulSoup/bs4/doc/
"""
__author__ = "Leonard Richardson (leonardr@segfault.org)"
__version__ = "4.3.2"
__copyright__ = "Copyright (c) 2004-2013 Leonard Richardson"
__license__ = "MIT"
__all__ = ['BeautifulSoup']
import os
import re
import warnings
from .builder import builder_registry, ParserRejectedMarkup
from .dammit import UnicodeDammit
from .element import (
CData,
Comment,
DEFAULT_OUTPUT_ENCODING,
Declaration,
Doctype,
NavigableString,
PageElement,
ProcessingInstruction,
ResultSet,
SoupStrainer,
Tag,
)
# The very first thing we do is give a useful error if someone is
# running this code under Python 3 without converting it.
syntax_error = u'You are trying to run the Python 2 version of Beautiful Soup under Python 3. This will not work. You need to convert the code, either by installing it (`python setup.py install`) or by running 2to3 (`2to3 -w bs4`).'
class BeautifulSoup(Tag):
"""
This class defines the basic interface called by the tree builders.
These methods will be called by the parser:
reset()
feed(markup)
The tree builder may call these methods from its feed() implementation:
handle_starttag(name, attrs) # See note about return value
handle_endtag(name)
handle_data(data) # Appends to the current data node
endData(containerClass=NavigableString) # Ends the current data node
No matter how complicated the underlying parser is, you should be
able to build a tree using 'start tag' events, 'end tag' events,
'data' events, and "done with data" events.
If you encounter an empty-element tag (aka a self-closing tag,
like HTML's <br> tag), call handle_starttag and then
handle_endtag.
"""
ROOT_TAG_NAME = u'[document]'
# If the end-user gives no indication which tree builder they
# want, look for one with these features.
DEFAULT_BUILDER_FEATURES = ['html', 'fast']
ASCII_SPACES = '\x20\x0a\x09\x0c\x0d'
def __init__(self, markup="", features=None, builder=None,
parse_only=None, from_encoding=None, **kwargs):
"""The Soup object is initialized as the 'root tag', and the
provided markup (which can be a string or a file-like object)
is fed into the underlying parser."""
if 'convertEntities' in kwargs:
warnings.warn(
"BS4 does not respect the convertEntities argument to the "
"BeautifulSoup constructor. Entities are always converted "
"to Unicode characters.")
if 'markupMassage' in kwargs:
del kwargs['markupMassage']
warnings.warn(
"BS4 does not respect the markupMassage argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for any necessary markup massage.")
if 'smartQuotesTo' in kwargs:
del kwargs['smartQuotesTo']
warnings.warn(
"BS4 does not respect the smartQuotesTo argument to the "
"BeautifulSoup constructor. Smart quotes are always converted "
"to Unicode characters.")
if 'selfClosingTags' in kwargs:
del kwargs['selfClosingTags']
warnings.warn(
"BS4 does not respect the selfClosingTags argument to the "
"BeautifulSoup constructor. The tree builder is responsible "
"for understanding self-closing tags.")
if 'isHTML' in kwargs:
del kwargs['isHTML']
warnings.warn(
"BS4 does not respect the isHTML argument to the "
"BeautifulSoup constructor. You can pass in features='html' "
"or features='xml' to get a builder capable of handling "
"one or the other.")
def deprecated_argument(old_name, new_name):
if old_name in kwargs:
warnings.warn(
'The "%s" argument to the BeautifulSoup constructor '
'has been renamed to "%s."' % (old_name, new_name))
value = kwargs[old_name]
del kwargs[old_name]
return value
return None
parse_only = parse_only or deprecated_argument(
"parseOnlyThese", "parse_only")
from_encoding = from_encoding or deprecated_argument(
"fromEncoding", "from_encoding")
if len(kwargs) > 0:
arg = kwargs.keys().pop()
raise TypeError(
"__init__() got an unexpected keyword argument '%s'" % arg)
if builder is None:
if isinstance(features, basestring):
features = [features]
if features is None or len(features) == 0:
features = self.DEFAULT_BUILDER_FEATURES
builder_class = builder_registry.lookup(*features)
if builder_class is None:
raise FeatureNotFound(
"Couldn't find a tree builder with the features you "
"requested: %s. Do you need to install a parser library?"
% ",".join(features))
builder = builder_class()
self.builder = builder
self.is_xml = builder.is_xml
self.builder.soup = self
self.parse_only = parse_only
if hasattr(markup, 'read'): # It's a file-type object.
markup = markup.read()
elif len(markup) <= 256:
# Print out warnings for a couple beginner problems
# involving passing non-markup to Beautiful Soup.
# Beautiful Soup will still parse the input as markup,
# just in case that's what the user really wants.
if (isinstance(markup, unicode)
and not os.path.supports_unicode_filenames):
possible_filename = markup.encode("utf8")
else:
possible_filename = markup
is_file = False
try:
is_file = os.path.exists(possible_filename)
except Exception, e:
# This is almost certainly a problem involving
# characters not valid in filenames on this
# system. Just let it go.
pass
if is_file:
warnings.warn(
'"%s" looks like a filename, not markup. You should probably open this file and pass the filehandle into Beautiful Soup.' % markup)
if markup[:5] == "http:" or markup[:6] == "https:":
# TODO: This is ugly but I couldn't get it to work in
# Python 3 otherwise.
if ((isinstance(markup, bytes) and not b' ' in markup)
or (isinstance(markup, unicode) and not u' ' in markup)):
warnings.warn(
'"%s" looks like a URL. Beautiful Soup is not an HTTP client. You should probably use an HTTP client to get the document behind the URL, and feed that document to Beautiful Soup.' % markup)
for (self.markup, self.original_encoding, self.declared_html_encoding,
self.contains_replacement_characters) in (
self.builder.prepare_markup(markup, from_encoding)):
self.reset()
try:
self._feed()
break
except ParserRejectedMarkup:
pass
# Clear out the markup and remove the builder's circular
# reference to this object.
self.markup = None
self.builder.soup = None
def _feed(self):
# Convert the document to Unicode.
self.builder.reset()
self.builder.feed(self.markup)
# Close out any unfinished strings and close all the open tags.
self.endData()
while self.currentTag.name != self.ROOT_TAG_NAME:
self.popTag()
def reset(self):
Tag.__init__(self, self, self.builder, self.ROOT_TAG_NAME)
self.hidden = 1
self.builder.reset()
self.current_data = []
self.currentTag = None
self.tagStack = []
self.preserve_whitespace_tag_stack = []
self.pushTag(self)
def new_tag(self, name, namespace=None, nsprefix=None, **attrs):
"""Create a new tag associated with this soup."""
return Tag(None, self.builder, name, namespace, nsprefix, attrs)
def new_string(self, s, subclass=NavigableString):
"""Create a new NavigableString associated with this soup."""
navigable = subclass(s)
navigable.setup()
return navigable
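# Sketch of building new nodes with the two factory methods above and attaching
# them to the tree (tag and attribute values are illustrative):
#   >>> soup = BeautifulSoup('<p>hello</p>', 'html.parser')
#   >>> link = soup.new_tag('a', href='http://example.com')
#   >>> link.string = 'a link'
#   >>> soup.p.append(link)
#   >>> soup.p.append(soup.new_string(' and some text'))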
def insert_before(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_before().")
def insert_after(self, successor):
raise NotImplementedError("BeautifulSoup objects don't support insert_after().")
def popTag(self):
tag = self.tagStack.pop()
if self.preserve_whitespace_tag_stack and tag == self.preserve_whitespace_tag_stack[-1]:
self.preserve_whitespace_tag_stack.pop()
#print "Pop", tag.name
if self.tagStack:
self.currentTag = self.tagStack[-1]
return self.currentTag
def pushTag(self, tag):
#print "Push", tag.name
if self.currentTag:
self.currentTag.contents.append(tag)
self.tagStack.append(tag)
self.currentTag = self.tagStack[-1]
if tag.name in self.builder.preserve_whitespace_tags:
self.preserve_whitespace_tag_stack.append(tag)
def endData(self, containerClass=NavigableString):
if self.current_data:
current_data = u''.join(self.current_data)
# If whitespace is not preserved, and this string contains
# nothing but ASCII spaces, replace it with a single space
# or newline.
if not self.preserve_whitespace_tag_stack:
strippable = True
for i in current_data:
if i not in self.ASCII_SPACES:
strippable = False
break
if strippable:
if '\n' in current_data:
current_data = '\n'
else:
current_data = ' '
# Reset the data collector.
self.current_data = []
# Should we add this string to the tree at all?
if self.parse_only and len(self.tagStack) <= 1 and \
(not self.parse_only.text or \
not self.parse_only.search(current_data)):
return
o = containerClass(current_data)
self.object_was_parsed(o)
def object_was_parsed(self, o, parent=None, most_recent_element=None):
"""Add an object to the parse tree."""
parent = parent or self.currentTag
most_recent_element = most_recent_element or self._most_recent_element
o.setup(parent, most_recent_element)
if most_recent_element is not None:
most_recent_element.next_element = o
self._most_recent_element = o
parent.contents.append(o)
def _popToTag(self, name, nsprefix=None, inclusivePop=True):
"""Pops the tag stack up to and including the most recent
instance of the given tag. If inclusivePop is false, pops the tag
stack up to but *not* including the most recent instance of
the given tag."""
#print "Popping to %s" % name
if name == self.ROOT_TAG_NAME:
# The BeautifulSoup object itself can never be popped.
return
most_recently_popped = None
stack_size = len(self.tagStack)
for i in range(stack_size - 1, 0, -1):
t = self.tagStack[i]
if (name == t.name and nsprefix == t.prefix):
if inclusivePop:
most_recently_popped = self.popTag()
break
most_recently_popped = self.popTag()
return most_recently_popped
def handle_starttag(self, name, namespace, nsprefix, attrs):
"""Push a start tag on to the stack.
If this method returns None, the tag was rejected by the
SoupStrainer. You should proceed as if the tag had not occurred
in the document. For instance, if this was a self-closing tag,
don't call handle_endtag.
"""
# print "Start tag %s: %s" % (name, attrs)
self.endData()
if (self.parse_only and len(self.tagStack) <= 1
and (self.parse_only.text
or not self.parse_only.search_tag(name, attrs))):
return None
tag = Tag(self, self.builder, name, namespace, nsprefix, attrs,
self.currentTag, self._most_recent_element)
if tag is None:
return tag
if self._most_recent_element:
self._most_recent_element.next_element = tag
self._most_recent_element = tag
self.pushTag(tag)
return tag
def handle_endtag(self, name, nsprefix=None):
#print "End tag: " + name
self.endData()
self._popToTag(name, nsprefix)
def handle_data(self, data):
self.current_data.append(data)
def decode(self, pretty_print=False,
eventual_encoding=DEFAULT_OUTPUT_ENCODING,
formatter="minimal"):
"""Returns a string or Unicode representation of this document.
To get Unicode, pass None for encoding."""
if self.is_xml:
# Print the XML declaration
encoding_part = ''
if eventual_encoding != None:
encoding_part = ' encoding="%s"' % eventual_encoding
prefix = u'<?xml version="1.0"%s?>\n' % encoding_part
else:
prefix = u''
if not pretty_print:
indent_level = None
else:
indent_level = 0
return prefix + super(BeautifulSoup, self).decode(
indent_level, eventual_encoding, formatter)
# Alias to make it easier to type import: 'from bs4 import _soup'
_s = BeautifulSoup
_soup = BeautifulSoup
class BeautifulStoneSoup(BeautifulSoup):
"""Deprecated interface to an XML parser."""
def __init__(self, *args, **kwargs):
kwargs['features'] = 'xml'
warnings.warn(
'The BeautifulStoneSoup class is deprecated. Instead of using '
'it, pass features="xml" into the BeautifulSoup constructor.')
super(BeautifulStoneSoup, self).__init__(*args, **kwargs)
class StopParsing(Exception):
pass
class FeatureNotFound(ValueError):
pass
#By default, act as an HTML pretty-printer.
if __name__ == '__main__':
import sys
soup = BeautifulSoup(sys.stdin)
print soup.prettify()
| 15,401 | Python | .py | 340 | 34.826471 | 232 | 0.613805 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,307 | testing.py | CouchPotato_CouchPotatoServer/libs/bs4/testing.py |
"""Helper classes for tests."""
import copy
import functools
import unittest
from unittest import TestCase
from bs4 import BeautifulSoup
from bs4.element import (
CharsetMetaAttributeValue,
Comment,
ContentMetaAttributeValue,
Doctype,
SoupStrainer,
)
from bs4.builder import HTMLParserTreeBuilder
default_builder = HTMLParserTreeBuilder
class SoupTest(unittest.TestCase):
@property
def default_builder(self):
return default_builder()
def soup(self, markup, **kwargs):
"""Build a Beautiful Soup object from markup."""
builder = kwargs.pop('builder', self.default_builder)
return BeautifulSoup(markup, builder=builder, **kwargs)
def document_for(self, markup):
"""Turn an HTML fragment into a document.
The details depend on the builder.
"""
return self.default_builder.test_fragment_to_document(markup)
def assertSoupEquals(self, to_parse, compare_parsed_to=None):
builder = self.default_builder
obj = BeautifulSoup(to_parse, builder=builder)
if compare_parsed_to is None:
compare_parsed_to = to_parse
self.assertEqual(obj.decode(), self.document_for(compare_parsed_to))
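# Sketch of how these helpers are typically combined in a concrete test module
# (the class name below is illustrative, not defined in this file):
#   class HTMLParserSmokeTest(SoupTest, HTMLTreeBuilderSmokeTest):
#       @property
#       def default_builder(self):
#           return HTMLParserTreeBuilder()
# A subclass only needs to point default_builder at the builder under test;
# SoupTest.soup() and the smoke tests defined below then exercise that builder.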
class HTMLTreeBuilderSmokeTest(object):
"""A basic test of a treebuilder's competence.
Any HTML treebuilder, present or future, should be able to pass
these tests. With invalid markup, there's room for interpretation,
and different parsers can handle it differently. But with the
markup in these tests, there's not much room for interpretation.
"""
def assertDoctypeHandled(self, doctype_fragment):
"""Assert that a given doctype string is handled correctly."""
doctype_str, soup = self._document_with_doctype(doctype_fragment)
# Make sure a Doctype object was created.
doctype = soup.contents[0]
self.assertEqual(doctype.__class__, Doctype)
self.assertEqual(doctype, doctype_fragment)
self.assertEqual(str(soup)[:len(doctype_str)], doctype_str)
# Make sure that the doctype was correctly associated with the
# parse tree and that the rest of the document parsed.
self.assertEqual(soup.p.contents[0], 'foo')
def _document_with_doctype(self, doctype_fragment):
"""Generate and parse a document with the given doctype."""
doctype = '<!DOCTYPE %s>' % doctype_fragment
markup = doctype + '\n<p>foo</p>'
soup = self.soup(markup)
return doctype, soup
def test_normal_doctypes(self):
"""Make sure normal, everyday HTML doctypes are handled correctly."""
self.assertDoctypeHandled("html")
self.assertDoctypeHandled(
'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"')
def test_empty_doctype(self):
soup = self.soup("<!DOCTYPE>")
doctype = soup.contents[0]
self.assertEqual("", doctype.strip())
def test_public_doctype_with_url(self):
doctype = 'html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"'
self.assertDoctypeHandled(doctype)
def test_system_doctype(self):
self.assertDoctypeHandled('foo SYSTEM "http://www.example.com/"')
def test_namespaced_system_doctype(self):
# We can handle a namespaced doctype with a system ID.
self.assertDoctypeHandled('xsl:stylesheet SYSTEM "htmlent.dtd"')
def test_namespaced_public_doctype(self):
# Test a namespaced doctype with a public id.
self.assertDoctypeHandled('xsl:stylesheet PUBLIC "htmlent.dtd"')
def test_real_xhtml_document(self):
"""A real XHTML document should come out more or less the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8").replace(b"\n", b""),
markup.replace(b"\n", b""))
def test_deepcopy(self):
"""Make sure you can copy the tree builder.
This is important because the builder is part of a
BeautifulSoup object, and we want to be able to copy that.
"""
copy.deepcopy(self.default_builder)
def test_p_tag_is_never_empty_element(self):
"""A <p> tag is never designated as an empty-element tag.
Even if the markup shows it as an empty-element tag, it
shouldn't be presented that way.
"""
soup = self.soup("<p/>")
self.assertFalse(soup.p.is_empty_element)
self.assertEqual(str(soup.p), "<p></p>")
def test_unclosed_tags_get_closed(self):
"""A tag that's not closed by the end of the document should be closed.
This applies to all tags except empty-element tags.
"""
self.assertSoupEquals("<p>", "<p></p>")
self.assertSoupEquals("<b>", "<b></b>")
self.assertSoupEquals("<br>", "<br/>")
def test_br_is_always_empty_element_tag(self):
"""A <br> tag is designated as an empty-element tag.
Some parsers treat <br></br> as one <br/> tag, some parsers as
two tags, but it should always be an empty-element tag.
"""
soup = self.soup("<br></br>")
self.assertTrue(soup.br.is_empty_element)
self.assertEqual(str(soup.br), "<br/>")
def test_nested_formatting_elements(self):
self.assertSoupEquals("<em><em></em></em>")
def test_comment(self):
# Comments are represented as Comment objects.
markup = "<p>foo<!--foobar-->baz</p>"
self.assertSoupEquals(markup)
soup = self.soup(markup)
comment = soup.find(text="foobar")
self.assertEqual(comment.__class__, Comment)
# The comment is properly integrated into the tree.
foo = soup.find(text="foo")
self.assertEqual(comment, foo.next_element)
baz = soup.find(text="baz")
self.assertEqual(comment, baz.previous_element)
def test_preserved_whitespace_in_pre_and_textarea(self):
"""Whitespace must be preserved in <pre> and <textarea> tags."""
self.assertSoupEquals("<pre> </pre>")
self.assertSoupEquals("<textarea> woo </textarea>")
def test_nested_inline_elements(self):
"""Inline elements can be nested indefinitely."""
b_tag = "<b>Inside a B tag</b>"
self.assertSoupEquals(b_tag)
nested_b_tag = "<p>A <i>nested <b>tag</b></i></p>"
self.assertSoupEquals(nested_b_tag)
double_nested_b_tag = "<p>A <a>doubly <i>nested <b>tag</b></i></a></p>"
self.assertSoupEquals(double_nested_b_tag)
def test_nested_block_level_elements(self):
"""Block elements can be nested."""
soup = self.soup('<blockquote><p><b>Foo</b></p></blockquote>')
blockquote = soup.blockquote
self.assertEqual(blockquote.p.b.string, 'Foo')
self.assertEqual(blockquote.b.string, 'Foo')
def test_correctly_nested_tables(self):
"""One table can go inside another one."""
markup = ('<table id="1">'
'<tr>'
"<td>Here's another table:"
'<table id="2">'
'<tr><td>foo</td></tr>'
'</table></td>')
self.assertSoupEquals(
markup,
'<table id="1"><tr><td>Here\'s another table:'
'<table id="2"><tr><td>foo</td></tr></table>'
'</td></tr></table>')
self.assertSoupEquals(
"<table><thead><tr><td>Foo</td></tr></thead>"
"<tbody><tr><td>Bar</td></tr></tbody>"
"<tfoot><tr><td>Baz</td></tr></tfoot></table>")
def test_deeply_nested_multivalued_attribute(self):
# html5lib can set the attributes of the same tag many times
# as it rearranges the tree. This has caused problems with
# multivalued attributes.
markup = '<table><div><div class="css"></div></div></table>'
soup = self.soup(markup)
self.assertEqual(["css"], soup.div.div['class'])
def test_angle_brackets_in_attribute_values_are_escaped(self):
self.assertSoupEquals('<a b="<a>"></a>', '<a b="<a>"></a>')
def test_entities_in_attributes_converted_to_unicode(self):
expect = u'<p id="pi\N{LATIN SMALL LETTER N WITH TILDE}ata"></p>'
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
self.assertSoupEquals('<p id="piñata"></p>', expect)
def test_entities_in_text_converted_to_unicode(self):
expect = u'<p>pi\N{LATIN SMALL LETTER N WITH TILDE}ata</p>'
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
self.assertSoupEquals("<p>piñata</p>", expect)
def test_quot_entity_converted_to_quotation_mark(self):
self.assertSoupEquals("<p>I said "good day!"</p>",
'<p>I said "good day!"</p>')
def test_out_of_range_entity(self):
expect = u"\N{REPLACEMENT CHARACTER}"
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
self.assertSoupEquals("�", expect)
def test_multipart_strings(self):
"Mostly to prevent a recurrence of a bug in the html5lib treebuilder."
soup = self.soup("<html><h2>\nfoo</h2><p></p></html>")
self.assertEqual("p", soup.h2.string.next_element.name)
self.assertEqual("p", soup.p.name)
def test_basic_namespaces(self):
"""Parsers don't need to *understand* namespaces, but at the
very least they should not choke on namespaces or lose
data."""
markup = b'<html xmlns="http://www.w3.org/1999/xhtml" xmlns:mathml="http://www.w3.org/1998/Math/MathML" xmlns:svg="http://www.w3.org/2000/svg"><head></head><body><mathml:msqrt>4</mathml:msqrt><b svg:fill="red"></b></body></html>'
soup = self.soup(markup)
self.assertEqual(markup, soup.encode())
html = soup.html
self.assertEqual('http://www.w3.org/1999/xhtml', soup.html['xmlns'])
self.assertEqual(
'http://www.w3.org/1998/Math/MathML', soup.html['xmlns:mathml'])
self.assertEqual(
'http://www.w3.org/2000/svg', soup.html['xmlns:svg'])
def test_multivalued_attribute_value_becomes_list(self):
markup = b'<a class="foo bar">'
soup = self.soup(markup)
self.assertEqual(['foo', 'bar'], soup.a['class'])
#
# Generally speaking, tests below this point are more tests of
# Beautiful Soup than tests of the tree builders. But parsers are
# weird, so we run these tests separately for every tree builder
# to detect any differences between them.
#
def test_can_parse_unicode_document(self):
# A seemingly innocuous document... but it's in Unicode! And
# it contains characters that can't be represented in the
# encoding found in the declaration! The horror!
markup = u'<html><head><meta encoding="euc-jp"></head><body>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</body>'
soup = self.soup(markup)
self.assertEqual(u'Sacr\xe9 bleu!', soup.body.string)
def test_soupstrainer(self):
"""Parsers should be able to work with SoupStrainers."""
strainer = SoupStrainer("b")
soup = self.soup("A <b>bold</b> <meta/> <i>statement</i>",
parse_only=strainer)
self.assertEqual(soup.decode(), "<b>bold</b>")
def test_single_quote_attribute_values_become_double_quotes(self):
self.assertSoupEquals("<foo attr='bar'></foo>",
'<foo attr="bar"></foo>')
def test_attribute_values_with_nested_quotes_are_left_alone(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
self.assertSoupEquals(text)
def test_attribute_values_with_double_nested_quotes_get_quoted(self):
text = """<foo attr='bar "brawls" happen'>a</foo>"""
soup = self.soup(text)
soup.foo['attr'] = 'Brawls happen at "Bob\'s Bar"'
self.assertSoupEquals(
soup.foo.decode(),
"""<foo attr="Brawls happen at "Bob\'s Bar"">a</foo>""")
def test_ampersand_in_attribute_value_gets_escaped(self):
self.assertSoupEquals('<this is="really messed up & stuff"></this>',
'<this is="really messed up & stuff"></this>')
self.assertSoupEquals(
'<a href="http://example.org?a=1&b=2;3">foo</a>',
'<a href="http://example.org?a=1&b=2;3">foo</a>')
def test_escaped_ampersand_in_attribute_value_is_left_alone(self):
self.assertSoupEquals('<a href="http://example.org?a=1&amp;b=2;3"></a>')
def test_entities_in_strings_converted_during_parsing(self):
# Both XML and HTML entities are converted to Unicode characters
# during parsing.
text = "<p><<sacré bleu!>></p>"
expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>"
self.assertSoupEquals(text, expected)
def test_smart_quotes_converted_on_the_way_in(self):
# Microsoft smart quotes are converted to Unicode characters during
# parsing.
quote = b"<p>\x91Foo\x92</p>"
soup = self.soup(quote)
self.assertEqual(
soup.p.string,
u"\N{LEFT SINGLE QUOTATION MARK}Foo\N{RIGHT SINGLE QUOTATION MARK}")
def test_non_breaking_spaces_converted_on_the_way_in(self):
soup = self.soup("<a> </a>")
self.assertEqual(soup.a.string, u"\N{NO-BREAK SPACE}" * 2)
def test_entities_converted_on_the_way_out(self):
text = "<p><<sacré bleu!>></p>"
expected = u"<p><<sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!>></p>".encode("utf-8")
soup = self.soup(text)
self.assertEqual(soup.p.encode("utf-8"), expected)
def test_real_iso_latin_document(self):
# Smoke test of interrelated functionality, using an
# easy-to-understand document.
# Here it is in Unicode. Note that it claims to be in ISO-Latin-1.
unicode_html = u'<html><head><meta content="text/html; charset=ISO-Latin-1" http-equiv="Content-type"/></head><body><p>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</p></body></html>'
# That's because we're going to encode it into ISO-Latin-1, and use
# that to test.
iso_latin_html = unicode_html.encode("iso-8859-1")
# Parse the ISO-Latin-1 HTML.
soup = self.soup(iso_latin_html)
# Encode it to UTF-8.
result = soup.encode("utf-8")
# What do we expect the result to look like? Well, it would
# look like unicode_html, except that the META tag would say
# UTF-8 instead of ISO-Latin-1.
expected = unicode_html.replace("ISO-Latin-1", "utf-8")
# And, of course, it would be in UTF-8, not Unicode.
expected = expected.encode("utf-8")
# Ta-da!
self.assertEqual(result, expected)
def test_real_shift_jis_document(self):
# Smoke test to make sure the parser can handle a document in
# Shift-JIS encoding, without choking.
shift_jis_html = (
b'<html><head></head><body><pre>'
b'\x82\xb1\x82\xea\x82\xcdShift-JIS\x82\xc5\x83R\x81[\x83f'
b'\x83B\x83\x93\x83O\x82\xb3\x82\xea\x82\xbd\x93\xfa\x96{\x8c'
b'\xea\x82\xcc\x83t\x83@\x83C\x83\x8b\x82\xc5\x82\xb7\x81B'
b'</pre></body></html>')
unicode_html = shift_jis_html.decode("shift-jis")
soup = self.soup(unicode_html)
# Make sure the parse tree is correctly encoded to various
# encodings.
self.assertEqual(soup.encode("utf-8"), unicode_html.encode("utf-8"))
self.assertEqual(soup.encode("euc_jp"), unicode_html.encode("euc_jp"))
def test_real_hebrew_document(self):
# A real-world test to make sure we can convert ISO-8859-8 (a
# Hebrew encoding) to UTF-8.
hebrew_document = b'<html><head><title>Hebrew (ISO 8859-8) in Visual Directionality</title></head><body><h1>Hebrew (ISO 8859-8) in Visual Directionality</h1>\xed\xe5\xec\xf9</body></html>'
soup = self.soup(
hebrew_document, from_encoding="iso8859-8")
self.assertEqual(soup.original_encoding, 'iso8859-8')
self.assertEqual(
soup.encode('utf-8'),
hebrew_document.decode("iso8859-8").encode("utf-8"))
def test_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta content="text/html; charset=x-sjis" '
'http-equiv="Content-type"/>')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', {'http-equiv': 'Content-type'})
content = parsed_meta['content']
self.assertEqual('text/html; charset=x-sjis', content)
# But that value is actually a ContentMetaAttributeValue object.
self.assertTrue(isinstance(content, ContentMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('text/html; charset=utf8', content.encode("utf8"))
# For the rest of the story, see TestSubstitutions in
# test_tree.py.
def test_html5_style_meta_tag_reflects_current_encoding(self):
# Here's the <meta> tag saying that a document is
# encoded in Shift-JIS.
meta_tag = ('<meta id="encoding" charset="x-sjis" />')
# Here's a document incorporating that meta tag.
shift_jis_html = (
'<html><head>\n%s\n'
'<meta http-equiv="Content-language" content="ja"/>'
'</head><body>Shift-JIS markup goes here.') % meta_tag
soup = self.soup(shift_jis_html)
# Parse the document, and the charset is seemingly unaffected.
parsed_meta = soup.find('meta', id="encoding")
charset = parsed_meta['charset']
self.assertEqual('x-sjis', charset)
# But that value is actually a CharsetMetaAttributeValue object.
self.assertTrue(isinstance(charset, CharsetMetaAttributeValue))
# And it will take on a value that reflects its current
# encoding.
self.assertEqual('utf8', charset.encode("utf8"))
def test_tag_with_no_attributes_can_have_attributes_added(self):
data = self.soup("<a>text</a>")
data.a['foo'] = 'bar'
self.assertEqual('<a foo="bar">text</a>', data.a.decode())
class XMLTreeBuilderSmokeTest(object):
def test_docstring_generated(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode(), b'<?xml version="1.0" encoding="utf-8"?>\n<root/>')
def test_real_xhtml_document(self):
"""A real XHTML document should come out *exactly* the same as it went in."""
markup = b"""<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN">
<html xmlns="http://www.w3.org/1999/xhtml">
<head><title>Hello.</title></head>
<body>Goodbye.</body>
</html>"""
soup = self.soup(markup)
self.assertEqual(
soup.encode("utf-8"), markup)
def test_formatter_processes_script_tag_for_xml_documents(self):
doc = """
<script type="text/javascript">
</script>
"""
soup = BeautifulSoup(doc, "xml")
# lxml would have stripped this while parsing, but we can add
# it later.
soup.script.string = 'console.log("< < hey > > ");'
encoded = soup.encode()
self.assertTrue(b"< < hey > >" in encoded)
def test_can_parse_unicode_document(self):
markup = u'<?xml version="1.0" encoding="euc-jp"><root>Sacr\N{LATIN SMALL LETTER E WITH ACUTE} bleu!</root>'
soup = self.soup(markup)
self.assertEqual(u'Sacr\xe9 bleu!', soup.root.string)
def test_popping_namespaced_tag(self):
markup = '<rss xmlns:dc="foo"><dc:creator>b</dc:creator><dc:date>2012-07-02T20:33:42Z</dc:date><dc:rights>c</dc:rights><image>d</image></rss>'
soup = self.soup(markup)
self.assertEqual(
unicode(soup.rss), markup)
def test_docstring_includes_correct_encoding(self):
soup = self.soup("<root/>")
self.assertEqual(
soup.encode("latin1"),
b'<?xml version="1.0" encoding="latin1"?>\n<root/>')
def test_large_xml_document(self):
"""A large XML document should come out the same as it went in."""
markup = (b'<?xml version="1.0" encoding="utf-8"?>\n<root>'
+ b'0' * (2**12)
+ b'</root>')
soup = self.soup(markup)
self.assertEqual(soup.encode("utf-8"), markup)
def test_tags_are_empty_element_if_and_only_if_they_are_empty(self):
self.assertSoupEquals("<p>", "<p/>")
self.assertSoupEquals("<p>foo</p>")
def test_namespaces_are_preserved(self):
markup = '<root xmlns:a="http://example.com/" xmlns:b="http://example.net/"><a:foo>This tag is in the a namespace</a:foo><b:foo>This tag is in the b namespace</b:foo></root>'
soup = self.soup(markup)
root = soup.root
self.assertEqual("http://example.com/", root['xmlns:a'])
self.assertEqual("http://example.net/", root['xmlns:b'])
def test_closing_namespaced_tag(self):
markup = '<p xmlns:dc="http://purl.org/dc/elements/1.1/"><dc:date>20010504</dc:date></p>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.p), markup)
def test_namespaced_attributes(self):
markup = '<foo xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><bar xsi:schemaLocation="http://www.example.com"/></foo>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.foo), markup)
def test_namespaced_attributes_xml_namespace(self):
markup = '<foo xml:lang="fr">bar</foo>'
soup = self.soup(markup)
self.assertEqual(unicode(soup.foo), markup)
class HTML5TreeBuilderSmokeTest(HTMLTreeBuilderSmokeTest):
"""Smoke test for a tree builder that supports HTML5."""
def test_real_xhtml_document(self):
# Since XHTML is not HTML5, HTML5 parsers are not tested to handle
# XHTML documents in any particular way.
pass
def test_html_tags_have_namespace(self):
markup = "<a>"
soup = self.soup(markup)
self.assertEqual("http://www.w3.org/1999/xhtml", soup.a.namespace)
def test_svg_tags_have_namespace(self):
markup = '<svg><circle/></svg>'
soup = self.soup(markup)
namespace = "http://www.w3.org/2000/svg"
self.assertEqual(namespace, soup.svg.namespace)
self.assertEqual(namespace, soup.circle.namespace)
def test_mathml_tags_have_namespace(self):
markup = '<math><msqrt>5</msqrt></math>'
soup = self.soup(markup)
namespace = 'http://www.w3.org/1998/Math/MathML'
self.assertEqual(namespace, soup.math.namespace)
self.assertEqual(namespace, soup.msqrt.namespace)
def test_xml_declaration_becomes_comment(self):
markup = '<?xml version="1.0" encoding="utf-8"?><html></html>'
soup = self.soup(markup)
self.assertTrue(isinstance(soup.contents[0], Comment))
self.assertEqual(soup.contents[0], '?xml version="1.0" encoding="utf-8"?')
self.assertEqual("html", soup.contents[0].next_element.name)
def skipIf(condition, reason):
def nothing(test, *args, **kwargs):
return None
def decorator(test_item):
if condition:
return nothing
else:
return test_item
return decorator
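# A minimal sketch of how these smoke-test mixins are meant to be used,
# not part of the original module. It assumes the SoupTest base class and
# the default_builder hook defined earlier in this test framework.
#
# class LXMLXMLTreeBuilderSmokeTest(SoupTest, XMLTreeBuilderSmokeTest):
#     @property
#     def default_builder(self):
#         from bs4.builder import LXMLTreeBuilderForXML
#         return LXMLTreeBuilderForXML()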
| 24,510 | Python | .py | 477 | 42.677149 | 237 | 0.631073 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |

| 7,308 | dammit.py | CouchPotato_CouchPotatoServer/libs/bs4/dammit.py |
# -*- coding: utf-8 -*-
"""Beautiful Soup bonus library: Unicode, Dammit
This library converts a bytestream to Unicode through any means
necessary. It is heavily based on code from Mark Pilgrim's Universal
Feed Parser. It works best on XML and XML, but it does not rewrite the
XML or HTML to reflect a new encoding; that's the tree builder's job.
"""
import codecs
from htmlentitydefs import codepoint2name
import re
import logging
import string
# Import a library to autodetect character encodings.
chardet_type = None
try:
# First try the fast C implementation.
# PyPI package: cchardet
import cchardet
def chardet_dammit(s):
return cchardet.detect(s)['encoding']
except ImportError:
try:
# Fall back to the pure Python implementation
# Debian package: python-chardet
# PyPI package: chardet
import chardet
def chardet_dammit(s):
return chardet.detect(s)['encoding']
#import chardet.constants
#chardet.constants._debug = 1
except ImportError:
# No chardet available.
def chardet_dammit(s):
return None
# Available from http://cjkpython.i18n.org/.
try:
import iconv_codec
except ImportError:
pass
xml_encoding_re = re.compile(
'^<\?.*encoding=[\'"](.*?)[\'"].*\?>'.encode(), re.I)
html_meta_re = re.compile(
'<\s*meta[^>]+charset\s*=\s*["\']?([^>]*?)[ /;\'">]'.encode(), re.I)
class EntitySubstitution(object):
"""Substitute XML or HTML entities for the corresponding characters."""
def _populate_class_variables():
lookup = {}
reverse_lookup = {}
characters_for_re = []
for codepoint, name in list(codepoint2name.items()):
character = unichr(codepoint)
if codepoint != 34:
# There's no point in turning the quotation mark into
# ", unless it happens within an attribute value, which
# is handled elsewhere.
characters_for_re.append(character)
lookup[character] = name
# But we do want to turn " into the quotation mark.
reverse_lookup[name] = character
re_definition = "[%s]" % "".join(characters_for_re)
return lookup, reverse_lookup, re.compile(re_definition)
(CHARACTER_TO_HTML_ENTITY, HTML_ENTITY_TO_CHARACTER,
CHARACTER_TO_HTML_ENTITY_RE) = _populate_class_variables()
CHARACTER_TO_XML_ENTITY = {
"'": "apos",
'"': "quot",
"&": "amp",
"<": "lt",
">": "gt",
}
BARE_AMPERSAND_OR_BRACKET = re.compile("([<>]|"
"&(?!#\d+;|#x[0-9a-fA-F]+;|\w+;)"
")")
AMPERSAND_OR_BRACKET = re.compile("([<>&])")
@classmethod
def _substitute_html_entity(cls, matchobj):
entity = cls.CHARACTER_TO_HTML_ENTITY.get(matchobj.group(0))
return "&%s;" % entity
@classmethod
def _substitute_xml_entity(cls, matchobj):
"""Used with a regular expression to substitute the
appropriate XML entity for an XML special character."""
entity = cls.CHARACTER_TO_XML_ENTITY[matchobj.group(0)]
return "&%s;" % entity
@classmethod
def quoted_attribute_value(self, value):
"""Make a value into a quoted XML attribute, possibly escaping it.
Most strings will be quoted using double quotes.
Bob's Bar -> "Bob's Bar"
If a string contains double quotes, it will be quoted using
single quotes.
Welcome to "my bar" -> 'Welcome to "my bar"'
If a string contains both single and double quotes, the
double quotes will be escaped, and the string will be quoted
using double quotes.
Welcome to "Bob's Bar" -> "Welcome to "Bob's bar"
"""
quote_with = '"'
if '"' in value:
if "'" in value:
# The string contains both single and double
# quotes. Turn the double quotes into
# entities. We quote the double quotes rather than
# the single quotes because the entity name is
# """ whether this is HTML or XML. If we
# quoted the single quotes, we'd have to decide
# between ' and &squot;.
replace_with = """
value = value.replace('"', replace_with)
else:
# There are double quotes but no single quotes.
# We can use single quotes to quote the attribute.
quote_with = "'"
return quote_with + value + quote_with
@classmethod
def substitute_xml(cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign
will become &lt;, the greater-than sign will become &gt;,
and any ampersands will become &amp;. If you want ampersands
that appear to be part of an entity definition to be left
alone, use substitute_xml_containing_entities() instead.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets and ampersands.
value = cls.AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_xml_containing_entities(
cls, value, make_quoted_attribute=False):
"""Substitute XML entities for special XML characters.
:param value: A string to be substituted. The less-than sign will
become &lt;, the greater-than sign will become &gt;, and any
ampersands that are not part of an entity definition will
become &amp;.
:param make_quoted_attribute: If True, then the string will be
quoted, as befits an attribute value.
"""
# Escape angle brackets, and ampersands that aren't part of
# entities.
value = cls.BARE_AMPERSAND_OR_BRACKET.sub(
cls._substitute_xml_entity, value)
if make_quoted_attribute:
value = cls.quoted_attribute_value(value)
return value
@classmethod
def substitute_html(cls, s):
"""Replace certain Unicode characters with named HTML entities.
This differs from data.encode(encoding, 'xmlcharrefreplace')
in that the goal is to make the result more readable (to those
with ASCII displays) rather than to recover from
errors. There's absolutely nothing wrong with a UTF-8 string
containing a LATIN SMALL LETTER E WITH ACUTE, but replacing that
character with "&eacute;" will make it more readable to some
people.
"""
return cls.CHARACTER_TO_HTML_ENTITY_RE.sub(
cls._substitute_html_entity, s)
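# A few illustrative calls (not part of the original module) showing what
# the helpers above produce:
#
#   EntitySubstitution.substitute_xml('AT&T <Research>')
#       -> 'AT&amp;T &lt;Research&gt;'
#   EntitySubstitution.quoted_attribute_value('Welcome to "my bar"')
#       -> the value wrapped in single quotes, since it contains double quotes
#   EntitySubstitution.substitute_html(u'Sacr\xe9 bleu!')
#       -> u'Sacr&eacute; bleu!'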
class EncodingDetector:
"""Suggests a number of possible encodings for a bytestring.
Order of precedence:
1. Encodings you specifically tell EncodingDetector to try first
(the override_encodings argument to the constructor).
2. An encoding declared within the bytestring itself, either in an
XML declaration (if the bytestring is to be interpreted as an XML
document), or in a <meta> tag (if the bytestring is to be
interpreted as an HTML document.)
3. An encoding detected through textual analysis by chardet,
cchardet, or a similar external library.
4. UTF-8.
5. Windows-1252.
"""
def __init__(self, markup, override_encodings=None, is_html=False):
self.override_encodings = override_encodings or []
self.chardet_encoding = None
self.is_html = is_html
self.declared_encoding = None
# First order of business: strip a byte-order mark.
self.markup, self.sniffed_encoding = self.strip_byte_order_mark(markup)
def _usable(self, encoding, tried):
if encoding is not None:
encoding = encoding.lower()
if encoding not in tried:
tried.add(encoding)
return True
return False
@property
def encodings(self):
"""Yield a number of encodings that might work for this markup."""
tried = set()
for e in self.override_encodings:
if self._usable(e, tried):
yield e
# Did the document originally start with a byte-order mark
# that indicated its encoding?
if self._usable(self.sniffed_encoding, tried):
yield self.sniffed_encoding
# Look within the document for an XML or HTML encoding
# declaration.
if self.declared_encoding is None:
self.declared_encoding = self.find_declared_encoding(
self.markup, self.is_html)
if self._usable(self.declared_encoding, tried):
yield self.declared_encoding
# Use third-party character set detection to guess at the
# encoding.
if self.chardet_encoding is None:
self.chardet_encoding = chardet_dammit(self.markup)
if self._usable(self.chardet_encoding, tried):
yield self.chardet_encoding
# As a last-ditch effort, try utf-8 and windows-1252.
for e in ('utf-8', 'windows-1252'):
if self._usable(e, tried):
yield e
@classmethod
def strip_byte_order_mark(cls, data):
"""If a byte-order mark is present, strip it and return the encoding it implies."""
encoding = None
if (len(data) >= 4) and (data[:2] == b'\xfe\xff') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16be'
data = data[2:]
elif (len(data) >= 4) and (data[:2] == b'\xff\xfe') \
and (data[2:4] != '\x00\x00'):
encoding = 'utf-16le'
data = data[2:]
elif data[:3] == b'\xef\xbb\xbf':
encoding = 'utf-8'
data = data[3:]
elif data[:4] == b'\x00\x00\xfe\xff':
encoding = 'utf-32be'
data = data[4:]
elif data[:4] == b'\xff\xfe\x00\x00':
encoding = 'utf-32le'
data = data[4:]
return data, encoding
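# Illustrative calls (not part of the original module):
#
#   EncodingDetector.strip_byte_order_mark(b'\xef\xbb\xbf<root/>')
#       -> (b'<root/>', 'utf-8')
#   EncodingDetector.strip_byte_order_mark(b'<root/>')
#       -> (b'<root/>', None)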
@classmethod
def find_declared_encoding(cls, markup, is_html=False, search_entire_document=False):
"""Given a document, tries to find its declared encoding.
An XML encoding is declared at the beginning of the document.
An HTML encoding is declared in a <meta> tag, hopefully near the
beginning of the document.
"""
if search_entire_document:
xml_endpos = html_endpos = len(markup)
else:
xml_endpos = 1024
html_endpos = max(2048, int(len(markup) * 0.05))
declared_encoding = None
declared_encoding_match = xml_encoding_re.search(markup, endpos=xml_endpos)
if not declared_encoding_match and is_html:
declared_encoding_match = html_meta_re.search(markup, endpos=html_endpos)
if declared_encoding_match is not None:
declared_encoding = declared_encoding_match.groups()[0].decode(
'ascii')
if declared_encoding:
return declared_encoding.lower()
return None
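# A minimal sketch (not part of the original module) of the precedence order
# described in the class docstring; the markup and override are illustrative.
def _example_encoding_candidates():
    markup = b'<?xml version="1.0" encoding="iso-8859-1"?><root>caf\xe9</root>'
    detector = EncodingDetector(markup, override_encodings=['utf-16'])
    # Typically yields 'utf-16' (override), then 'iso-8859-1' (declared in
    # the XML declaration), then whatever chardet suggests (if installed),
    # then 'utf-8' and 'windows-1252', skipping duplicates.
    return list(detector.encodings)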
class UnicodeDammit:
"""A class for detecting the encoding of a *ML document and
converting it to a Unicode string. If the source encoding is
windows-1252, can replace MS smart quotes with their HTML or XML
equivalents."""
# This dictionary maps commonly seen values for "charset" in HTML
# meta tags to the corresponding Python codec names. It only covers
# values that aren't in Python's aliases and can't be determined
# by the heuristics in find_codec.
CHARSET_ALIASES = {"macintosh": "mac-roman",
"x-sjis": "shift-jis"}
ENCODINGS_WITH_SMART_QUOTES = [
"windows-1252",
"iso-8859-1",
"iso-8859-2",
]
def __init__(self, markup, override_encodings=[],
smart_quotes_to=None, is_html=False):
self.smart_quotes_to = smart_quotes_to
self.tried_encodings = []
self.contains_replacement_characters = False
self.is_html = is_html
self.detector = EncodingDetector(markup, override_encodings, is_html)
# Short-circuit if the data is in Unicode to begin with.
if isinstance(markup, unicode) or markup == '':
self.markup = markup
self.unicode_markup = unicode(markup)
self.original_encoding = None
return
# The encoding detector may have stripped a byte-order mark.
# Use the stripped markup from this point on.
self.markup = self.detector.markup
u = None
for encoding in self.detector.encodings:
markup = self.detector.markup
u = self._convert_from(encoding)
if u is not None:
break
if not u:
# None of the encodings worked. As an absolute last resort,
# try them again with character replacement.
for encoding in self.detector.encodings:
if encoding != "ascii":
u = self._convert_from(encoding, "replace")
if u is not None:
logging.warning(
"Some characters could not be decoded, and were "
"replaced with REPLACEMENT CHARACTER.")
self.contains_replacement_characters = True
break
# If none of that worked, we could at this point force it to
# ASCII, but that would destroy so much data that I think
# giving up is better.
self.unicode_markup = u
if not u:
self.original_encoding = None
def _sub_ms_char(self, match):
"""Changes a MS smart quote character to an XML or HTML
entity, or an ASCII character."""
orig = match.group(1)
if self.smart_quotes_to == 'ascii':
sub = self.MS_CHARS_TO_ASCII.get(orig).encode()
else:
sub = self.MS_CHARS.get(orig)
if type(sub) == tuple:
if self.smart_quotes_to == 'xml':
sub = '&#x'.encode() + sub[1].encode() + ';'.encode()
else:
sub = '&'.encode() + sub[0].encode() + ';'.encode()
else:
sub = sub.encode()
return sub
def _convert_from(self, proposed, errors="strict"):
proposed = self.find_codec(proposed)
if not proposed or (proposed, errors) in self.tried_encodings:
return None
self.tried_encodings.append((proposed, errors))
markup = self.markup
# Convert smart quotes to HTML if coming from an encoding
# that might have them.
if (self.smart_quotes_to is not None
and proposed in self.ENCODINGS_WITH_SMART_QUOTES):
smart_quotes_re = b"([\x80-\x9f])"
smart_quotes_compiled = re.compile(smart_quotes_re)
markup = smart_quotes_compiled.sub(self._sub_ms_char, markup)
try:
#print "Trying to convert document to %s (errors=%s)" % (
# proposed, errors)
u = self._to_unicode(markup, proposed, errors)
self.markup = u
self.original_encoding = proposed
except Exception as e:
#print "That didn't work!"
#print e
return None
#print "Correct encoding: %s" % proposed
return self.markup
def _to_unicode(self, data, encoding, errors="strict"):
'''Given a string and its encoding, decodes the string into Unicode.
%encoding is a string recognized by encodings.aliases'''
return unicode(data, encoding, errors)
@property
def declared_html_encoding(self):
if not self.is_html:
return None
return self.detector.declared_encoding
def find_codec(self, charset):
value = (self._codec(self.CHARSET_ALIASES.get(charset, charset))
or (charset and self._codec(charset.replace("-", "")))
or (charset and self._codec(charset.replace("-", "_")))
or (charset and charset.lower())
or charset
)
if value:
return value.lower()
return None
def _codec(self, charset):
if not charset:
return charset
codec = None
try:
codecs.lookup(charset)
codec = charset
except (LookupError, ValueError):
pass
return codec
# A partial mapping of Windows-1252/ISO-Latin-1 "smart" characters to HTML entities/XML numeric entities.
MS_CHARS = {b'\x80': ('euro', '20AC'),
b'\x81': ' ',
b'\x82': ('sbquo', '201A'),
b'\x83': ('fnof', '192'),
b'\x84': ('bdquo', '201E'),
b'\x85': ('hellip', '2026'),
b'\x86': ('dagger', '2020'),
b'\x87': ('Dagger', '2021'),
b'\x88': ('circ', '2C6'),
b'\x89': ('permil', '2030'),
b'\x8A': ('Scaron', '160'),
b'\x8B': ('lsaquo', '2039'),
b'\x8C': ('OElig', '152'),
b'\x8D': '?',
b'\x8E': ('#x17D', '17D'),
b'\x8F': '?',
b'\x90': '?',
b'\x91': ('lsquo', '2018'),
b'\x92': ('rsquo', '2019'),
b'\x93': ('ldquo', '201C'),
b'\x94': ('rdquo', '201D'),
b'\x95': ('bull', '2022'),
b'\x96': ('ndash', '2013'),
b'\x97': ('mdash', '2014'),
b'\x98': ('tilde', '2DC'),
b'\x99': ('trade', '2122'),
b'\x9a': ('scaron', '161'),
b'\x9b': ('rsaquo', '203A'),
b'\x9c': ('oelig', '153'),
b'\x9d': '?',
b'\x9e': ('#x17E', '17E'),
b'\x9f': ('Yuml', '178'),}
# A parochial partial mapping of ISO-Latin-1 to ASCII. Contains
# horrors like stripping diacritical marks to turn á into a, but also
# contains non-horrors like turning “ into ".
MS_CHARS_TO_ASCII = {
b'\x80' : 'EUR',
b'\x81' : ' ',
b'\x82' : ',',
b'\x83' : 'f',
b'\x84' : ',,',
b'\x85' : '...',
b'\x86' : '+',
b'\x87' : '++',
b'\x88' : '^',
b'\x89' : '%',
b'\x8a' : 'S',
b'\x8b' : '<',
b'\x8c' : 'OE',
b'\x8d' : '?',
b'\x8e' : 'Z',
b'\x8f' : '?',
b'\x90' : '?',
b'\x91' : "'",
b'\x92' : "'",
b'\x93' : '"',
b'\x94' : '"',
b'\x95' : '*',
b'\x96' : '-',
b'\x97' : '--',
b'\x98' : '~',
b'\x99' : '(TM)',
b'\x9a' : 's',
b'\x9b' : '>',
b'\x9c' : 'oe',
b'\x9d' : '?',
b'\x9e' : 'z',
b'\x9f' : 'Y',
b'\xa0' : ' ',
b'\xa1' : '!',
b'\xa2' : 'c',
b'\xa3' : 'GBP',
b'\xa4' : '$', #This approximation is especially parochial--this is the
#generic currency symbol.
b'\xa5' : 'YEN',
b'\xa6' : '|',
b'\xa7' : 'S',
b'\xa8' : '..',
b'\xa9' : '',
b'\xaa' : '(th)',
b'\xab' : '<<',
b'\xac' : '!',
b'\xad' : ' ',
b'\xae' : '(R)',
b'\xaf' : '-',
b'\xb0' : 'o',
b'\xb1' : '+-',
b'\xb2' : '2',
b'\xb3' : '3',
b'\xb4' : ("'", 'acute'),
b'\xb5' : 'u',
b'\xb6' : 'P',
b'\xb7' : '*',
b'\xb8' : ',',
b'\xb9' : '1',
b'\xba' : '(th)',
b'\xbb' : '>>',
b'\xbc' : '1/4',
b'\xbd' : '1/2',
b'\xbe' : '3/4',
b'\xbf' : '?',
b'\xc0' : 'A',
b'\xc1' : 'A',
b'\xc2' : 'A',
b'\xc3' : 'A',
b'\xc4' : 'A',
b'\xc5' : 'A',
b'\xc6' : 'AE',
b'\xc7' : 'C',
b'\xc8' : 'E',
b'\xc9' : 'E',
b'\xca' : 'E',
b'\xcb' : 'E',
b'\xcc' : 'I',
b'\xcd' : 'I',
b'\xce' : 'I',
b'\xcf' : 'I',
b'\xd0' : 'D',
b'\xd1' : 'N',
b'\xd2' : 'O',
b'\xd3' : 'O',
b'\xd4' : 'O',
b'\xd5' : 'O',
b'\xd6' : 'O',
b'\xd7' : '*',
b'\xd8' : 'O',
b'\xd9' : 'U',
b'\xda' : 'U',
b'\xdb' : 'U',
b'\xdc' : 'U',
b'\xdd' : 'Y',
b'\xde' : 'b',
b'\xdf' : 'B',
b'\xe0' : 'a',
b'\xe1' : 'a',
b'\xe2' : 'a',
b'\xe3' : 'a',
b'\xe4' : 'a',
b'\xe5' : 'a',
b'\xe6' : 'ae',
b'\xe7' : 'c',
b'\xe8' : 'e',
b'\xe9' : 'e',
b'\xea' : 'e',
b'\xeb' : 'e',
b'\xec' : 'i',
b'\xed' : 'i',
b'\xee' : 'i',
b'\xef' : 'i',
b'\xf0' : 'o',
b'\xf1' : 'n',
b'\xf2' : 'o',
b'\xf3' : 'o',
b'\xf4' : 'o',
b'\xf5' : 'o',
b'\xf6' : 'o',
b'\xf7' : '/',
b'\xf8' : 'o',
b'\xf9' : 'u',
b'\xfa' : 'u',
b'\xfb' : 'u',
b'\xfc' : 'u',
b'\xfd' : 'y',
b'\xfe' : 'b',
b'\xff' : 'y',
}
# A map used when removing rogue Windows-1252/ISO-8859-1
# characters in otherwise UTF-8 documents.
#
# Note that \x81, \x8d, \x8f, \x90, and \x9d are undefined in
# Windows-1252.
WINDOWS_1252_TO_UTF8 = {
0x80 : b'\xe2\x82\xac', # €
0x82 : b'\xe2\x80\x9a', # ‚
0x83 : b'\xc6\x92', # ƒ
0x84 : b'\xe2\x80\x9e', # „
0x85 : b'\xe2\x80\xa6', # …
0x86 : b'\xe2\x80\xa0', # †
0x87 : b'\xe2\x80\xa1', # ‡
0x88 : b'\xcb\x86', # ˆ
0x89 : b'\xe2\x80\xb0', # ‰
0x8a : b'\xc5\xa0', # Š
0x8b : b'\xe2\x80\xb9', # ‹
0x8c : b'\xc5\x92', # Œ
0x8e : b'\xc5\xbd', # Ž
0x91 : b'\xe2\x80\x98', # ‘
0x92 : b'\xe2\x80\x99', # ’
0x93 : b'\xe2\x80\x9c', # “
0x94 : b'\xe2\x80\x9d', # ”
0x95 : b'\xe2\x80\xa2', # •
0x96 : b'\xe2\x80\x93', # –
0x97 : b'\xe2\x80\x94', # —
0x98 : b'\xcb\x9c', # ˜
0x99 : b'\xe2\x84\xa2', # ™
0x9a : b'\xc5\xa1', # š
0x9b : b'\xe2\x80\xba', # ›
0x9c : b'\xc5\x93', # œ
0x9e : b'\xc5\xbe', # ž
0x9f : b'\xc5\xb8', # Ÿ
0xa0 : b'\xc2\xa0', #
0xa1 : b'\xc2\xa1', # ¡
0xa2 : b'\xc2\xa2', # ¢
0xa3 : b'\xc2\xa3', # £
0xa4 : b'\xc2\xa4', # ¤
0xa5 : b'\xc2\xa5', # ¥
0xa6 : b'\xc2\xa6', # ¦
0xa7 : b'\xc2\xa7', # §
0xa8 : b'\xc2\xa8', # ¨
0xa9 : b'\xc2\xa9', # ©
0xaa : b'\xc2\xaa', # ª
0xab : b'\xc2\xab', # «
0xac : b'\xc2\xac', # ¬
0xad : b'\xc2\xad', #
0xae : b'\xc2\xae', # ®
0xaf : b'\xc2\xaf', # ¯
0xb0 : b'\xc2\xb0', # °
0xb1 : b'\xc2\xb1', # ±
0xb2 : b'\xc2\xb2', # ²
0xb3 : b'\xc2\xb3', # ³
0xb4 : b'\xc2\xb4', # ´
0xb5 : b'\xc2\xb5', # µ
0xb6 : b'\xc2\xb6', # ¶
0xb7 : b'\xc2\xb7', # ·
0xb8 : b'\xc2\xb8', # ¸
0xb9 : b'\xc2\xb9', # ¹
0xba : b'\xc2\xba', # º
0xbb : b'\xc2\xbb', # »
0xbc : b'\xc2\xbc', # ¼
0xbd : b'\xc2\xbd', # ½
0xbe : b'\xc2\xbe', # ¾
0xbf : b'\xc2\xbf', # ¿
0xc0 : b'\xc3\x80', # À
0xc1 : b'\xc3\x81', # Á
0xc2 : b'\xc3\x82', # Â
0xc3 : b'\xc3\x83', # Ã
0xc4 : b'\xc3\x84', # Ä
0xc5 : b'\xc3\x85', # Å
0xc6 : b'\xc3\x86', # Æ
0xc7 : b'\xc3\x87', # Ç
0xc8 : b'\xc3\x88', # È
0xc9 : b'\xc3\x89', # É
0xca : b'\xc3\x8a', # Ê
0xcb : b'\xc3\x8b', # Ë
0xcc : b'\xc3\x8c', # Ì
0xcd : b'\xc3\x8d', # Í
0xce : b'\xc3\x8e', # Î
0xcf : b'\xc3\x8f', # Ï
0xd0 : b'\xc3\x90', # Ð
0xd1 : b'\xc3\x91', # Ñ
0xd2 : b'\xc3\x92', # Ò
0xd3 : b'\xc3\x93', # Ó
0xd4 : b'\xc3\x94', # Ô
0xd5 : b'\xc3\x95', # Õ
0xd6 : b'\xc3\x96', # Ö
0xd7 : b'\xc3\x97', # ×
0xd8 : b'\xc3\x98', # Ø
0xd9 : b'\xc3\x99', # Ù
0xda : b'\xc3\x9a', # Ú
0xdb : b'\xc3\x9b', # Û
0xdc : b'\xc3\x9c', # Ü
0xdd : b'\xc3\x9d', # Ý
0xde : b'\xc3\x9e', # Þ
0xdf : b'\xc3\x9f', # ß
0xe0 : b'\xc3\xa0', # à
0xe1 : b'\xc3\xa1', # á
0xe2 : b'\xc3\xa2', # â
0xe3 : b'\xc3\xa3', # ã
0xe4 : b'\xc3\xa4', # ä
0xe5 : b'\xc3\xa5', # å
0xe6 : b'\xc3\xa6', # æ
0xe7 : b'\xc3\xa7', # ç
0xe8 : b'\xc3\xa8', # è
0xe9 : b'\xc3\xa9', # é
0xea : b'\xc3\xaa', # ê
0xeb : b'\xc3\xab', # ë
0xec : b'\xc3\xac', # ì
0xed : b'\xc3\xad', # í
0xee : b'\xc3\xae', # î
0xef : b'\xc3\xaf', # ï
0xf0 : b'\xc3\xb0', # ð
0xf1 : b'\xc3\xb1', # ñ
0xf2 : b'\xc3\xb2', # ò
0xf3 : b'\xc3\xb3', # ó
0xf4 : b'\xc3\xb4', # ô
0xf5 : b'\xc3\xb5', # õ
0xf6 : b'\xc3\xb6', # ö
0xf7 : b'\xc3\xb7', # ÷
0xf8 : b'\xc3\xb8', # ø
0xf9 : b'\xc3\xb9', # ù
0xfa : b'\xc3\xba', # ú
0xfb : b'\xc3\xbb', # û
0xfc : b'\xc3\xbc', # ü
0xfd : b'\xc3\xbd', # ý
0xfe : b'\xc3\xbe', # þ
}
MULTIBYTE_MARKERS_AND_SIZES = [
(0xc2, 0xdf, 2), # 2-byte characters start with a byte C2-DF
(0xe0, 0xef, 3), # 3-byte characters start with E0-EF
(0xf0, 0xf4, 4), # 4-byte characters start with F0-F4
]
FIRST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[0][0]
LAST_MULTIBYTE_MARKER = MULTIBYTE_MARKERS_AND_SIZES[-1][1]
@classmethod
def detwingle(cls, in_bytes, main_encoding="utf8",
embedded_encoding="windows-1252"):
"""Fix characters from one encoding embedded in some other encoding.
Currently the only situation supported is Windows-1252 (or its
subset ISO-8859-1), embedded in UTF-8.
The input must be a bytestring. If you've already converted
the document to Unicode, you're too late.
The output is a bytestring in which `embedded_encoding`
characters have been converted to their `main_encoding`
equivalents.
"""
if embedded_encoding.replace('_', '-').lower() not in (
'windows-1252', 'windows_1252'):
raise NotImplementedError(
"Windows-1252 and ISO-8859-1 are the only currently supported "
"embedded encodings.")
if main_encoding.lower() not in ('utf8', 'utf-8'):
raise NotImplementedError(
"UTF-8 is the only currently supported main encoding.")
byte_chunks = []
chunk_start = 0
pos = 0
while pos < len(in_bytes):
byte = in_bytes[pos]
if not isinstance(byte, int):
# Python 2.x
byte = ord(byte)
if (byte >= cls.FIRST_MULTIBYTE_MARKER
and byte <= cls.LAST_MULTIBYTE_MARKER):
# This is the start of a UTF-8 multibyte character. Skip
# to the end.
for start, end, size in cls.MULTIBYTE_MARKERS_AND_SIZES:
if byte >= start and byte <= end:
pos += size
break
elif byte >= 0x80 and byte in cls.WINDOWS_1252_TO_UTF8:
# We found a Windows-1252 character!
# Save the string up to this point as a chunk.
byte_chunks.append(in_bytes[chunk_start:pos])
# Now translate the Windows-1252 character into UTF-8
# and add it as another, one-byte chunk.
byte_chunks.append(cls.WINDOWS_1252_TO_UTF8[byte])
pos += 1
chunk_start = pos
else:
# Go on to the next character.
pos += 1
if chunk_start == 0:
# The string is unchanged.
return in_bytes
else:
# Store the final chunk.
byte_chunks.append(in_bytes[chunk_start:])
return b''.join(byte_chunks)
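# A minimal usage sketch, not part of the original module. The inputs are
# illustrative; the commented results are what the code above produces.
def _example_unicode_dammit():
    dammit = UnicodeDammit(b'Sacr\xe9 bleu!', ['latin-1', 'iso-8859-1'])
    # dammit.unicode_markup == u'Sacr\xe9 bleu!'
    # dammit.original_encoding == 'latin-1'
    doc = b'A UTF-8 \xc3\xa9 next to a stray Windows-1252 dash: \x97'
    fixed = UnicodeDammit.detwingle(doc)
    # The multi-byte UTF-8 sequence is left alone; the lone 0x97 byte is
    # rewritten as the UTF-8 encoding of an em dash, so the result now
    # decodes cleanly as UTF-8.
    return dammit.unicode_markup, fixed.decode('utf8')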
| 29,302 | Python | .py | 746 | 28.896783 | 91 | 0.50821 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |

| 7,309 | _htmlparser.py | CouchPotato_CouchPotatoServer/libs/bs4/builder/_htmlparser.py |
"""Use the HTMLParser library to parse HTML files that aren't too bad."""
__all__ = [
'HTMLParserTreeBuilder',
]
from HTMLParser import (
HTMLParser,
HTMLParseError,
)
import sys
import warnings
# Starting in Python 3.2, the HTMLParser constructor takes a 'strict'
# argument, which we'd like to set to False. Unfortunately,
# http://bugs.python.org/issue13273 makes strict=True a better bet
# before Python 3.2.3.
#
# At the end of this file, we monkeypatch HTMLParser so that
# strict=True works well on Python 3.2.2.
major, minor, release = sys.version_info[:3]
CONSTRUCTOR_TAKES_STRICT = (
major > 3
or (major == 3 and minor > 2)
or (major == 3 and minor == 2 and release >= 3))
from bs4.element import (
CData,
Comment,
Declaration,
Doctype,
ProcessingInstruction,
)
from bs4.dammit import EntitySubstitution, UnicodeDammit
from bs4.builder import (
HTML,
HTMLTreeBuilder,
STRICT,
)
HTMLPARSER = 'html.parser'
class BeautifulSoupHTMLParser(HTMLParser):
def handle_starttag(self, name, attrs):
# XXX namespace
attr_dict = {}
for key, value in attrs:
# Change None attribute values to the empty string
# for consistency with the other tree builders.
if value is None:
value = ''
attr_dict[key] = value
attrvalue = '""'
self.soup.handle_starttag(name, None, None, attr_dict)
def handle_endtag(self, name):
self.soup.handle_endtag(name)
def handle_data(self, data):
self.soup.handle_data(data)
def handle_charref(self, name):
# XXX workaround for a bug in HTMLParser. Remove this once
# it's fixed.
if name.startswith('x'):
real_name = int(name.lstrip('x'), 16)
elif name.startswith('X'):
real_name = int(name.lstrip('X'), 16)
else:
real_name = int(name)
try:
data = unichr(real_name)
except (ValueError, OverflowError), e:
data = u"\N{REPLACEMENT CHARACTER}"
self.handle_data(data)
def handle_entityref(self, name):
character = EntitySubstitution.HTML_ENTITY_TO_CHARACTER.get(name)
if character is not None:
data = character
else:
data = "&%s;" % name
self.handle_data(data)
def handle_comment(self, data):
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(Comment)
def handle_decl(self, data):
self.soup.endData()
if data.startswith("DOCTYPE "):
data = data[len("DOCTYPE "):]
elif data == 'DOCTYPE':
# i.e. "<!DOCTYPE>"
data = ''
self.soup.handle_data(data)
self.soup.endData(Doctype)
def unknown_decl(self, data):
if data.upper().startswith('CDATA['):
cls = CData
data = data[len('CDATA['):]
else:
cls = Declaration
self.soup.endData()
self.soup.handle_data(data)
self.soup.endData(cls)
def handle_pi(self, data):
self.soup.endData()
if data.endswith("?") and data.lower().startswith("xml"):
# "An XHTML processing instruction using the trailing '?'
# will cause the '?' to be included in data." - HTMLParser
# docs.
#
# Strip the question mark so we don't end up with two
# question marks.
data = data[:-1]
self.soup.handle_data(data)
self.soup.endData(ProcessingInstruction)
class HTMLParserTreeBuilder(HTMLTreeBuilder):
is_xml = False
features = [HTML, STRICT, HTMLPARSER]
def __init__(self, *args, **kwargs):
if CONSTRUCTOR_TAKES_STRICT:
kwargs['strict'] = False
self.parser_args = (args, kwargs)
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:return: A 4-tuple (markup, original encoding, encoding
declared within markup, whether any characters had to be
replaced with REPLACEMENT CHARACTER).
"""
if isinstance(markup, unicode):
yield (markup, None, None, False)
return
try_encodings = [user_specified_encoding, document_declared_encoding]
dammit = UnicodeDammit(markup, try_encodings, is_html=True)
yield (dammit.markup, dammit.original_encoding,
dammit.declared_html_encoding,
dammit.contains_replacement_characters)
def feed(self, markup):
args, kwargs = self.parser_args
parser = BeautifulSoupHTMLParser(*args, **kwargs)
parser.soup = self.soup
try:
parser.feed(markup)
except HTMLParseError, e:
warnings.warn(RuntimeWarning(
"Python's built-in HTMLParser cannot parse the given document. This is not a bug in Beautiful Soup. The best solution is to install an external parser (lxml or html5lib), and use Beautiful Soup with that parser. See http://www.crummy.com/software/BeautifulSoup/bs4/doc/#installing-a-parser for help."))
raise e
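# A usage note (not part of the original module): this builder is normally
# selected through the 'html.parser' feature string declared above, e.g.
#
#   from bs4 import BeautifulSoup
#   soup = BeautifulSoup("<p>Some <b>bad<i>HTML", "html.parser")
#
# BeautifulSoup looks the string up in the builder registry and instantiates
# HTMLParserTreeBuilder to drive the parse.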
# Patch 3.2 versions of HTMLParser earlier than 3.2.3 to use some
# 3.2.3 code. This ensures they don't treat markup like <p></p> as a
# string.
#
# XXX This code can be removed once most Python 3 users are on 3.2.3.
if major == 3 and minor == 2 and not CONSTRUCTOR_TAKES_STRICT:
import re
attrfind_tolerant = re.compile(
r'\s*((?<=[\'"\s])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?')
HTMLParserTreeBuilder.attrfind_tolerant = attrfind_tolerant
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
BeautifulSoupHTMLParser.locatestarttagend = locatestarttagend
from html.parser import tagfind, attrfind
def parse_starttag(self, i):
self.__starttag_text = None
endpos = self.check_for_whole_start_tag(i)
if endpos < 0:
return endpos
rawdata = self.rawdata
self.__starttag_text = rawdata[i:endpos]
# Now parse the data between i+1 and j into a tag and attrs
attrs = []
match = tagfind.match(rawdata, i+1)
assert match, 'unexpected call to parse_starttag()'
k = match.end()
self.lasttag = tag = rawdata[i+1:k].lower()
while k < endpos:
if self.strict:
m = attrfind.match(rawdata, k)
else:
m = attrfind_tolerant.match(rawdata, k)
if not m:
break
attrname, rest, attrvalue = m.group(1, 2, 3)
if not rest:
attrvalue = None
elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
attrvalue[:1] == '"' == attrvalue[-1:]:
attrvalue = attrvalue[1:-1]
if attrvalue:
attrvalue = self.unescape(attrvalue)
attrs.append((attrname.lower(), attrvalue))
k = m.end()
end = rawdata[k:endpos].strip()
if end not in (">", "/>"):
lineno, offset = self.getpos()
if "\n" in self.__starttag_text:
lineno = lineno + self.__starttag_text.count("\n")
offset = len(self.__starttag_text) \
- self.__starttag_text.rfind("\n")
else:
offset = offset + len(self.__starttag_text)
if self.strict:
self.error("junk characters in start tag: %r"
% (rawdata[k:endpos][:20],))
self.handle_data(rawdata[i:endpos])
return endpos
if end.endswith('/>'):
# XHTML-style empty tag: <span attr="value" />
self.handle_startendtag(tag, attrs)
else:
self.handle_starttag(tag, attrs)
if tag in self.CDATA_CONTENT_ELEMENTS:
self.set_cdata_mode(tag)
return endpos
def set_cdata_mode(self, elem):
self.cdata_elem = elem.lower()
self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)
BeautifulSoupHTMLParser.parse_starttag = parse_starttag
BeautifulSoupHTMLParser.set_cdata_mode = set_cdata_mode
CONSTRUCTOR_TAKES_STRICT = True
| 8,839 | Python | .py | 224 | 30.459821 | 318 | 0.57942 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |

| 7,310 | _lxml.py | CouchPotato_CouchPotatoServer/libs/bs4/builder/_lxml.py |
__all__ = [
'LXMLTreeBuilderForXML',
'LXMLTreeBuilder',
]
from io import BytesIO
from StringIO import StringIO
import collections
from lxml import etree
from bs4.element import Comment, Doctype, NamespacedAttribute
from bs4.builder import (
FAST,
HTML,
HTMLTreeBuilder,
PERMISSIVE,
ParserRejectedMarkup,
TreeBuilder,
XML)
from bs4.dammit import EncodingDetector
LXML = 'lxml'
class LXMLTreeBuilderForXML(TreeBuilder):
DEFAULT_PARSER_CLASS = etree.XMLParser
is_xml = True
# Well, it's permissive by XML parser standards.
features = [LXML, XML, FAST, PERMISSIVE]
CHUNK_SIZE = 512
# This namespace mapping is specified in the XML Namespace
# standard.
DEFAULT_NSMAPS = {'http://www.w3.org/XML/1998/namespace' : "xml"}
def default_parser(self, encoding):
# This can either return a parser object or a class, which
# will be instantiated with default arguments.
if self._default_parser is not None:
return self._default_parser
return etree.XMLParser(
target=self, strip_cdata=False, recover=True, encoding=encoding)
def parser_for(self, encoding):
# Use the default parser.
parser = self.default_parser(encoding)
if isinstance(parser, collections.Callable):
# Instantiate the parser with default arguments
parser = parser(target=self, strip_cdata=False, encoding=encoding)
return parser
def __init__(self, parser=None, empty_element_tags=None):
# TODO: Issue a warning if parser is present but not a
# callable, since that means there's no way to create new
# parsers for different encodings.
self._default_parser = parser
if empty_element_tags is not None:
self.empty_element_tags = set(empty_element_tags)
self.soup = None
self.nsmaps = [self.DEFAULT_NSMAPS]
def _getNsTag(self, tag):
# Split the namespace URL out of a fully-qualified lxml tag
# name. Copied from lxml's src/lxml/sax.py.
if tag[0] == '{':
return tuple(tag[1:].split('}', 1))
else:
return (None, tag)
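# Illustrative results (not part of the original module):
#
#   self._getNsTag('{http://www.w3.org/1999/xhtml}body')
#       -> ('http://www.w3.org/1999/xhtml', 'body')
#   self._getNsTag('body')
#       -> (None, 'body')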
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
"""
:yield: A series of 4-tuples.
(markup, encoding, declared encoding,
has undergone character replacement)
Each 4-tuple represents a strategy for parsing the document.
"""
if isinstance(markup, unicode):
# We were given Unicode. Maybe lxml can parse Unicode on
# this system?
yield markup, None, document_declared_encoding, False
if isinstance(markup, unicode):
# No, apparently not. Convert the Unicode to UTF-8 and
# tell lxml to parse it as UTF-8.
yield (markup.encode("utf8"), "utf8",
document_declared_encoding, False)
# Instead of using UnicodeDammit to convert the bytestring to
# Unicode using different encodings, use EncodingDetector to
# iterate over the encodings, and tell lxml to try to parse
# the document as each one in turn.
is_html = not self.is_xml
try_encodings = [user_specified_encoding, document_declared_encoding]
detector = EncodingDetector(markup, try_encodings, is_html)
for encoding in detector.encodings:
yield (detector.markup, encoding, document_declared_encoding, False)
def feed(self, markup):
if isinstance(markup, bytes):
markup = BytesIO(markup)
elif isinstance(markup, unicode):
markup = StringIO(markup)
# Call feed() at least once, even if the markup is empty,
# or the parser won't be initialized.
data = markup.read(self.CHUNK_SIZE)
try:
self.parser = self.parser_for(self.soup.original_encoding)
self.parser.feed(data)
while len(data) != 0:
# Now call feed() on the rest of the data, chunk by chunk.
data = markup.read(self.CHUNK_SIZE)
if len(data) != 0:
self.parser.feed(data)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def close(self):
self.nsmaps = [self.DEFAULT_NSMAPS]
def start(self, name, attrs, nsmap={}):
# Make sure attrs is a mutable dict--lxml may send an immutable dictproxy.
attrs = dict(attrs)
nsprefix = None
# Invert each namespace map as it comes in.
if len(self.nsmaps) > 1:
# There are no new namespaces for this tag, but
# non-default namespaces are in play, so we need a
# separate tag stack to know when they end.
self.nsmaps.append(None)
elif len(nsmap) > 0:
# A new namespace mapping has come into play.
inverted_nsmap = dict((value, key) for key, value in nsmap.items())
self.nsmaps.append(inverted_nsmap)
# Also treat the namespace mapping as a set of attributes on the
# tag, so we can recreate it later.
attrs = attrs.copy()
for prefix, namespace in nsmap.items():
attribute = NamespacedAttribute(
"xmlns", prefix, "http://www.w3.org/2000/xmlns/")
attrs[attribute] = namespace
# Namespaces are in play. Find any attributes that came in
# from lxml with namespaces attached to their names, and
# turn them into NamespacedAttribute objects.
new_attrs = {}
for attr, value in attrs.items():
namespace, attr = self._getNsTag(attr)
if namespace is None:
new_attrs[attr] = value
else:
nsprefix = self._prefix_for_namespace(namespace)
attr = NamespacedAttribute(nsprefix, attr, namespace)
new_attrs[attr] = value
attrs = new_attrs
namespace, name = self._getNsTag(name)
nsprefix = self._prefix_for_namespace(namespace)
self.soup.handle_starttag(name, namespace, nsprefix, attrs)
def _prefix_for_namespace(self, namespace):
"""Find the currently active prefix for the given namespace."""
if namespace is None:
return None
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
return inverted_nsmap[namespace]
return None
def end(self, name):
self.soup.endData()
completed_tag = self.soup.tagStack[-1]
namespace, name = self._getNsTag(name)
nsprefix = None
if namespace is not None:
for inverted_nsmap in reversed(self.nsmaps):
if inverted_nsmap is not None and namespace in inverted_nsmap:
nsprefix = inverted_nsmap[namespace]
break
self.soup.handle_endtag(name, nsprefix)
if len(self.nsmaps) > 1:
# This tag, or one of its parents, introduced a namespace
# mapping, so pop it off the stack.
self.nsmaps.pop()
def pi(self, target, data):
pass
def data(self, content):
self.soup.handle_data(content)
def doctype(self, name, pubid, system):
self.soup.endData()
doctype = Doctype.for_name_and_ids(name, pubid, system)
self.soup.object_was_parsed(doctype)
def comment(self, content):
"Handle comments as Comment objects."
self.soup.endData()
self.soup.handle_data(content)
self.soup.endData(Comment)
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<?xml version="1.0" encoding="utf-8"?>\n%s' % fragment
class LXMLTreeBuilder(HTMLTreeBuilder, LXMLTreeBuilderForXML):
features = [LXML, HTML, FAST, PERMISSIVE]
is_xml = False
def default_parser(self, encoding):
return etree.HTMLParser
def feed(self, markup):
encoding = self.soup.original_encoding
try:
self.parser = self.parser_for(encoding)
self.parser.feed(markup)
self.parser.close()
except (UnicodeDecodeError, LookupError, etree.ParserError), e:
raise ParserRejectedMarkup(str(e))
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><body>%s</body></html>' % fragment
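# A usage note (not part of the original module): these builders back the
# "xml" and "lxml" feature strings, e.g.
#
#   from bs4 import BeautifulSoup
#   html_soup = BeautifulSoup("<p>Some <b>bad<i>HTML", "lxml")   # LXMLTreeBuilder
#   xml_soup = BeautifulSoup("<root><leaf/></root>", "xml")      # LXMLTreeBuilderForXML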
| 8,661 | Python | .py | 197 | 34.248731 | 82 | 0.628382 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |

| 7,311 | __init__.py | CouchPotato_CouchPotatoServer/libs/bs4/builder/__init__.py |
from collections import defaultdict
import itertools
import sys
from bs4.element import (
CharsetMetaAttributeValue,
ContentMetaAttributeValue,
whitespace_re
)
__all__ = [
'HTMLTreeBuilder',
'SAXTreeBuilder',
'TreeBuilder',
'TreeBuilderRegistry',
]
# Some useful features for a TreeBuilder to have.
FAST = 'fast'
PERMISSIVE = 'permissive'
STRICT = 'strict'
XML = 'xml'
HTML = 'html'
HTML_5 = 'html5'
class TreeBuilderRegistry(object):
def __init__(self):
self.builders_for_feature = defaultdict(list)
self.builders = []
def register(self, treebuilder_class):
"""Register a treebuilder based on its advertised features."""
for feature in treebuilder_class.features:
self.builders_for_feature[feature].insert(0, treebuilder_class)
self.builders.insert(0, treebuilder_class)
def lookup(self, *features):
if len(self.builders) == 0:
# There are no builders at all.
return None
if len(features) == 0:
# They didn't ask for any features. Give them the most
# recently registered builder.
return self.builders[0]
# Go down the list of features in order, and eliminate any builders
# that don't match every feature.
features = list(features)
features.reverse()
candidates = None
candidate_set = None
while len(features) > 0:
feature = features.pop()
we_have_the_feature = self.builders_for_feature.get(feature, [])
if len(we_have_the_feature) > 0:
if candidates is None:
candidates = we_have_the_feature
candidate_set = set(candidates)
else:
# Eliminate any candidates that don't have this feature.
candidate_set = candidate_set.intersection(
set(we_have_the_feature))
# The only valid candidates are the ones in candidate_set.
# Go through the original list of candidates and pick the first one
# that's in candidate_set.
if candidate_set is None:
return None
for candidate in candidates:
if candidate in candidate_set:
return candidate
return None
# The BeautifulSoup class will take feature lists from developers and use them
# to look up builders in this registry.
builder_registry = TreeBuilderRegistry()
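# A minimal sketch (not part of the original module) of how the registry is
# queried once builders have been registered at the bottom of this file.
def _example_registry_lookup():
    # The most recently registered builder advertising both features, or
    # None if nothing matches; with lxml installed this is LXMLTreeBuilder.
    best_fast_html = builder_registry.lookup('fast', 'html')
    # With no features at all, the most recently registered builder wins.
    newest = builder_registry.lookup()
    return best_fast_html, newest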
class TreeBuilder(object):
"""Turn a document into a Beautiful Soup object tree."""
features = []
is_xml = False
preserve_whitespace_tags = set()
empty_element_tags = None # A tag will be considered an empty-element
# tag when and only when it has no contents.
# A value for these tag/attribute combinations is a space- or
# comma-separated list of CDATA, rather than a single CDATA.
cdata_list_attributes = {}
def __init__(self):
self.soup = None
def reset(self):
pass
def can_be_empty_element(self, tag_name):
"""Might a tag with this name be an empty-element tag?
The final markup may or may not actually present this tag as
self-closing.
For instance: an HTMLBuilder does not consider a <p> tag to be
an empty-element tag (it's not in
HTMLBuilder.empty_element_tags). This means an empty <p> tag
will be presented as "<p></p>", not "<p />".
The default implementation has no opinion about which tags are
empty-element tags, so a tag will be presented as an
empty-element tag if and only if it has no contents.
"<foo></foo>" will become "<foo />", and "<foo>bar</foo>" will
be left alone.
"""
if self.empty_element_tags is None:
return True
return tag_name in self.empty_element_tags
def feed(self, markup):
raise NotImplementedError()
def prepare_markup(self, markup, user_specified_encoding=None,
document_declared_encoding=None):
return markup, None, None, False
def test_fragment_to_document(self, fragment):
"""Wrap an HTML fragment to make it look like a document.
Different parsers do this differently. For instance, lxml
introduces an empty <head> tag, and html5lib
doesn't. Abstracting this away lets us write simple tests
which run HTML fragments through the parser and compare the
results against other HTML fragments.
This method should not be used outside of tests.
"""
return fragment
def set_up_substitutions(self, tag):
return False
def _replace_cdata_list_attribute_values(self, tag_name, attrs):
"""Replaces class="foo bar" with class=["foo", "bar"]
Modifies its input in place.
"""
if not attrs:
return attrs
if self.cdata_list_attributes:
universal = self.cdata_list_attributes.get('*', [])
tag_specific = self.cdata_list_attributes.get(
tag_name.lower(), None)
for attr in attrs.keys():
if attr in universal or (tag_specific and attr in tag_specific):
# We have a "class"-type attribute whose string
# value is a whitespace-separated list of
# values. Split it into a list.
value = attrs[attr]
if isinstance(value, basestring):
values = whitespace_re.split(value)
else:
# html5lib sometimes calls setAttributes twice
# for the same tag when rearranging the parse
# tree. On the second call the attribute value
# here is already a list. If this happens,
# leave the value alone rather than trying to
# split it again.
values = value
attrs[attr] = values
return attrs
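# Illustrative behaviour (not part of the original module), assuming a
# builder whose cdata_list_attributes include 'class' (e.g. HTMLTreeBuilder):
#
#   builder._replace_cdata_list_attribute_values('p', {'class': 'foo bar'})
#       -> {'class': ['foo', 'bar']}
#   builder._replace_cdata_list_attribute_values('p', {'id': 'main'})
#       -> {'id': 'main'}    # 'id' is not a cdata-list attribute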
class SAXTreeBuilder(TreeBuilder):
"""A Beautiful Soup treebuilder that listens for SAX events."""
def feed(self, markup):
raise NotImplementedError()
def close(self):
pass
def startElement(self, name, attrs):
attrs = dict((key[1], value) for key, value in list(attrs.items()))
#print "Start %s, %r" % (name, attrs)
self.soup.handle_starttag(name, attrs)
def endElement(self, name):
#print "End %s" % name
self.soup.handle_endtag(name)
def startElementNS(self, nsTuple, nodeName, attrs):
# Throw away (ns, nodeName) for now.
self.startElement(nodeName, attrs)
def endElementNS(self, nsTuple, nodeName):
# Throw away (ns, nodeName) for now.
self.endElement(nodeName)
#handler.endElementNS((ns, node.nodeName), node.nodeName)
def startPrefixMapping(self, prefix, nodeValue):
# Ignore the prefix for now.
pass
def endPrefixMapping(self, prefix):
# Ignore the prefix for now.
# handler.endPrefixMapping(prefix)
pass
def characters(self, content):
self.soup.handle_data(content)
def startDocument(self):
pass
def endDocument(self):
pass
class HTMLTreeBuilder(TreeBuilder):
"""This TreeBuilder knows facts about HTML.
Such as which tags are empty-element tags.
"""
preserve_whitespace_tags = set(['pre', 'textarea'])
empty_element_tags = set(['br' , 'hr', 'input', 'img', 'meta',
'spacer', 'link', 'frame', 'base'])
# The HTML standard defines these attributes as containing a
# space-separated list of values, not a single value. That is,
# class="foo bar" means that the 'class' attribute has two values,
# 'foo' and 'bar', not the single value 'foo bar'. When we
# encounter one of these attributes, we will parse its value into
# a list of values if possible. Upon output, the list will be
# converted back into a string.
cdata_list_attributes = {
"*" : ['class', 'accesskey', 'dropzone'],
"a" : ['rel', 'rev'],
"link" : ['rel', 'rev'],
"td" : ["headers"],
"th" : ["headers"],
"td" : ["headers"],
"form" : ["accept-charset"],
"object" : ["archive"],
# These are HTML5 specific, as are *.accesskey and *.dropzone above.
"area" : ["rel"],
"icon" : ["sizes"],
"iframe" : ["sandbox"],
"output" : ["for"],
}
def set_up_substitutions(self, tag):
# We are only interested in <meta> tags
if tag.name != 'meta':
return False
http_equiv = tag.get('http-equiv')
content = tag.get('content')
charset = tag.get('charset')
# We are interested in <meta> tags that say what encoding the
# document was originally in. This means HTML 5-style <meta>
# tags that provide the "charset" attribute. It also means
# HTML 4-style <meta> tags that provide the "content"
# attribute and have "http-equiv" set to "content-type".
#
# In both cases we will replace the value of the appropriate
# attribute with a standin object that can take on any
# encoding.
meta_encoding = None
if charset is not None:
# HTML 5 style:
# <meta charset="utf8">
meta_encoding = charset
tag['charset'] = CharsetMetaAttributeValue(charset)
elif (content is not None and http_equiv is not None
and http_equiv.lower() == 'content-type'):
# HTML 4 style:
# <meta http-equiv="content-type" content="text/html; charset=utf8">
tag['content'] = ContentMetaAttributeValue(content)
return (meta_encoding is not None)
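# A minimal usage sketch of the two behaviours above, assuming the package is
# importable as bs4; the markup strings are made up:
#
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup('<p class="foo bar">hi</p>')
#     soup.p['class']          # ['foo', 'bar'] -- split into a list on parse
#     soup = BeautifulSoup('<meta charset="utf8">')
#     soup.meta['charset']     # a CharsetMetaAttributeValue standing in for "utf8"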
def register_treebuilders_from(module):
"""Copy TreeBuilders from the given module into this module."""
# I'm fairly sure this is not the best way to do this.
this_module = sys.modules['bs4.builder']
for name in module.__all__:
obj = getattr(module, name)
if issubclass(obj, TreeBuilder):
setattr(this_module, name, obj)
this_module.__all__.append(name)
# Register the builder while we're at it.
this_module.builder_registry.register(obj)
class ParserRejectedMarkup(Exception):
pass
# Builders are registered in reverse order of priority, so that custom
# builder registrations will take precedence. In general, we want lxml
# to take precedence over html5lib, because it's faster. And we only
# want to use HTMLParser as a last resort.
from . import _htmlparser
register_treebuilders_from(_htmlparser)
try:
from . import _html5lib
register_treebuilders_from(_html5lib)
except ImportError:
# They don't have html5lib installed.
pass
try:
from . import _lxml
register_treebuilders_from(_lxml)
except ImportError:
# They don't have lxml installed.
pass
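# A minimal sketch of how the registry populated above is consulted, assuming
# bs4's TreeBuilderRegistry API; BeautifulSoup uses it to pick the best
# available parser when none is named:
#
#     from bs4.builder import builder_registry
#     builder_registry.lookup('html')      # highest-priority builder supporting 'html'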
| 11,151 | Python | .py | 264 | 33.109848 | 80 | 0.620222 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,312 | _html5lib.py | CouchPotato_CouchPotatoServer/libs/bs4/builder/_html5lib.py |
__all__ = [
'HTML5TreeBuilder',
]
import warnings
from bs4.builder import (
PERMISSIVE,
HTML,
HTML_5,
HTMLTreeBuilder,
)
from bs4.element import NamespacedAttribute
import html5lib
from html5lib.constants import namespaces
from bs4.element import (
Comment,
Doctype,
NavigableString,
Tag,
)
class HTML5TreeBuilder(HTMLTreeBuilder):
"""Use html5lib to build a tree."""
features = ['html5lib', PERMISSIVE, HTML_5, HTML]
def prepare_markup(self, markup, user_specified_encoding):
# Store the user-specified encoding for use later on.
self.user_specified_encoding = user_specified_encoding
yield (markup, None, None, False)
# These methods are defined by Beautiful Soup.
def feed(self, markup):
if self.soup.parse_only is not None:
warnings.warn("You provided a value for parse_only, but the html5lib tree builder doesn't support parse_only. The entire document will be parsed.")
parser = html5lib.HTMLParser(tree=self.create_treebuilder)
doc = parser.parse(markup, encoding=self.user_specified_encoding)
# Set the character encoding detected by the tokenizer.
if isinstance(markup, unicode):
# We need to special-case this because html5lib sets
# charEncoding to UTF-8 if it gets Unicode input.
doc.original_encoding = None
else:
doc.original_encoding = parser.tokenizer.stream.charEncoding[0]
def create_treebuilder(self, namespaceHTMLElements):
self.underlying_builder = TreeBuilderForHtml5lib(
self.soup, namespaceHTMLElements)
return self.underlying_builder
def test_fragment_to_document(self, fragment):
"""See `TreeBuilder`."""
return u'<html><head></head><body>%s</body></html>' % fragment
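# A minimal sketch, assuming html5lib is installed: the 'html5lib' entry in
# HTML5TreeBuilder.features is the string callers pass to request this builder.
#
#     from bs4 import BeautifulSoup
#     soup = BeautifulSoup('<p>unclosed', 'html5lib')    # html5lib repairs the markup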
class TreeBuilderForHtml5lib(html5lib.treebuilders._base.TreeBuilder):
def __init__(self, soup, namespaceHTMLElements):
self.soup = soup
super(TreeBuilderForHtml5lib, self).__init__(namespaceHTMLElements)
def documentClass(self):
self.soup.reset()
return Element(self.soup, self.soup, None)
def insertDoctype(self, token):
name = token["name"]
publicId = token["publicId"]
systemId = token["systemId"]
doctype = Doctype.for_name_and_ids(name, publicId, systemId)
self.soup.object_was_parsed(doctype)
def elementClass(self, name, namespace):
tag = self.soup.new_tag(name, namespace)
return Element(tag, self.soup, namespace)
def commentClass(self, data):
return TextNode(Comment(data), self.soup)
    def fragmentClass(self):
        # Deferred import: BeautifulSoup is not imported at the top of this
        # module, and a module-level import would be circular with the bs4
        # package.
        from bs4 import BeautifulSoup
        self.soup = BeautifulSoup("")
        self.soup.name = "[document_fragment]"
        return Element(self.soup, self.soup, None)
def appendChild(self, node):
# XXX This code is not covered by the BS4 tests.
self.soup.append(node.element)
def getDocument(self):
return self.soup
def getFragment(self):
return html5lib.treebuilders._base.TreeBuilder.getFragment(self).element
class AttrList(object):
def __init__(self, element):
self.element = element
self.attrs = dict(self.element.attrs)
def __iter__(self):
return list(self.attrs.items()).__iter__()
    def __setitem__(self, name, value):
        self.element[name] = value
def items(self):
return list(self.attrs.items())
def keys(self):
return list(self.attrs.keys())
def __len__(self):
return len(self.attrs)
def __getitem__(self, name):
return self.attrs[name]
def __contains__(self, name):
return name in list(self.attrs.keys())
class Element(html5lib.treebuilders._base.Node):
def __init__(self, element, soup, namespace):
html5lib.treebuilders._base.Node.__init__(self, element.name)
self.element = element
self.soup = soup
self.namespace = namespace
def appendChild(self, node):
string_child = child = None
if isinstance(node, basestring):
# Some other piece of code decided to pass in a string
# instead of creating a TextElement object to contain the
# string.
string_child = child = node
elif isinstance(node, Tag):
# Some other piece of code decided to pass in a Tag
# instead of creating an Element object to contain the
# Tag.
child = node
elif node.element.__class__ == NavigableString:
string_child = child = node.element
else:
child = node.element
if not isinstance(child, basestring) and child.parent is not None:
node.element.extract()
if (string_child and self.element.contents
and self.element.contents[-1].__class__ == NavigableString):
# We are appending a string onto another string.
# TODO This has O(n^2) performance, for input like
# "a</a>a</a>a</a>..."
old_element = self.element.contents[-1]
new_element = self.soup.new_string(old_element + string_child)
old_element.replace_with(new_element)
self.soup._most_recent_element = new_element
else:
if isinstance(node, basestring):
# Create a brand new NavigableString from this string.
child = self.soup.new_string(node)
# Tell Beautiful Soup to act as if it parsed this element
# immediately after the parent's last descendant. (Or
# immediately after the parent, if it has no children.)
if self.element.contents:
most_recent_element = self.element._last_descendant(False)
else:
most_recent_element = self.element
self.soup.object_was_parsed(
child, parent=self.element,
most_recent_element=most_recent_element)
def getAttributes(self):
return AttrList(self.element)
def setAttributes(self, attributes):
if attributes is not None and len(attributes) > 0:
converted_attributes = []
for name, value in list(attributes.items()):
if isinstance(name, tuple):
new_name = NamespacedAttribute(*name)
del attributes[name]
attributes[new_name] = value
self.soup.builder._replace_cdata_list_attribute_values(
self.name, attributes)
for name, value in attributes.items():
self.element[name] = value
# The attributes may contain variables that need substitution.
# Call set_up_substitutions manually.
#
# The Tag constructor called this method when the Tag was created,
# but we just set/changed the attributes, so call it again.
self.soup.builder.set_up_substitutions(self.element)
attributes = property(getAttributes, setAttributes)
def insertText(self, data, insertBefore=None):
if insertBefore:
text = TextNode(self.soup.new_string(data), self.soup)
            self.insertBefore(text, insertBefore)
else:
self.appendChild(data)
def insertBefore(self, node, refNode):
index = self.element.index(refNode.element)
if (node.element.__class__ == NavigableString and self.element.contents
and self.element.contents[index-1].__class__ == NavigableString):
# (See comments in appendChild)
old_node = self.element.contents[index-1]
new_str = self.soup.new_string(old_node + node.element)
old_node.replace_with(new_str)
else:
self.element.insert(index, node.element)
node.parent = self
def removeChild(self, node):
node.element.extract()
def reparentChildren(self, new_parent):
"""Move all of this tag's children into another tag."""
element = self.element
new_parent_element = new_parent.element
# Determine what this tag's next_element will be once all the children
# are removed.
final_next_element = element.next_sibling
new_parents_last_descendant = new_parent_element._last_descendant(False, False)
if len(new_parent_element.contents) > 0:
# The new parent already contains children. We will be
# appending this tag's children to the end.
new_parents_last_child = new_parent_element.contents[-1]
new_parents_last_descendant_next_element = new_parents_last_descendant.next_element
else:
# The new parent contains no children.
new_parents_last_child = None
new_parents_last_descendant_next_element = new_parent_element.next_element
to_append = element.contents
append_after = new_parent.element.contents
if len(to_append) > 0:
# Set the first child's previous_element and previous_sibling
# to elements within the new parent
first_child = to_append[0]
first_child.previous_element = new_parents_last_descendant
first_child.previous_sibling = new_parents_last_child
# Fix the last child's next_element and next_sibling
last_child = to_append[-1]
last_child.next_element = new_parents_last_descendant_next_element
last_child.next_sibling = None
for child in to_append:
child.parent = new_parent_element
new_parent_element.contents.append(child)
# Now that this element has no children, change its .next_element.
element.contents = []
element.next_element = final_next_element
def cloneNode(self):
tag = self.soup.new_tag(self.element.name, self.namespace)
node = Element(tag, self.soup, self.namespace)
for key,value in self.attributes:
node.attributes[key] = value
return node
def hasContent(self):
return self.element.contents
def getNameTuple(self):
if self.namespace == None:
return namespaces["html"], self.name
else:
return self.namespace, self.name
nameTuple = property(getNameTuple)
class TextNode(Element):
def __init__(self, element, soup):
html5lib.treebuilders._base.Node.__init__(self, None)
self.element = element
self.soup = soup
def cloneNode(self):
raise NotImplementedError
| 10,647 | Python | .py | 237 | 35.299578 | 159 | 0.639452 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,313 | client.py | CouchPotato_CouchPotatoServer/libs/suds/client.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{2nd generation} service proxy provides access to web services.
See I{README.txt}
"""
import suds
import suds.metrics as metrics
from cookielib import CookieJar
from suds import *
from suds.reader import DefinitionsReader
from suds.transport import TransportError, Request
from suds.transport.https import HttpAuthenticated
from suds.servicedefinition import ServiceDefinition
from suds import sudsobject
from sudsobject import Factory as InstFactory
from sudsobject import Object
from suds.resolver import PathResolver
from suds.builder import Builder
from suds.wsdl import Definitions
from suds.cache import ObjectCache
from suds.sax.document import Document
from suds.sax.parser import Parser
from suds.options import Options
from suds.properties import Unskin
from urlparse import urlparse
from copy import deepcopy
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Client(object):
"""
A lightweight web services client.
I{(2nd generation)} API.
@ivar wsdl: The WSDL object.
@type wsdl:L{Definitions}
@ivar service: The service proxy used to invoke operations.
@type service: L{Service}
@ivar factory: The factory used to create objects.
@type factory: L{Factory}
@ivar sd: The service definition
@type sd: L{ServiceDefinition}
@ivar messages: The last sent/received messages.
@type messages: str[2]
"""
@classmethod
def items(cls, sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
return sudsobject.items(sobject)
@classmethod
def dict(cls, sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return sudsobject.asdict(sobject)
@classmethod
def metadata(cls, sobject):
"""
Extract the metadata from a suds object.
@param sobject: A suds object
@type sobject: L{Object}
@return: The object's metadata
@rtype: L{sudsobject.Metadata}
"""
return sobject.__metadata__
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@see: L{Options}
"""
options = Options()
options.transport = HttpAuthenticated()
self.options = options
options.cache = ObjectCache(days=1)
self.set_options(**kwargs)
reader = DefinitionsReader(options, Definitions)
self.wsdl = reader.open(url)
plugins = PluginContainer(options.plugins)
plugins.init.initialized(wsdl=self.wsdl)
self.factory = Factory(self.wsdl)
self.service = ServiceSelector(self, self.wsdl.services)
self.sd = []
for s in self.wsdl.services:
sd = ServiceDefinition(self.wsdl, s)
self.sd.append(sd)
self.messages = dict(tx=None, rx=None)
def set_options(self, **kwargs):
"""
Set options.
@param kwargs: keyword arguments.
@see: L{Options}
"""
p = Unskin(self.options)
p.update(kwargs)
def add_prefix(self, prefix, uri):
"""
Add I{static} mapping of an XML namespace prefix to a namespace.
This is useful for cases when a wsdl and referenced schemas make heavy
        use of namespaces and those namespaces are subject to change.
@param prefix: An XML namespace prefix.
@type prefix: str
@param uri: An XML namespace URI.
@type uri: str
@raise Exception: when prefix is already mapped.
"""
root = self.wsdl.root
mapped = root.resolvePrefix(prefix, None)
if mapped is None:
root.addPrefix(prefix, uri)
return
if mapped[1] != uri:
raise Exception('"%s" already mapped as "%s"' % (prefix, mapped))
def last_sent(self):
"""
Get last sent I{soap} message.
@return: The last sent I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('tx')
def last_received(self):
"""
Get last received I{soap} message.
@return: The last received I{soap} message.
@rtype: L{Document}
"""
return self.messages.get('rx')
def clone(self):
"""
Get a shallow clone of this object.
The clone only shares the WSDL. All other attributes are
unique to the cloned object including options.
@return: A shallow clone.
@rtype: L{Client}
"""
class Uninitialized(Client):
def __init__(self):
pass
clone = Uninitialized()
clone.options = Options()
cp = Unskin(clone.options)
mp = Unskin(self.options)
cp.update(deepcopy(mp))
clone.wsdl = self.wsdl
clone.factory = self.factory
clone.service = ServiceSelector(clone, self.wsdl.services)
clone.sd = self.sd
clone.messages = dict(tx=None, rx=None)
return clone
def __str__(self):
return unicode(self)
def __unicode__(self):
s = ['\n']
build = suds.__build__.split()
s.append('Suds ( https://fedorahosted.org/suds/ )')
s.append(' version: %s' % suds.__version__)
s.append(' %s build: %s' % (build[0], build[1]))
for sd in self.sd:
s.append('\n\n%s' % unicode(sd))
return ''.join(s)
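# A minimal usage sketch of the Client class above; the WSDL URL and the
# operation name are hypothetical:
#
#     from suds.client import Client
#     client = Client('http://example.com/shipping.wsdl')
#     print client                              # lists the service definitions
#     result = client.service.GetQuote(zip='12345')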
class Factory:
"""
A factory for instantiating types defined in the wsdl
@ivar resolver: A schema type resolver.
@type resolver: L{PathResolver}
@ivar builder: A schema object builder.
@type builder: L{Builder}
"""
def __init__(self, wsdl):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.resolver = PathResolver(wsdl)
self.builder = Builder(self.resolver)
def create(self, name):
"""
create a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: The requested object.
@rtype: L{Object}
"""
timer = metrics.Timer()
timer.start()
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
if type.enum():
result = InstFactory.object(name)
for e, a in type.children():
setattr(result, e.name, e.name)
else:
try:
result = self.builder.build(type)
except Exception, e:
log.error("create '%s' failed", name, exc_info=True)
raise BuildError(name, e)
timer.stop()
metrics.log.debug('%s created: %s', name, timer)
return result
def separator(self, ps):
"""
Set the path separator.
@param ps: The new path separator.
@type ps: char
"""
self.resolver = PathResolver(self.wsdl, ps)
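# A minimal sketch of Factory usage through Client; 'Shipment' and 'Send' are
# hypothetical WSDL names:
#
#     shipment = client.factory.create('Shipment')
#     shipment.weight = 10
#     client.service.Send(shipment)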
class ServiceSelector:
"""
The B{service} selector is used to select a web service.
    In most cases, the wsdl only defines (1) service, in which case access
    by subscript is passed through to a L{PortSelector}. This is also the
behavior when a I{default} service has been specified. In cases
where multiple services have been defined and no default has been
specified, the service is found by name (or index) and a L{PortSelector}
for the service is returned. In all cases, attribute access is
forwarded to the L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __services: A list of I{wsdl} services.
@type __services: list
"""
def __init__(self, client, services):
"""
@param client: A suds client.
@type client: L{Client}
@param services: A list of I{wsdl} services.
@type services: list
"""
self.__client = client
self.__services = services
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{PortSelector} for either the I{first} service or the
I{default} service (when specified).
@param name: The name of a method.
@type name: str
@return: A L{PortSelector}.
@rtype: L{PortSelector}.
"""
default = self.__ds()
if default is None:
port = self.__find(0)
else:
port = default
return getattr(port, name)
def __getitem__(self, name):
"""
Provides selection of the I{service} by name (string) or
index (integer). In cases where only (1) service is defined
or a I{default} has been specified, the request is forwarded
to the L{PortSelector}.
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the specified service.
@rtype: L{PortSelector}.
"""
if len(self.__services) == 1:
port = self.__find(0)
return port[name]
default = self.__ds()
if default is not None:
port = default
return port[name]
return self.__find(name)
def __find(self, name):
"""
Find a I{service} by name (string) or index (integer).
@param name: The name (or index) of a service.
@type name: (int|str)
@return: A L{PortSelector} for the found service.
@rtype: L{PortSelector}.
"""
service = None
if not len(self.__services):
raise Exception, 'No services defined'
if isinstance(name, int):
try:
service = self.__services[name]
name = service.name
except IndexError:
raise ServiceNotFound, 'at [%d]' % name
else:
for s in self.__services:
if name == s.name:
service = s
break
if service is None:
raise ServiceNotFound, name
return PortSelector(self.__client, service.ports, name)
def __ds(self):
"""
Get the I{default} service if defined in the I{options}.
@return: A L{PortSelector} for the I{default} service.
@rtype: L{PortSelector}.
"""
ds = self.__client.options.service
if ds is None:
return None
else:
return self.__find(ds)
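# A minimal sketch of explicit selection when a WSDL defines several services
# or ports; the service, port and operation names are hypothetical:
#
#     client.service['BillingService'].GetInvoice(id=1)      # service by name
#     client.service[0]['SecurePort'].GetInvoice(id=1)       # service by index, port by name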
class PortSelector:
"""
The B{port} selector is used to select a I{web service} B{port}.
In cases where multiple ports have been defined and no default has been
specified, the port is found by name (or index) and a L{MethodSelector}
for the port is returned. In all cases, attribute access is
forwarded to the L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __ports: A list of I{service} ports.
@type __ports: list
@ivar __qn: The I{qualified} name of the port (used for logging).
@type __qn: str
"""
def __init__(self, client, ports, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param ports: A list of I{service} ports.
@type ports: list
@param qn: The name of the service.
@type qn: str
"""
self.__client = client
self.__ports = ports
self.__qn = qn
def __getattr__(self, name):
"""
Request to access an attribute is forwarded to the
L{MethodSelector} for either the I{first} port or the
I{default} port (when specified).
@param name: The name of a method.
@type name: str
@return: A L{MethodSelector}.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
m = self.__find(0)
else:
m = default
return getattr(m, name)
def __getitem__(self, name):
"""
Provides selection of the I{port} by name (string) or
index (integer). In cases where only (1) port is defined
or a I{default} has been specified, the request is forwarded
to the L{MethodSelector}.
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the specified port.
@rtype: L{MethodSelector}.
"""
default = self.__dp()
if default is None:
return self.__find(name)
else:
return default
def __find(self, name):
"""
Find a I{port} by name (string) or index (integer).
@param name: The name (or index) of a port.
@type name: (int|str)
@return: A L{MethodSelector} for the found port.
@rtype: L{MethodSelector}.
"""
port = None
if not len(self.__ports):
raise Exception, 'No ports defined: %s' % self.__qn
if isinstance(name, int):
qn = '%s[%d]' % (self.__qn, name)
try:
port = self.__ports[name]
except IndexError:
raise PortNotFound, qn
else:
qn = '.'.join((self.__qn, name))
for p in self.__ports:
if name == p.name:
port = p
break
if port is None:
raise PortNotFound, qn
qn = '.'.join((self.__qn, port.name))
return MethodSelector(self.__client, port.methods, qn)
def __dp(self):
"""
Get the I{default} port if defined in the I{options}.
@return: A L{MethodSelector} for the I{default} port.
@rtype: L{MethodSelector}.
"""
dp = self.__client.options.port
if dp is None:
return None
else:
return self.__find(dp)
class MethodSelector:
"""
The B{method} selector is used to select a B{method} by name.
@ivar __client: A suds client.
@type __client: L{Client}
@ivar __methods: A dictionary of methods.
@type __methods: dict
@ivar __qn: The I{qualified} name of the method (used for logging).
@type __qn: str
"""
def __init__(self, client, methods, qn):
"""
@param client: A suds client.
@type client: L{Client}
@param methods: A dictionary of methods.
@type methods: dict
@param qn: The I{qualified} name of the port.
@type qn: str
"""
self.__client = client
self.__methods = methods
self.__qn = qn
def __getattr__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
return self[name]
def __getitem__(self, name):
"""
Get a method by name and return it in an I{execution wrapper}.
@param name: The name of a method.
@type name: str
@return: An I{execution wrapper} for the specified method name.
@rtype: L{Method}
"""
m = self.__methods.get(name)
if m is None:
qn = '.'.join((self.__qn, name))
raise MethodNotFound, qn
return Method(self.__client, m)
class Method:
"""
The I{method} (namespace) object.
@ivar client: A client object.
@type client: L{Client}
@ivar method: A I{wsdl} method.
    @type method: I{wsdl} Method.
"""
def __init__(self, client, method):
"""
@param client: A client object.
@type client: L{Client}
@param method: A I{raw} method.
        @type method: I{raw} Method.
"""
self.client = client
self.method = method
def __call__(self, *args, **kwargs):
"""
Invoke the method.
"""
clientclass = self.clientclass(kwargs)
client = clientclass(self.client, self.method)
if not self.faults():
try:
return client.invoke(args, kwargs)
except WebFault, e:
return (500, e)
else:
return client.invoke(args, kwargs)
def faults(self):
""" get faults option """
return self.client.options.faults
def clientclass(self, kwargs):
""" get soap client class """
if SimClient.simulation(kwargs):
return SimClient
else:
return SoapClient
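# A minimal sketch of how the faults option changes what a Method call returns;
# the operation name is hypothetical:
#
#     client.set_options(faults=True)      # default: WebFault raised on a soap fault
#     client.service.GetQuote()
#     client.set_options(faults=False)     # (status, result) tuples returned instead
#     status, result = client.service.GetQuote()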
class SoapClient:
"""
A lightweight soap based web client B{**not intended for external use}
@ivar service: The target method.
@type service: L{Service}
@ivar method: A target method.
@type method: L{Method}
    @ivar options: A dictionary of options.
@type options: dict
@ivar cookiejar: A cookie jar.
@type cookiejar: libcookie.CookieJar
"""
def __init__(self, client, method):
"""
@param client: A suds client.
@type client: L{Client}
@param method: A target method.
@type method: L{Method}
"""
self.client = client
self.method = method
self.options = client.options
self.cookiejar = CookieJar()
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin}|I{subclass of} L{Object}
"""
timer = metrics.Timer()
timer.start()
result = None
binding = self.method.binding.input
soapenv = binding.get_message(self.method, args, kwargs)
timer.stop()
metrics.log.debug(
"message for '%s' created: %s",
self.method.name,
timer)
timer.start()
result = self.send(soapenv)
timer.stop()
metrics.log.debug(
"method '%s' invoked: %s",
self.method.name,
timer)
return result
def send(self, soapenv):
"""
Send soap message.
@param soapenv: A soap envelope to send.
@type soapenv: L{Document}
@return: The reply to the sent message.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
result = None
location = self.location()
binding = self.method.binding.input
transport = self.options.transport
retxml = self.options.retxml
prettyxml = self.options.prettyxml
log.debug('sending to (%s)\nmessage:\n%s', location, soapenv)
try:
self.last_sent(soapenv)
plugins = PluginContainer(self.options.plugins)
plugins.message.marshalled(envelope=soapenv.root())
if prettyxml:
soapenv = soapenv.str()
else:
soapenv = soapenv.plain()
soapenv = soapenv.encode('utf-8')
plugins.message.sending(envelope=soapenv)
request = Request(location, soapenv)
request.headers = self.headers()
reply = transport.send(request)
ctx = plugins.message.received(reply=reply.message)
reply.message = ctx.reply
if retxml:
result = reply.message
else:
result = self.succeeded(binding, reply.message)
except TransportError, e:
if e.httpcode in (202,204):
result = None
else:
log.error(self.last_sent())
result = self.failed(binding, e)
return result
def headers(self):
"""
        Get the http headers for the http/https request.
@return: A dictionary of header/values.
@rtype: dict
"""
action = self.method.soap.action
stock = { 'Content-Type' : 'text/xml; charset=utf-8', 'SOAPAction': action }
result = dict(stock, **self.options.headers)
log.debug('headers = %s', result)
return result
def succeeded(self, binding, reply):
"""
Request succeeded, process the reply
@param binding: The binding to be used to process the reply.
@type binding: L{bindings.binding.Binding}
@param reply: The raw reply text.
@type reply: str
@return: The method result.
@rtype: I{builtin}, L{Object}
@raise WebFault: On server.
"""
log.debug('http succeeded:\n%s', reply)
plugins = PluginContainer(self.options.plugins)
if len(reply) > 0:
reply, result = binding.get_reply(self.method, reply)
self.last_received(reply)
else:
result = None
ctx = plugins.message.unmarshalled(reply=result)
result = ctx.reply
if self.options.faults:
return result
else:
return (200, result)
def failed(self, binding, error):
"""
Request failed, process reply based on reason
@param binding: The binding to be used to process the reply.
@type binding: L{suds.bindings.binding.Binding}
@param error: The http error message
@type error: L{transport.TransportError}
"""
status, reason = (error.httpcode, tostr(error))
reply = error.fp.read()
log.debug('http failed:\n%s', reply)
if status == 500:
if len(reply) > 0:
r, p = binding.get_fault(reply)
self.last_received(r)
return (status, p)
else:
return (status, None)
if self.options.faults:
raise Exception((status, reason))
else:
return (status, None)
def location(self):
p = Unskin(self.options)
return p.get('location', self.method.location)
def last_sent(self, d=None):
key = 'tx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
def last_received(self, d=None):
key = 'rx'
messages = self.client.messages
if d is None:
return messages.get(key)
else:
messages[key] = d
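# A minimal sketch of adding transport headers; they are merged into the stock
# headers built by headers() above. The header name is hypothetical:
#
#     client.set_options(headers={'X-Trace-Id': 'abc123'})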
class SimClient(SoapClient):
"""
Loopback client used for message/reply simulation.
"""
injkey = '__inject'
@classmethod
def simulation(cls, kwargs):
""" get whether loopback has been specified in the I{kwargs}. """
return kwargs.has_key(SimClient.injkey)
def invoke(self, args, kwargs):
"""
Send the required soap message to invoke the specified method
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The result of the method invocation.
@rtype: I{builtin} or I{subclass of} L{Object}
"""
simulation = kwargs[self.injkey]
msg = simulation.get('msg')
reply = simulation.get('reply')
fault = simulation.get('fault')
if msg is None:
if reply is not None:
return self.__reply(reply, args, kwargs)
if fault is not None:
return self.__fault(fault)
raise Exception('(reply|fault) expected when msg=None')
sax = Parser()
msg = sax.parse(string=msg)
return self.send(msg)
def __reply(self, reply, args, kwargs):
""" simulate the reply """
binding = self.method.binding.input
msg = binding.get_message(self.method, args, kwargs)
log.debug('inject (simulated) send message:\n%s', msg)
binding = self.method.binding.output
return self.succeeded(binding, reply)
def __fault(self, reply):
""" simulate the (fault) reply """
binding = self.method.binding.output
if self.options.faults:
r, p = binding.get_fault(reply)
self.last_received(r)
return (500, p)
else:
return (500, None)
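# A minimal sketch of message simulation via SimClient; passing '__inject'
# routes the call through SimClient instead of SoapClient. The reply XML is a
# stand-in:
#
#     canned = '<SOAP-ENV:Envelope>...</SOAP-ENV:Envelope>'
#     client.service.GetQuote(__inject={'reply': canned})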
| 25,971 | Python | .py | 725 | 26.853793 | 84 | 0.589591 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,314 | wsse.py | CouchPotato_CouchPotatoServer/libs/suds/wsse.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{wsse} module provides WS-Security.
"""
from logging import getLogger
from suds import *
from suds.sudsobject import Object
from suds.sax.element import Element
from suds.sax.date import UTC
from datetime import datetime, timedelta
try:
from hashlib import md5
except ImportError:
# Python 2.4 compatibility
from md5 import md5
dsns = \
('ds',
'http://www.w3.org/2000/09/xmldsig#')
wssens = \
('wsse',
'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd')
wsuns = \
('wsu',
'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd')
wsencns = \
('wsenc',
'http://www.w3.org/2001/04/xmlenc#')
class Security(Object):
"""
WS-Security object.
@ivar tokens: A list of security tokens
@type tokens: [L{Token},...]
@ivar signatures: A list of signatures.
@type signatures: TBD
@ivar references: A list of references.
@type references: TBD
@ivar keys: A list of encryption keys.
@type keys: TBD
"""
def __init__(self):
""" """
Object.__init__(self)
self.mustUnderstand = True
self.tokens = []
self.signatures = []
self.references = []
self.keys = []
def xml(self):
"""
Get xml representation of the object.
@return: The root node.
@rtype: L{Element}
"""
root = Element('Security', ns=wssens)
root.set('mustUnderstand', str(self.mustUnderstand).lower())
for t in self.tokens:
root.append(t.xml())
return root
class Token(Object):
""" I{Abstract} security token. """
@classmethod
def now(cls):
return datetime.now()
@classmethod
def utc(cls):
return datetime.utcnow()
@classmethod
def sysdate(cls):
utc = UTC()
return str(utc)
def __init__(self):
Object.__init__(self)
class UsernameToken(Token):
"""
    Represents a basic I{UsernameToken} WS-Security token.
@ivar username: A username.
@type username: str
@ivar password: A password.
@type password: str
    @ivar nonce: A set of bytes to prevent replay attacks.
@type nonce: str
@ivar created: The token created.
@type created: L{datetime}
"""
def __init__(self, username=None, password=None):
"""
@param username: A username.
@type username: str
@param password: A password.
@type password: str
"""
Token.__init__(self)
self.username = username
self.password = password
self.nonce = None
self.created = None
def setnonce(self, text=None):
"""
        Set I{nonce} which is an arbitrary set of bytes used to prevent
        replay attacks.
@param text: The nonce text value.
Generated when I{None}.
@type text: str
"""
if text is None:
s = []
s.append(self.username)
s.append(self.password)
s.append(Token.sysdate())
m = md5()
m.update(':'.join(s))
self.nonce = m.hexdigest()
else:
self.nonce = text
def setcreated(self, dt=None):
"""
Set I{created}.
@param dt: The created date & time.
            Set to Token.utc() when I{None}.
@type dt: L{datetime}
"""
if dt is None:
self.created = Token.utc()
else:
self.created = dt
def xml(self):
"""
Get xml representation of the object.
@return: The root node.
@rtype: L{Element}
"""
root = Element('UsernameToken', ns=wssens)
u = Element('Username', ns=wssens)
u.setText(self.username)
root.append(u)
p = Element('Password', ns=wssens)
p.setText(self.password)
root.append(p)
if self.nonce is not None:
n = Element('Nonce', ns=wssens)
n.setText(self.nonce)
root.append(n)
if self.created is not None:
n = Element('Created', ns=wsuns)
n.setText(str(UTC(self.created)))
root.append(n)
return root
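# A minimal sketch of attaching a UsernameToken to a client through the wsse
# option; the credentials are hypothetical:
#
#     from suds.wsse import Security, UsernameToken
#     security = Security()
#     token = UsernameToken('myuser', 'mypass')
#     token.setnonce()
#     token.setcreated()
#     security.tokens.append(token)
#     client.set_options(wsse=security)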
class Timestamp(Token):
"""
    Represents the I{Timestamp} WS-Security token.
@ivar created: The token created.
@type created: L{datetime}
@ivar expires: The token expires.
@type expires: L{datetime}
"""
def __init__(self, validity=90):
"""
@param validity: The time in seconds.
@type validity: int
"""
Token.__init__(self)
self.created = Token.utc()
self.expires = self.created + timedelta(seconds=validity)
def xml(self):
root = Element("Timestamp", ns=wsuns)
created = Element('Created', ns=wsuns)
created.setText(str(UTC(self.created)))
expires = Element('Expires', ns=wsuns)
expires.setText(str(UTC(self.expires)))
root.append(created)
root.append(expires)
return root
| 5,981 | Python | .py | 186 | 24.860215 | 90 | 0.611189 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,315 | sudsobject.py | CouchPotato_CouchPotatoServer/libs/suds/sudsobject.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sudsobject} module provides a collection of suds objects
that are primarily used for the highly dynamic interactions with
wsdl/xsd defined types.
"""
from logging import getLogger
from suds import *
from new import classobj
log = getLogger(__name__)
def items(sobject):
"""
Extract the I{items} from a suds object much like the
items() method works on I{dict}.
@param sobject: A suds object
@type sobject: L{Object}
@return: A list of items contained in I{sobject}.
@rtype: [(key, value),...]
"""
for item in sobject:
yield item
def asdict(sobject):
"""
Convert a sudsobject into a dictionary.
@param sobject: A suds object
@type sobject: L{Object}
@return: A python dictionary containing the
items contained in I{sobject}.
@rtype: dict
"""
return dict(items(sobject))
def merge(a, b):
"""
Merge all attributes and metadata from I{a} to I{b}.
@param a: A I{source} object
@type a: L{Object}
@param b: A I{destination} object
@type b: L{Object}
"""
for item in a:
setattr(b, item[0], item[1])
    b.__metadata__ = a.__metadata__
return b
def footprint(sobject):
"""
Get the I{virtual footprint} of the object.
This is really a count of the attributes in the branch with a significant value.
@param sobject: A suds object.
@type sobject: L{Object}
@return: The branch footprint.
@rtype: int
"""
n = 0
for a in sobject.__keylist__:
v = getattr(sobject, a)
if v is None: continue
if isinstance(v, Object):
n += footprint(v)
continue
if hasattr(v, '__len__'):
if len(v): n += 1
continue
n +=1
return n
class Factory:
cache = {}
@classmethod
def subclass(cls, name, bases, dict={}):
if not isinstance(bases, tuple):
bases = (bases,)
name = name.encode('utf-8')
key = '.'.join((name, str(bases)))
subclass = cls.cache.get(key)
if subclass is None:
subclass = classobj(name, bases, dict)
cls.cache[key] = subclass
return subclass
@classmethod
def object(cls, classname=None, dict={}):
if classname is not None:
subclass = cls.subclass(classname, Object)
inst = subclass()
else:
inst = Object()
for a in dict.items():
setattr(inst, a[0], a[1])
return inst
@classmethod
def metadata(cls):
return Metadata()
@classmethod
def property(cls, name, value=None):
subclass = cls.subclass(name, Property)
return subclass(value)
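# A minimal sketch of the Factory and the module-level helpers working together;
# 'Person' and the attribute values are made up:
#
#     person = Factory.object('Person', dict(name='Elmer', age=86))
#     asdict(person)          # {'name': 'Elmer', 'age': 86}
#     list(items(person))     # [(key, value), ...] pairs
#     footprint(person)       # 2 -- both attributes carry significant values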
class Object:
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
self.__metadata__ = Metadata()
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if not builtin and \
name not in self.__keylist__:
self.__keylist__.append(name)
self.__dict__[name] = value
def __delattr__(self, name):
try:
del self.__dict__[name]
builtin = name.startswith('__') and name.endswith('__')
if not builtin:
self.__keylist__.remove(name)
except:
cls = self.__class__.__name__
raise AttributeError, "%s has no attribute '%s'" % (cls, name)
def __getitem__(self, name):
if isinstance(name, int):
name = self.__keylist__[int(name)]
return getattr(self, name)
def __setitem__(self, name, value):
setattr(self, name, value)
def __iter__(self):
return Iter(self)
def __len__(self):
return len(self.__keylist__)
def __contains__(self, name):
return name in self.__keylist__
def __repr__(self):
return str(self)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.__printer__.tostr(self)
class Iter:
def __init__(self, sobject):
self.sobject = sobject
self.keylist = self.__keylist(sobject)
self.index = 0
def next(self):
keylist = self.keylist
nkeys = len(self.keylist)
while self.index < nkeys:
k = keylist[self.index]
self.index += 1
if hasattr(self.sobject, k):
v = getattr(self.sobject, k)
return (k, v)
raise StopIteration()
def __keylist(self, sobject):
keylist = sobject.__keylist__
try:
keyset = set(keylist)
ordering = sobject.__metadata__.ordering
ordered = set(ordering)
if not ordered.issuperset(keyset):
log.debug(
'%s must be superset of %s, ordering ignored',
keylist,
ordering)
raise KeyError()
return ordering
except:
return keylist
def __iter__(self):
return self
class Metadata(Object):
def __init__(self):
self.__keylist__ = []
self.__printer__ = Printer()
class Facade(Object):
def __init__(self, name):
Object.__init__(self)
md = self.__metadata__
md.facade = name
class Property(Object):
def __init__(self, value):
Object.__init__(self)
self.value = value
def items(self):
for item in self:
if item[0] != 'value':
yield item
def get(self):
return self.value
def set(self, value):
self.value = value
return self
class Printer:
"""
Pretty printing of a Object object.
"""
@classmethod
def indent(cls, n): return '%*s'%(n*3,' ')
def tostr(self, object, indent=-2):
""" get s string representation of object """
history = []
return self.process(object, history, indent)
def process(self, object, h, n=0, nl=False):
""" print object using the specified indent (n) and newline (nl). """
if object is None:
return 'None'
if isinstance(object, Object):
if len(object) == 0:
return '<empty>'
else:
return self.print_object(object, h, n+2, nl)
if isinstance(object, dict):
if len(object) == 0:
return '<empty>'
else:
return self.print_dictionary(object, h, n+2, nl)
if isinstance(object, (list,tuple)):
if len(object) == 0:
return '<empty>'
else:
return self.print_collection(object, h, n+2)
if isinstance(object, basestring):
return '"%s"' % tostr(object)
return '%s' % tostr(object)
def print_object(self, d, h, n, nl=False):
""" print complex using the specified indent (n) and newline (nl). """
s = []
cls = d.__class__
md = d.__metadata__
if d in h:
s.append('(')
s.append(cls.__name__)
s.append(')')
s.append('...')
return ''.join(s)
h.append(d)
if nl:
s.append('\n')
s.append(self.indent(n))
if cls != Object:
s.append('(')
if isinstance(d, Facade):
s.append(md.facade)
else:
s.append(cls.__name__)
s.append(')')
s.append('{')
for item in d:
if self.exclude(d, item):
continue
item = self.unwrap(d, item)
s.append('\n')
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(item[0])
s.append('[]')
else:
s.append(item[0])
s.append(' = ')
s.append(self.process(item[1], h, n, True))
s.append('\n')
s.append(self.indent(n))
s.append('}')
h.pop()
return ''.join(s)
def print_dictionary(self, d, h, n, nl=False):
""" print complex using the specified indent (n) and newline (nl). """
if d in h: return '{}...'
h.append(d)
s = []
if nl:
s.append('\n')
s.append(self.indent(n))
s.append('{')
for item in d.items():
s.append('\n')
s.append(self.indent(n+1))
if isinstance(item[1], (list,tuple)):
s.append(tostr(item[0]))
s.append('[]')
else:
s.append(tostr(item[0]))
s.append(' = ')
s.append(self.process(item[1], h, n, True))
s.append('\n')
s.append(self.indent(n))
s.append('}')
h.pop()
return ''.join(s)
def print_collection(self, c, h, n):
""" print collection using the specified indent (n) and newline (nl). """
if c in h: return '[]...'
h.append(c)
s = []
for item in c:
s.append('\n')
s.append(self.indent(n))
s.append(self.process(item, h, n-2))
s.append(',')
h.pop()
return ''.join(s)
def unwrap(self, d, item):
""" translate (unwrap) using an optional wrapper function """
nopt = ( lambda x: x )
try:
md = d.__metadata__
pmd = getattr(md, '__print__', None)
if pmd is None:
return item
wrappers = getattr(pmd, 'wrappers', {})
fn = wrappers.get(item[0], nopt)
return (item[0], fn(item[1]))
except:
pass
return item
def exclude(self, d, item):
""" check metadata for excluded items """
try:
md = d.__metadata__
pmd = getattr(md, '__print__', None)
if pmd is None:
return False
excludes = getattr(pmd, 'excludes', [])
return ( item[0] in excludes )
except:
pass
return False
| 11,165 | Python | .py | 335 | 23.883582 | 84 | 0.536643 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,316 | plugin.py | CouchPotato_CouchPotatoServer/libs/suds/plugin.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The plugin module provides classes for implementation
of suds plugins.
"""
from suds import *
from logging import getLogger
log = getLogger(__name__)
class Context(object):
"""
Plugin context.
"""
pass
class InitContext(Context):
"""
Init Context.
@ivar wsdl: The wsdl.
@type wsdl: L{wsdl.Definitions}
"""
pass
class DocumentContext(Context):
"""
The XML document load context.
@ivar url: The URL.
@type url: str
@ivar document: Either the XML text or the B{parsed} document root.
@type document: (str|L{sax.element.Element})
"""
pass
class MessageContext(Context):
"""
The context for sending the soap envelope.
@ivar envelope: The soap envelope to be sent.
@type envelope: (str|L{sax.element.Element})
@ivar reply: The reply.
@type reply: (str|L{sax.element.Element}|object)
"""
pass
class Plugin:
"""
Plugin base.
"""
pass
class InitPlugin(Plugin):
"""
The base class for suds I{init} plugins.
"""
def initialized(self, context):
"""
Suds client initialization.
        Called after the wsdl has been loaded. Provides the plugin
with the opportunity to inspect/modify the WSDL.
@param context: The init context.
@type context: L{InitContext}
"""
pass
class DocumentPlugin(Plugin):
"""
The base class for suds I{document} plugins.
"""
def loaded(self, context):
"""
Suds has loaded a WSDL/XSD document. Provides the plugin
with an opportunity to inspect/modify the unparsed document.
Called after each WSDL/XSD document is loaded.
@param context: The document context.
@type context: L{DocumentContext}
"""
pass
def parsed(self, context):
"""
Suds has parsed a WSDL/XSD document. Provides the plugin
with an opportunity to inspect/modify the parsed document.
Called after each WSDL/XSD document is parsed.
@param context: The document context.
@type context: L{DocumentContext}
"""
pass
class MessagePlugin(Plugin):
"""
The base class for suds I{soap message} plugins.
"""
def marshalled(self, context):
"""
Suds will send the specified soap envelope.
Provides the plugin with the opportunity to inspect/modify
the envelope Document before it is sent.
@param context: The send context.
        The I{envelope} is the envelope document.
@type context: L{MessageContext}
"""
pass
def sending(self, context):
"""
Suds will send the specified soap envelope.
Provides the plugin with the opportunity to inspect/modify
        the message text before it is sent.
@param context: The send context.
The I{envelope} is the envelope text.
@type context: L{MessageContext}
"""
pass
def received(self, context):
"""
Suds has received the specified reply.
Provides the plugin with the opportunity to inspect/modify
the received XML text before it is SAX parsed.
@param context: The reply context.
The I{reply} is the raw text.
@type context: L{MessageContext}
"""
pass
def parsed(self, context):
"""
Suds has sax parsed the received reply.
Provides the plugin with the opportunity to inspect/modify
the sax parsed DOM tree for the reply before it is unmarshalled.
@param context: The reply context.
        The I{reply} is the DOM tree.
@type context: L{MessageContext}
"""
pass
def unmarshalled(self, context):
"""
Suds has unmarshalled the received reply.
Provides the plugin with the opportunity to inspect/modify
the unmarshalled reply object before it is returned.
@param context: The reply context.
        The I{reply} is the unmarshalled suds object.
@type context: L{MessageContext}
"""
pass
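# A minimal sketch of a custom message plugin wired in through the plugins
# option; the class name and URL are hypothetical:
#
#     class LogPlugin(MessagePlugin):
#         def sending(self, context):
#             print context.envelope           # outgoing envelope text
#
#     client = Client('http://example.com/shipping.wsdl', plugins=[LogPlugin()])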
class PluginContainer:
"""
Plugin container provides easy method invocation.
@ivar plugins: A list of plugin objects.
@type plugins: [L{Plugin},]
@cvar ctxclass: A dict of plugin method / context classes.
@type ctxclass: dict
"""
domains = {\
'init': (InitContext, InitPlugin),
'document': (DocumentContext, DocumentPlugin),
'message': (MessageContext, MessagePlugin ),
}
def __init__(self, plugins):
"""
@param plugins: A list of plugin objects.
@type plugins: [L{Plugin},]
"""
self.plugins = plugins
def __getattr__(self, name):
domain = self.domains.get(name)
if domain:
plugins = []
ctx, pclass = domain
for p in self.plugins:
if isinstance(p, pclass):
plugins.append(p)
return PluginDomain(ctx, plugins)
else:
raise Exception, 'plugin domain (%s), invalid' % name
class PluginDomain:
"""
The plugin domain.
@ivar ctx: A context.
@type ctx: L{Context}
@ivar plugins: A list of plugins (targets).
@type plugins: list
"""
def __init__(self, ctx, plugins):
self.ctx = ctx
self.plugins = plugins
def __getattr__(self, name):
return Method(name, self)
class Method:
"""
Plugin method.
@ivar name: The method name.
@type name: str
@ivar domain: The plugin domain.
@type domain: L{PluginDomain}
"""
def __init__(self, name, domain):
"""
@param name: The method name.
@type name: str
@param domain: A plugin domain.
@type domain: L{PluginDomain}
"""
self.name = name
self.domain = domain
def __call__(self, **kwargs):
ctx = self.domain.ctx()
ctx.__dict__.update(kwargs)
for plugin in self.domain.plugins:
try:
method = getattr(plugin, self.name, None)
if method and callable(method):
method(ctx)
except Exception, pe:
log.exception(pe)
return ctx
| 7,228 | Python | .py | 217 | 25.801843 | 76 | 0.63047 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,317 | properties.py | CouchPotato_CouchPotatoServer/libs/suds/properties.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Properties classes.
"""
from logging import getLogger
log = getLogger(__name__)
class AutoLinker(object):
"""
Base class, provides interface for I{automatic} link
management between a L{Properties} object and the L{Properties}
contained within I{values}.
"""
def updated(self, properties, prev, next):
"""
        Notification that a value was updated and the linkage
        between the I{properties} contained within I{prev} needs to
        be relinked to the L{Properties} contained within the
        I{next} value.
"""
pass
class Link(object):
"""
Property link object.
@ivar endpoints: A tuple of the (2) endpoints of the link.
@type endpoints: tuple(2)
"""
def __init__(self, a, b):
"""
@param a: Property (A) to link.
@type a: L{Property}
@param b: Property (B) to link.
@type b: L{Property}
"""
pA = Endpoint(self, a)
pB = Endpoint(self, b)
self.endpoints = (pA, pB)
self.validate(a, b)
a.links.append(pB)
b.links.append(pA)
def validate(self, pA, pB):
"""
Validate that the two properties may be linked.
@param pA: Endpoint (A) to link.
@type pA: L{Endpoint}
@param pB: Endpoint (B) to link.
@type pB: L{Endpoint}
@return: self
@rtype: L{Link}
"""
if pA in pB.links or \
pB in pA.links:
raise Exception, 'Already linked'
dA = pA.domains()
dB = pB.domains()
for d in dA:
if d in dB:
raise Exception, 'Duplicate domain "%s" found' % d
for d in dB:
if d in dA:
raise Exception, 'Duplicate domain "%s" found' % d
kA = pA.keys()
kB = pB.keys()
for k in kA:
if k in kB:
raise Exception, 'Duplicate key %s found' % k
for k in kB:
if k in kA:
raise Exception, 'Duplicate key %s found' % k
return self
def teardown(self):
"""
Teardown the link.
Removes endpoints from properties I{links} collection.
@return: self
@rtype: L{Link}
"""
pA, pB = self.endpoints
if pA in pB.links:
pB.links.remove(pA)
if pB in pA.links:
pA.links.remove(pB)
return self
class Endpoint(object):
"""
Link endpoint (wrapper).
@ivar link: The associated link.
@type link: L{Link}
@ivar target: The properties object.
@type target: L{Property}
"""
def __init__(self, link, target):
self.link = link
self.target = target
def teardown(self):
return self.link.teardown()
def __eq__(self, rhs):
return ( self.target == rhs )
def __hash__(self):
return hash(self.target)
def __getattr__(self, name):
return getattr(self.target, name)
class Definition:
"""
Property definition.
@ivar name: The property name.
@type name: str
@ivar classes: The (class) list of permitted values
@type classes: tuple
@ivar default: The default value.
    @type default: any
"""
def __init__(self, name, classes, default, linker=AutoLinker()):
"""
@param name: The property name.
@type name: str
@param classes: The (class) list of permitted values
@type classes: tuple
@param default: The default value.
@type default: any
"""
if not isinstance(classes, (list, tuple)):
classes = (classes,)
self.name = name
self.classes = classes
self.default = default
self.linker = linker
def nvl(self, value=None):
"""
Convert the I{value} into the default when I{None}.
@param value: The proposed value.
@type value: any
@return: The I{default} when I{value} is I{None}, else I{value}.
@rtype: any
"""
if value is None:
return self.default
else:
return value
def validate(self, value):
"""
Validate the I{value} is of the correct class.
@param value: The value to validate.
@type value: any
@raise AttributeError: When I{value} is invalid.
"""
if value is None:
return
if len(self.classes) and \
not isinstance(value, self.classes):
msg = '"%s" must be: %s' % (self.name, self.classes)
raise AttributeError,msg
def __repr__(self):
return '%s: %s' % (self.name, str(self))
def __str__(self):
s = []
if len(self.classes):
s.append('classes=%s' % str(self.classes))
else:
s.append('classes=*')
s.append("default=%s" % str(self.default))
return ', '.join(s)
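# A minimal sketch of definitions feeding a Properties table, in the same way
# suds.options declares its settings; the names and defaults are illustrative:
#
#     definitions = [
#         Definition('faults', bool, True),
#         Definition('timeout', (int, float), 90),
#     ]
#     props = Properties('options', definitions, dict(timeout=30))
#     props.get('faults')      # True (primed default)
#     props.get('timeout')     # 30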
class Properties:
"""
Represents basic application properties.
Provides basic type validation, default values and
link/synchronization behavior.
@ivar domain: The domain name.
@type domain: str
@ivar definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@ivar links: A list of linked property objects used to create
a network of properties.
@type links: [L{Property},..]
@ivar defined: A dict of property values.
@type defined: dict
"""
def __init__(self, domain, definitions, kwargs):
"""
@param domain: The property domain name.
@type domain: str
@param definitions: A table of property definitions.
@type definitions: {name: L{Definition}}
@param kwargs: A list of property name/values to set.
@type kwargs: dict
"""
self.definitions = {}
for d in definitions:
self.definitions[d.name] = d
self.domain = domain
self.links = []
self.defined = {}
self.modified = set()
self.prime()
self.update(kwargs)
def definition(self, name):
"""
Get the definition for the property I{name}.
@param name: The property I{name} to find the definition for.
@type name: str
@return: The property definition
@rtype: L{Definition}
@raise AttributeError: On not found.
"""
d = self.definitions.get(name)
if d is None:
raise AttributeError(name)
return d
def update(self, other):
"""
Update the property values as specified by keyword/value.
@param other: An object to update from.
@type other: (dict|L{Properties})
@return: self
@rtype: L{Properties}
"""
if isinstance(other, Properties):
other = other.defined
for n,v in other.items():
self.set(n, v)
return self
def notset(self, name):
"""
Get whether a property has never been set by I{name}.
@param name: A property name.
@type name: str
@return: True if never been set.
@rtype: bool
"""
        return self.provider(name).__notset(name)
def set(self, name, value):
"""
Set the I{value} of a property by I{name}.
The value is validated against the definition and set
to the default when I{value} is None.
@param name: The property name.
@type name: str
@param value: The new property value.
@type value: any
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, value)
return self
def unset(self, name):
"""
Unset a property by I{name}.
@param name: A property name.
@type name: str
@return: self
@rtype: L{Properties}
"""
self.provider(name).__set(name, None)
return self
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.provider(name).__get(name, *df)
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
Link(self, other)
return self
def unlink(self, *others):
"""
Unlink (disassociate) the specified properties object.
        @param others: The list of objects to unlink. Unspecified means unlink all.
@type others: [L{Properties},..]
@return: self
@rtype: L{Properties}
"""
if not len(others):
others = self.links[:]
for p in self.links[:]:
if p in others:
p.teardown()
return self
def provider(self, name, history=None):
"""
Find the provider of the property by I{name}.
@param name: The property name.
@type name: str
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
        @return: The provider when found; otherwise None (when nested)
            or I{self} (when not nested).
@rtype: L{Properties}
"""
if history is None:
history = []
history.append(self)
if name in self.definitions:
return self
for x in self.links:
if x in history:
continue
provider = x.provider(name, history)
if provider is not None:
return provider
history.remove(self)
if len(history):
return None
return self
def keys(self, history=None):
"""
Get the set of I{all} property names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of property names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
keys = set()
keys.update(self.definitions.keys())
for x in self.links:
if x in history:
continue
keys.update(x.keys(history))
history.remove(self)
return keys
def domains(self, history=None):
"""
Get the set of I{all} domain names.
@param history: A history of nodes checked to prevent
circular hunting.
@type history: [L{Properties},..]
@return: A set of domain names.
@rtype: list
"""
if history is None:
history = []
history.append(self)
domains = set()
domains.add(self.domain)
for x in self.links:
if x in history:
continue
domains.update(x.domains(history))
history.remove(self)
return domains
def prime(self):
"""
Prime the stored values based on default values
found in property definitions.
@return: self
@rtype: L{Properties}
"""
for d in self.definitions.values():
self.defined[d.name] = d.default
return self
def __notset(self, name):
return not (name in self.modified)
def __set(self, name, value):
d = self.definition(name)
d.validate(value)
value = d.nvl(value)
prev = self.defined[name]
self.defined[name] = value
self.modified.add(name)
d.linker.updated(self, prev, value)
def __get(self, name, *df):
d = self.definition(name)
value = self.defined.get(name)
if value == d.default and len(df):
value = df[0]
return value
def str(self, history):
s = []
s.append('Definitions:')
for d in self.definitions.values():
s.append('\t%s' % repr(d))
s.append('Content:')
for d in self.defined.items():
s.append('\t%s' % str(d))
if self not in history:
history.append(self)
s.append('Linked:')
for x in self.links:
s.append(x.str(history))
history.remove(self)
return '\n'.join(s)
def __repr__(self):
return str(self)
def __str__(self):
return self.str([])
class Skin(object):
"""
The meta-programming I{skin} around the L{Properties} object.
@ivar __pts__: The wrapped object.
@type __pts__: L{Properties}.
"""
def __init__(self, domain, definitions, kwargs):
self.__pts__ = Properties(domain, definitions, kwargs)
def __setattr__(self, name, value):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
self.__dict__[name] = value
return
self.__pts__.set(name, value)
def __getattr__(self, name):
return self.__pts__.get(name)
def __repr__(self):
return str(self)
def __str__(self):
return str(self.__pts__)
class Unskin(object):
def __new__(self, *args, **kwargs):
return args[0].__pts__
class Inspector:
"""
Wrapper inspector.
"""
def __init__(self, options):
self.properties = options.__pts__
def get(self, name, *df):
"""
Get the value of a property by I{name}.
@param name: The property name.
@type name: str
@param df: An optional value to be returned when the value
is not set
@type df: [1].
@return: The stored value, or I{df[0]} if not set.
@rtype: any
"""
return self.properties.get(name, *df)
def update(self, **kwargs):
"""
Update the property values as specified by keyword/value.
@param kwargs: A list of property name/values to set.
@type kwargs: dict
@return: self
@rtype: L{Properties}
"""
        return self.properties.update(kwargs)  # Properties.update() takes a dict positionally
def link(self, other):
"""
        Link (associate) this object with an I{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.link(p)
def unlink(self, other):
"""
Unlink (disassociate) the specified properties object.
@param other: The object to unlink.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.unlink(p)
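# A minimal usage sketch of how Definition, Properties, Skin and Unskin fit
# together. The domain name 'example' and the 'retries'/'timeout' properties
# are hypothetical values chosen purely for illustration.
if __name__ == '__main__':
    definitions = [
        Definition('retries', int, 3),
        Definition('timeout', (int, float), 90),
    ]
    opts = Skin('example', definitions, dict(timeout=30))
    print opts.retries        # 3  (primed from the default)
    print opts.timeout        # 30 (set via the kwargs dict)
    opts.retries = 5          # validated against the declared classes
    pts = Unskin(opts)        # unwrap to the underlying Properties
    print pts.get('retries')  # 5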
# File: CouchPotato_CouchPotatoServer/libs/suds/options.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Suds basic options classes.
"""
from suds.properties import *
from suds.wsse import Security
from suds.xsd.doctor import Doctor
from suds.transport import Transport
from suds.cache import Cache, NoCache
class TpLinker(AutoLinker):
"""
Transport (auto) linker used to manage linkage between
transport objects Properties and those Properties that contain them.
"""
def updated(self, properties, prev, next):
if isinstance(prev, Transport):
tp = Unskin(prev.options)
properties.unlink(tp)
if isinstance(next, Transport):
tp = Unskin(next.options)
properties.link(tp)
class Options(Skin):
"""
Options:
        - B{cache} - The XML document cache. May be set to None to disable caching.
- type: L{Cache}
- default: L{NoCache}
- B{faults} - Raise faults raised by server,
else return tuple from service method invocation as (httpcode, object).
- type: I{bool}
- default: True
- B{service} - The default service name.
- type: I{str}
- default: None
- B{port} - The default service port name, not tcp port.
- type: I{str}
- default: None
- B{location} - This overrides the service port address I{URL} defined
in the WSDL.
- type: I{str}
- default: None
- B{transport} - The message transport.
- type: L{Transport}
- default: None
- B{soapheaders} - The soap headers to be included in the soap message.
- type: I{any}
- default: None
- B{wsse} - The web services I{security} provider object.
- type: L{Security}
- default: None
- B{doctor} - A schema I{doctor} object.
- type: L{Doctor}
- default: None
- B{xstq} - The B{x}ml B{s}chema B{t}ype B{q}ualified flag indicates
that the I{xsi:type} attribute values should be qualified by namespace.
- type: I{bool}
- default: True
- B{prefixes} - Elements of the soap message should be qualified (when needed)
using XML prefixes as opposed to xmlns="" syntax.
- type: I{bool}
- default: True
- B{retxml} - Flag that causes the I{raw} soap envelope to be returned instead
of the python object graph.
- type: I{bool}
- default: False
- B{prettyxml} - Flag that causes I{pretty} xml to be rendered when generating
the outbound soap envelope.
- type: I{bool}
- default: False
- B{autoblend} - Flag that ensures that the schema(s) defined within the
WSDL import each other.
- type: I{bool}
- default: False
- B{cachingpolicy} - The caching policy.
- type: I{int}
- 0 = Cache XML documents.
- 1 = Cache WSDL (pickled) object.
- default: 0
- B{plugins} - A plugin container.
- type: I{list}
"""
def __init__(self, **kwargs):
domain = __name__
definitions = [
Definition('cache', Cache, NoCache()),
Definition('faults', bool, True),
Definition('transport', Transport, None, TpLinker()),
Definition('service', (int, basestring), None),
Definition('port', (int, basestring), None),
Definition('location', basestring, None),
Definition('soapheaders', (), ()),
Definition('wsse', Security, None),
Definition('doctor', Doctor, None),
Definition('xstq', bool, True),
Definition('prefixes', bool, True),
Definition('retxml', bool, False),
Definition('prettyxml', bool, False),
Definition('autoblend', bool, False),
Definition('cachingpolicy', int, 0),
Definition('plugins', (list, tuple), []),
]
Skin.__init__(self, domain, definitions, kwargs)
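# A minimal usage sketch, assuming nothing beyond the definitions above:
# Options is a Skin, so keyword values are validated against the Definitions
# and anything left unset falls back to its default (e.g. the NoCache instance).
if __name__ == '__main__':
    options = Options(faults=False, prettyxml=True)
    print options.faults       # False (set via keyword)
    print options.xstq         # True  (default)
    print options.cache        # the default NoCache instance
    options.cachingpolicy = 1  # validated: must be an int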
# File: CouchPotato_CouchPotatoServer/libs/suds/cache.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains basic caching classes.
"""
import os
import suds
from tempfile import gettempdir as tmp
from suds.transport import *
from suds.sax.parser import Parser
from suds.sax.element import Element
from datetime import datetime as dt
from datetime import timedelta
from cStringIO import StringIO
from logging import getLogger
try:
import cPickle as pickle
except:
import pickle
log = getLogger(__name__)
class Cache:
"""
    An object cache.
"""
def get(self, id):
"""
        Get an object from the cache by ID.
@param id: The object ID.
@type id: str
@return: The object, else None
@rtype: any
"""
raise Exception('not-implemented')
def getf(self, id):
"""
        Get an object from the cache by ID.
@param id: The object ID.
@type id: str
@return: The object, else None
@rtype: any
"""
raise Exception('not-implemented')
def put(self, id, object):
"""
        Put an object into the cache.
@param id: The object ID.
@type id: str
@param object: The object to add.
@type object: any
"""
raise Exception('not-implemented')
def putf(self, id, fp):
"""
Write a fp into the cache.
@param id: The object ID.
@type id: str
@param fp: File pointer.
@type fp: file-like object.
"""
raise Exception('not-implemented')
def purge(self, id):
"""
        Purge an object from the cache by id.
@param id: A object ID.
@type id: str
"""
raise Exception('not-implemented')
def clear(self):
"""
Clear all objects from the cache.
"""
raise Exception('not-implemented')
class NoCache(Cache):
"""
The passthru object cache.
"""
def get(self, id):
return None
def getf(self, id):
return None
def put(self, id, object):
pass
def putf(self, id, fp):
pass
class FileCache(Cache):
"""
A file-based URL cache.
@cvar fnprefix: The file name prefix.
    @type fnprefix: str
@ivar duration: The cached file duration which defines how
long the file will be cached.
@type duration: (unit, value)
@ivar location: The directory for the cached files.
@type location: str
"""
fnprefix = 'suds'
units = ('months', 'weeks', 'days', 'hours', 'minutes', 'seconds')
def __init__(self, location=None, **duration):
"""
@param location: The directory for the cached files.
@type location: str
@param duration: The cached file duration which defines how
long the file will be cached. A duration=0 means forever.
The duration may be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
"""
if location is None:
location = os.path.join(tmp(), 'suds')
self.location = location
self.duration = (None, 0)
self.setduration(**duration)
self.checkversion()
def fnsuffix(self):
"""
Get the file name suffix
@return: The suffix
@rtype: str
"""
return 'gcf'
def setduration(self, **duration):
"""
Set the caching duration which defines how long the
file will be cached.
@param duration: The cached file duration which defines how
long the file will be cached. A duration=0 means forever.
The duration may be: (months|weeks|days|hours|minutes|seconds).
@type duration: {unit:value}
"""
if len(duration) == 1:
arg = duration.items()[0]
if not arg[0] in self.units:
raise Exception('must be: %s' % str(self.units))
self.duration = arg
return self
def setlocation(self, location):
"""
Set the location (directory) for the cached files.
@param location: The directory for the cached files.
@type location: str
"""
self.location = location
def mktmp(self):
"""
        Make the I{location} directory if it doesn't already exist.
"""
try:
if not os.path.isdir(self.location):
os.makedirs(self.location)
except:
log.debug(self.location, exc_info=1)
return self
def put(self, id, bfr):
try:
fn = self.__fn(id)
f = self.open(fn, 'w')
f.write(bfr)
f.close()
return bfr
except:
log.debug(id, exc_info=1)
return bfr
def putf(self, id, fp):
try:
fn = self.__fn(id)
f = self.open(fn, 'w')
f.write(fp.read())
fp.close()
f.close()
return open(fn)
except:
log.debug(id, exc_info=1)
return fp
def get(self, id):
try:
f = self.getf(id)
bfr = f.read()
f.close()
return bfr
except:
pass
def getf(self, id):
try:
fn = self.__fn(id)
self.validate(fn)
return self.open(fn)
except:
pass
def validate(self, fn):
"""
Validate that the file has not expired based on the I{duration}.
@param fn: The file name.
@type fn: str
"""
if self.duration[1] < 1:
return
created = dt.fromtimestamp(os.path.getctime(fn))
d = { self.duration[0]:self.duration[1] }
expired = created+timedelta(**d)
if expired < dt.now():
log.debug('%s expired, deleted', fn)
os.remove(fn)
def clear(self):
for fn in os.listdir(self.location):
if os.path.isdir(fn):
continue
if fn.startswith(self.fnprefix):
log.debug('deleted: %s', fn)
os.remove(os.path.join(self.location, fn))
def purge(self, id):
fn = self.__fn(id)
try:
os.remove(fn)
except:
pass
def open(self, fn, *args):
"""
Open the cache file making sure the directory is created.
"""
self.mktmp()
return open(fn, *args)
def checkversion(self):
path = os.path.join(self.location, 'version')
try:
f = self.open(path)
version = f.read()
f.close()
if version != suds.__version__:
raise Exception()
except:
self.clear()
f = self.open(path, 'w')
f.write(suds.__version__)
f.close()
def __fn(self, id):
name = id
suffix = self.fnsuffix()
fn = '%s-%s.%s' % (self.fnprefix, name, suffix)
return os.path.join(self.location, fn)
class DocumentCache(FileCache):
"""
Provides xml document caching.
"""
def fnsuffix(self):
return 'xml'
def get(self, id):
try:
fp = FileCache.getf(self, id)
if fp is None:
return None
p = Parser()
return p.parse(fp)
except:
FileCache.purge(self, id)
def put(self, id, object):
if isinstance(object, Element):
FileCache.put(self, id, str(object))
return object
class ObjectCache(FileCache):
"""
Provides pickled object caching.
@cvar protocol: The pickling protocol.
@type protocol: int
"""
protocol = 2
def fnsuffix(self):
return 'px'
def get(self, id):
try:
fp = FileCache.getf(self, id)
if fp is None:
return None
else:
return pickle.load(fp)
except:
FileCache.purge(self, id)
def put(self, id, object):
bfr = pickle.dumps(object, self.protocol)
FileCache.put(self, id, bfr)
return object
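# A minimal usage sketch of ObjectCache; the cache id, the stored value and
# the 'suds-demo' directory name are hypothetical.
if __name__ == '__main__':
    location = os.path.join(tmp(), 'suds-demo')
    cache = ObjectCache(location, days=1)      # entries expire after one day
    cache.put('greeting', {'hello': 'world'})
    print cache.get('greeting')                # {'hello': 'world'}
    cache.purge('greeting')
    print cache.get('greeting')                # None (purged)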
# File: CouchPotato_CouchPotatoServer/libs/suds/metrics.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{metrics} module defines classes and other resources
designed for collecting and reporting performance metrics.
"""
import time
from logging import getLogger
from suds import *
from math import modf
log = getLogger(__name__)
class Timer:
def __init__(self):
self.started = 0
self.stopped = 0
def start(self):
self.started = time.time()
self.stopped = 0
return self
def stop(self):
if self.started > 0:
self.stopped = time.time()
return self
def duration(self):
return ( self.stopped - self.started )
def __str__(self):
if self.started == 0:
return 'not-running'
if self.started > 0 and self.stopped == 0:
return 'started: %d (running)' % self.started
duration = self.duration()
jmod = ( lambda m : (m[1], m[0]*1000) )
if duration < 1:
ms = (duration*1000)
return '%d (ms)' % ms
if duration < 60:
m = modf(duration)
return '%d.%.3d (seconds)' % jmod(m)
m = modf(duration/60)
return '%d.%.3d (minutes)' % jmod(m)
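# A minimal usage sketch of Timer; the sleep stands in for the work being
# measured and the printed value is approximate.
if __name__ == '__main__':
    t = Timer()
    t.start()
    time.sleep(0.25)
    t.stop()
    print t    # approximately '250 (ms)'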
# File: CouchPotato_CouchPotatoServer/libs/suds/builder.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{builder} module provides a wsdl/xsd defined types factory
"""
from logging import getLogger
from suds import *
from suds.sudsobject import Factory
log = getLogger(__name__)
class Builder:
""" Builder used to construct an object for types defined in the schema """
def __init__(self, resolver):
"""
@param resolver: A schema object name resolver.
@type resolver: L{resolver.Resolver}
"""
self.resolver = resolver
    def build(self, name):
        """ Build an object for the specified typename as defined in the schema """
if isinstance(name, basestring):
type = self.resolver.find(name)
if type is None:
raise TypeNotFound(name)
else:
type = name
cls = type.name
if type.mixed():
data = Factory.property(cls)
else:
data = Factory.object(cls)
resolved = type.resolve()
md = data.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
history = []
self.add_attributes(data, resolved)
for child, ancestry in type.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
return data
def process(self, data, type, history):
""" process the specified type then process its children """
if type in history:
return
if type.enum():
return
history.append(type)
resolved = type.resolve()
value = None
if type.unbounded():
value = []
else:
if len(resolved) > 0:
if resolved.mixed():
value = Factory.property(resolved.name)
md = value.__metadata__
md.sxtype = resolved
else:
value = Factory.object(resolved.name)
md = value.__metadata__
md.sxtype = resolved
md.ordering = self.ordering(resolved)
setattr(data, type.name, value)
if value is not None:
data = value
if not isinstance(data, list):
self.add_attributes(data, resolved)
for child, ancestry in resolved.children():
if self.skip_child(child, ancestry):
continue
self.process(data, child, history[:])
def add_attributes(self, data, type):
""" add required attributes """
for attr, ancestry in type.attributes():
name = '_%s' % attr.name
value = attr.get_default()
setattr(data, name, value)
def skip_child(self, child, ancestry):
""" get whether or not to skip the specified child """
if child.any(): return True
for x in ancestry:
if x.choice():
return True
return False
def ordering(self, type):
""" get the ordering """
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
# File: CouchPotato_CouchPotatoServer/libs/suds/__init__.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Suds is a lightweight SOAP python client that provides a
service proxy for Web Services.
"""
import os
import sys
#
# Project properties
#
__version__ = '0.4'
__build__="GA R699-20100913"
#
# Exceptions
#
class MethodNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Method not found: '%s'" % name)
class PortNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Port not found: '%s'" % name)
class ServiceNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Service not found: '%s'" % name)
class TypeNotFound(Exception):
def __init__(self, name):
Exception.__init__(self, "Type not found: '%s'" % tostr(name))
class BuildError(Exception):
msg = \
"""
        An error occurred while building an instance of (%s). As a result
the object you requested could not be constructed. It is recommended
that you construct the type manually using a Suds object.
Please open a ticket with a description of this error.
Reason: %s
"""
def __init__(self, name, exception):
Exception.__init__(self, BuildError.msg % (name, exception))
class SoapHeadersNotPermitted(Exception):
msg = \
"""
Method (%s) was invoked with SOAP headers. The WSDL does not
define SOAP headers for this method. Retry without the soapheaders
keyword argument.
"""
def __init__(self, name):
Exception.__init__(self, self.msg % name)
class WebFault(Exception):
def __init__(self, fault, document):
if hasattr(fault, 'faultstring'):
Exception.__init__(self, "Server raised fault: '%s'" % fault.faultstring)
self.fault = fault
self.document = document
#
# Logging
#
class Repr:
def __init__(self, x):
self.x = x
def __str__(self):
return repr(self.x)
#
# Utility
#
def tostr(object, encoding=None):
""" get a unicode safe string representation of an object """
if isinstance(object, basestring):
if encoding is None:
return object
else:
return object.encode(encoding)
if isinstance(object, tuple):
s = ['(']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(')')
return ''.join(s)
if isinstance(object, list):
s = ['[']
for item in object:
if isinstance(item, basestring):
s.append(item)
else:
s.append(tostr(item))
s.append(', ')
s.append(']')
return ''.join(s)
if isinstance(object, dict):
s = ['{']
for item in object.items():
if isinstance(item[0], basestring):
s.append(item[0])
else:
s.append(tostr(item[0]))
s.append(' = ')
if isinstance(item[1], basestring):
s.append(item[1])
else:
s.append(tostr(item[1]))
s.append(', ')
s.append('}')
return ''.join(s)
try:
return unicode(object)
except:
return str(object)
class null:
"""
The I{null} object.
Used to pass NULL for optional XML nodes.
"""
pass
def objid(obj):
return obj.__class__.__name__\
+':'+hex(id(obj))
import client
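# A minimal usage sketch of tostr() and objid(); the sample values are
# hypothetical.
if __name__ == '__main__':
    print tostr({'laureate': [u'Curie', ('physics', 1903)]})
    print objid(null())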
# File: CouchPotato_CouchPotatoServer/libs/suds/wsdl.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{wsdl} module provides an objectification of the WSDL.
The primary class is I{Definitions} as it represents the root element
found in the document.
"""
from logging import getLogger
from suds import *
from suds.sax import splitPrefix
from suds.sax.element import Element
from suds.bindings.document import Document
from suds.bindings.rpc import RPC, Encoded
from suds.xsd import qualify, Namespace
from suds.xsd.schema import Schema, SchemaCollection
from suds.xsd.query import ElementQuery
from suds.sudsobject import Object, Facade, Metadata
from suds.reader import DocumentReader, DefinitionsReader
from urlparse import urljoin
import re, soaparray
log = getLogger(__name__)
wsdlns = (None, "http://schemas.xmlsoap.org/wsdl/")
soapns = (None, 'http://schemas.xmlsoap.org/wsdl/soap/')
soap12ns = (None, 'http://schemas.xmlsoap.org/wsdl/soap12/')
class WObject(Object):
"""
Base object for wsdl types.
@ivar root: The XML I{root} element.
@type root: L{Element}
"""
def __init__(self, root, definitions=None):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
Object.__init__(self)
self.root = root
pmd = Metadata()
pmd.excludes = ['root']
pmd.wrappers = dict(qname=repr)
self.__metadata__.__print__ = pmd
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
pass
class NamedObject(WObject):
"""
A B{named} WSDL object.
@ivar name: The name of the object.
@type name: str
@ivar qname: The I{qualified} name of the object.
@type qname: (name, I{namespace-uri}).
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
WObject.__init__(self, root, definitions)
self.name = root.get('name')
self.qname = (self.name, definitions.tns[1])
pmd = self.__metadata__.__print__
pmd.wrappers['qname'] = repr
class Definitions(WObject):
"""
Represents the I{root} container of the WSDL objects as defined
by <wsdl:definitions/>
@ivar id: The object id.
@type id: str
@ivar options: An options dictionary.
@type options: L{options.Options}
@ivar url: The URL used to load the object.
@type url: str
@ivar tns: The target namespace for the WSDL.
@type tns: str
@ivar schema: The collective WSDL schema object.
@type schema: L{SchemaCollection}
@ivar children: The raw list of child objects.
@type children: [L{WObject},...]
@ivar imports: The list of L{Import} children.
@type imports: [L{Import},...]
@ivar messages: The dictionary of L{Message} children key'd by I{qname}
@type messages: [L{Message},...]
@ivar port_types: The dictionary of L{PortType} children key'd by I{qname}
@type port_types: [L{PortType},...]
@ivar bindings: The dictionary of L{Binding} children key'd by I{qname}
@type bindings: [L{Binding},...]
@ivar service: The service object.
@type service: L{Service}
"""
Tag = 'definitions'
def __init__(self, url, options):
"""
@param url: A URL to the WSDL.
@type url: str
@param options: An options dictionary.
@type options: L{options.Options}
"""
log.debug('reading wsdl at: %s ...', url)
reader = DocumentReader(options)
d = reader.open(url)
root = d.root()
WObject.__init__(self, root)
self.id = objid(self)
self.options = options
self.url = url
self.tns = self.mktns(root)
self.types = []
self.schema = None
self.children = []
self.imports = []
self.messages = {}
self.port_types = {}
self.bindings = {}
self.services = []
self.add_children(self.root)
self.children.sort()
pmd = self.__metadata__.__print__
pmd.excludes.append('children')
pmd.excludes.append('wsdl')
pmd.wrappers['schema'] = repr
self.open_imports()
self.resolve()
self.build_schema()
self.set_wrapped()
for s in self.services:
self.add_methods(s)
log.debug("wsdl at '%s' loaded:\n%s", url, self)
def mktns(self, root):
""" Get/create the target namespace """
tns = root.get('targetNamespace')
prefix = root.findPrefix(tns)
if prefix is None:
log.debug('warning: tns (%s), not mapped to prefix', tns)
prefix = 'tns'
return (prefix, tns)
def add_children(self, root):
""" Add child objects using the factory """
for c in root.getChildren(ns=wsdlns):
child = Factory.create(c, self)
if child is None: continue
self.children.append(child)
if isinstance(child, Import):
self.imports.append(child)
continue
if isinstance(child, Types):
self.types.append(child)
continue
if isinstance(child, Message):
self.messages[child.qname] = child
continue
if isinstance(child, PortType):
self.port_types[child.qname] = child
continue
if isinstance(child, Binding):
self.bindings[child.qname] = child
continue
if isinstance(child, Service):
self.services.append(child)
continue
def open_imports(self):
""" Import the I{imported} WSDLs. """
for imp in self.imports:
imp.load(self)
def resolve(self):
""" Tell all children to resolve themselves """
for c in self.children:
c.resolve(self)
def build_schema(self):
""" Process L{Types} objects and create the schema collection """
container = SchemaCollection(self)
for t in [t for t in self.types if t.local()]:
for root in t.contents():
schema = Schema(root, self.url, self.options, container)
container.add(schema)
if not len(container): # empty
root = Element.buildPath(self.root, 'types/schema')
schema = Schema(root, self.url, self.options, container)
container.add(schema)
self.schema = container.load(self.options)
for s in [t.schema() for t in self.types if t.imported()]:
self.schema.merge(s)
return self.schema
def add_methods(self, service):
""" Build method view for service """
bindings = {
'document/literal' : Document(self),
'rpc/literal' : RPC(self),
'rpc/encoded' : Encoded(self)
}
for p in service.ports:
binding = p.binding
ptype = p.binding.type
operations = p.binding.type.operations.values()
for name in [op.name for op in operations]:
m = Facade('Method')
m.name = name
m.location = p.location
m.binding = Facade('binding')
op = binding.operation(name)
m.soap = op.soap
key = '/'.join((op.soap.style, op.soap.input.body.use))
m.binding.input = bindings.get(key)
key = '/'.join((op.soap.style, op.soap.output.body.use))
m.binding.output = bindings.get(key)
op = ptype.operation(name)
p.methods[name] = m
def set_wrapped(self):
""" set (wrapped|bare) flag on messages """
for b in self.bindings.values():
for op in b.operations.values():
for body in (op.soap.input.body, op.soap.output.body):
body.wrapped = False
if len(body.parts) != 1:
continue
for p in body.parts:
if p.element is None:
continue
query = ElementQuery(p.element)
pt = query.execute(self.schema)
if pt is None:
raise TypeNotFound(query.ref)
resolved = pt.resolve()
if resolved.builtin():
continue
body.wrapped = True
def __getstate__(self):
nopickle = ('options',)
state = self.__dict__.copy()
for k in nopickle:
if k in state:
del state[k]
return state
def __repr__(self):
return 'Definitions (id=%s)' % self.id
class Import(WObject):
"""
Represents the <wsdl:import/>.
@ivar location: The value of the I{location} attribute.
@type location: str
@ivar ns: The value of the I{namespace} attribute.
@type ns: str
@ivar imported: The imported object.
@type imported: L{Definitions}
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
WObject.__init__(self, root, definitions)
self.location = root.get('location')
self.ns = root.get('namespace')
self.imported = None
pmd = self.__metadata__.__print__
pmd.wrappers['imported'] = repr
def load(self, definitions):
""" Load the object by opening the URL """
url = self.location
log.debug('importing (%s)', url)
if '://' not in url:
url = urljoin(definitions.url, url)
options = definitions.options
d = Definitions(url, options)
if d.root.match(Definitions.Tag, wsdlns):
self.import_definitions(definitions, d)
return
if d.root.match(Schema.Tag, Namespace.xsdns):
self.import_schema(definitions, d)
return
raise Exception('document at "%s" is unknown' % url)
def import_definitions(self, definitions, d):
""" import/merge wsdl definitions """
definitions.types += d.types
definitions.messages.update(d.messages)
definitions.port_types.update(d.port_types)
definitions.bindings.update(d.bindings)
self.imported = d
log.debug('imported (WSDL):\n%s', d)
def import_schema(self, definitions, d):
""" import schema as <types/> content """
if not len(definitions.types):
types = Types.create(definitions)
definitions.types.append(types)
else:
types = definitions.types[-1]
types.root.append(d.root)
log.debug('imported (XSD):\n%s', d.root)
def __gt__(self, other):
return False
class Types(WObject):
"""
Represents <types><schema/></types>.
"""
@classmethod
def create(cls, definitions):
root = Element('types', ns=wsdlns)
definitions.root.insert(root)
return Types(root, definitions)
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
WObject.__init__(self, root, definitions)
self.definitions = definitions
def contents(self):
return self.root.getChildren('schema', Namespace.xsdns)
def schema(self):
return self.definitions.schema
def local(self):
return ( self.definitions.schema is None )
def imported(self):
return ( not self.local() )
def __gt__(self, other):
return isinstance(other, Import)
class Part(NamedObject):
"""
Represents <message><part/></message>.
@ivar element: The value of the {element} attribute.
Stored as a I{qref} as converted by L{suds.xsd.qualify}.
@type element: str
@ivar type: The value of the {type} attribute.
Stored as a I{qref} as converted by L{suds.xsd.qualify}.
@type type: str
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
pmd = Metadata()
pmd.wrappers = dict(element=repr, type=repr)
self.__metadata__.__print__ = pmd
tns = definitions.tns
self.element = self.__getref('element', tns)
self.type = self.__getref('type', tns)
def __getref(self, a, tns):
""" Get the qualified value of attribute named 'a'."""
s = self.root.get(a)
if s is None:
return s
else:
return qualify(s, self.root, tns)
class Message(NamedObject):
"""
Represents <message/>.
@ivar parts: A list of message parts.
@type parts: [I{Part},...]
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.parts = []
for p in root.getChildren('part'):
part = Part(p, definitions)
self.parts.append(part)
def __gt__(self, other):
return isinstance(other, (Import, Types))
class PortType(NamedObject):
"""
Represents <portType/>.
@ivar operations: A list of contained operations.
@type operations: list
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.operations = {}
for c in root.getChildren('operation'):
op = Facade('Operation')
op.name = c.get('name')
op.tns = definitions.tns
input = c.getChild('input')
if input is None:
op.input = None
else:
op.input = input.get('message')
output = c.getChild('output')
if output is None:
op.output = None
else:
op.output = output.get('message')
faults = []
for fault in c.getChildren('fault'):
f = Facade('Fault')
f.name = fault.get('name')
f.message = fault.get('message')
faults.append(f)
op.faults = faults
self.operations[op.name] = op
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
for op in self.operations.values():
if op.input is None:
op.input = Message(Element('no-input'), definitions)
else:
qref = qualify(op.input, self.root, definitions.tns)
msg = definitions.messages.get(qref)
if msg is None:
raise Exception("msg '%s', not-found" % op.input)
else:
op.input = msg
if op.output is None:
op.output = Message(Element('no-output'), definitions)
else:
qref = qualify(op.output, self.root, definitions.tns)
msg = definitions.messages.get(qref)
if msg is None:
raise Exception("msg '%s', not-found" % op.output)
else:
op.output = msg
for f in op.faults:
qref = qualify(f.message, self.root, definitions.tns)
msg = definitions.messages.get(qref)
if msg is None:
                    raise Exception("msg '%s', not-found" % f.message)
f.message = msg
def operation(self, name):
"""
Shortcut used to get a contained operation by name.
@param name: An operation name.
@type name: str
@return: The named operation.
@rtype: Operation
@raise L{MethodNotFound}: When not found.
"""
try:
return self.operations[name]
except Exception, e:
raise MethodNotFound(name)
def __gt__(self, other):
return isinstance(other, (Import, Types, Message))
class Binding(NamedObject):
"""
Represents <binding/>
@ivar operations: A list of contained operations.
@type operations: list
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.operations = {}
self.type = root.get('type')
sr = self.soaproot()
if sr is None:
self.soap = None
log.debug('binding: "%s" not a soap binding', self.name)
return
soap = Facade('soap')
self.soap = soap
self.soap.style = sr.get('style', default='document')
self.add_operations(self.root, definitions)
def soaproot(self):
""" get the soap:binding """
for ns in (soapns, soap12ns):
sr = self.root.getChild('binding', ns=ns)
if sr is not None:
return sr
return None
def add_operations(self, root, definitions):
""" Add <operation/> children """
dsop = Element('operation', ns=soapns)
for c in root.getChildren('operation'):
op = Facade('Operation')
op.name = c.get('name')
sop = c.getChild('operation', default=dsop)
soap = Facade('soap')
soap.action = '"%s"' % sop.get('soapAction', default='')
soap.style = sop.get('style', default=self.soap.style)
soap.input = Facade('Input')
soap.input.body = Facade('Body')
soap.input.headers = []
soap.output = Facade('Output')
soap.output.body = Facade('Body')
soap.output.headers = []
op.soap = soap
input = c.getChild('input')
if input is None:
input = Element('input', ns=wsdlns)
body = input.getChild('body')
self.body(definitions, soap.input.body, body)
for header in input.getChildren('header'):
self.header(definitions, soap.input, header)
output = c.getChild('output')
if output is None:
output = Element('output', ns=wsdlns)
body = output.getChild('body')
self.body(definitions, soap.output.body, body)
for header in output.getChildren('header'):
self.header(definitions, soap.output, header)
faults = []
for fault in c.getChildren('fault'):
sf = fault.getChild('fault')
if sf is None:
continue
fn = fault.get('name')
f = Facade('Fault')
f.name = sf.get('name', default=fn)
f.use = sf.get('use', default='literal')
faults.append(f)
soap.faults = faults
self.operations[op.name] = op
def body(self, definitions, body, root):
""" add the input/output body properties """
if root is None:
body.use = 'literal'
body.namespace = definitions.tns
body.parts = ()
return
parts = root.get('parts')
if parts is None:
body.parts = ()
else:
body.parts = re.split('[\s,]', parts)
body.use = root.get('use', default='literal')
ns = root.get('namespace')
if ns is None:
body.namespace = definitions.tns
else:
prefix = root.findPrefix(ns, 'b0')
body.namespace = (prefix, ns)
def header(self, definitions, parent, root):
""" add the input/output header properties """
if root is None:
return
header = Facade('Header')
parent.headers.append(header)
header.use = root.get('use', default='literal')
ns = root.get('namespace')
if ns is None:
header.namespace = definitions.tns
else:
prefix = root.findPrefix(ns, 'h0')
header.namespace = (prefix, ns)
msg = root.get('message')
if msg is not None:
header.message = msg
part = root.get('part')
if part is not None:
header.part = part
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects. This includes
cross-linking information (from) the portType (to) the I{soap}
protocol information on the binding for each operation.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
self.resolveport(definitions)
for op in self.operations.values():
self.resolvesoapbody(definitions, op)
self.resolveheaders(definitions, op)
self.resolvefaults(definitions, op)
def resolveport(self, definitions):
"""
Resolve port_type reference.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
ref = qualify(self.type, self.root, definitions.tns)
port_type = definitions.port_types.get(ref)
if port_type is None:
raise Exception("portType '%s', not-found" % self.type)
else:
self.type = port_type
def resolvesoapbody(self, definitions, op):
"""
Resolve soap body I{message} parts by
cross-referencing with operation defined in port type.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation}
"""
ptop = self.type.operation(op.name)
if ptop is None:
            raise Exception("operation '%s' not defined in portType" % op.name)
soap = op.soap
parts = soap.input.body.parts
if len(parts):
pts = []
for p in ptop.input.parts:
if p.name in parts:
pts.append(p)
soap.input.body.parts = pts
else:
soap.input.body.parts = ptop.input.parts
parts = soap.output.body.parts
if len(parts):
pts = []
for p in ptop.output.parts:
if p.name in parts:
pts.append(p)
soap.output.body.parts = pts
else:
soap.output.body.parts = ptop.output.parts
def resolveheaders(self, definitions, op):
"""
Resolve soap header I{message} references.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation}
"""
soap = op.soap
headers = soap.input.headers + soap.output.headers
for header in headers:
mn = header.message
ref = qualify(mn, self.root, definitions.tns)
message = definitions.messages.get(ref)
if message is None:
                raise Exception("message '%s', not-found" % mn)
pn = header.part
for p in message.parts:
if p.name == pn:
header.part = p
break
if pn == header.part:
                raise Exception("message '%s' has no part named '%s'" % (ref, pn))
def resolvefaults(self, definitions, op):
"""
Resolve soap fault I{message} references by
cross-referencing with operation defined in port type.
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param op: An I{operation} object.
@type op: I{operation}
"""
ptop = self.type.operation(op.name)
if ptop is None:
            raise Exception("operation '%s' not defined in portType" % op.name)
soap = op.soap
for fault in soap.faults:
for f in ptop.faults:
if f.name == fault.name:
fault.parts = f.message.parts
continue
if hasattr(fault, 'parts'):
continue
            raise Exception("fault '%s' not defined in portType '%s'" % (fault.name, self.type.name))
def operation(self, name):
"""
Shortcut used to get a contained operation by name.
@param name: An operation name.
@type name: str
@return: The named operation.
@rtype: Operation
@raise L{MethodNotFound}: When not found.
"""
try:
return self.operations[name]
except:
raise MethodNotFound(name)
def __gt__(self, other):
return ( not isinstance(other, Service) )
class Port(NamedObject):
"""
Represents a service port.
@ivar service: A service.
@type service: L{Service}
@ivar binding: A binding name.
@type binding: str
@ivar location: The service location (url).
@type location: str
"""
def __init__(self, root, definitions, service):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
@param service: A service object.
@type service: L{Service}
"""
NamedObject.__init__(self, root, definitions)
self.__service = service
self.binding = root.get('binding')
address = root.getChild('address')
if address is None:
self.location = None
else:
self.location = address.get('location').encode('utf-8')
self.methods = {}
def method(self, name):
"""
Get a method defined in this portType by name.
@param name: A method name.
@type name: str
@return: The requested method object.
@rtype: I{Method}
"""
return self.methods.get(name)
class Service(NamedObject):
"""
Represents <service/>.
    @ivar ports: The contained ports.
    @type ports: [Port,..]
@ivar methods: The contained methods for all ports.
@type methods: [Method,..]
"""
def __init__(self, root, definitions):
"""
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
NamedObject.__init__(self, root, definitions)
self.ports = []
for p in root.getChildren('port'):
port = Port(p, definitions, self)
self.ports.append(port)
def port(self, name):
"""
Locate a port by name.
@param name: A port name.
@type name: str
@return: The port object.
@rtype: L{Port}
"""
for p in self.ports:
if p.name == name:
return p
return None
def setlocation(self, url, names=None):
"""
Override the invocation location (url) for service method.
@param url: A url location.
@type url: A url.
@param names: A list of method names. None=ALL
@type names: [str,..]
"""
for p in self.ports:
for m in p.methods.values():
if names is None or m.name in names:
m.location = url
def resolve(self, definitions):
"""
Resolve named references to other WSDL objects.
Ports without soap bindings are discarded.
@param definitions: A definitions object.
@type definitions: L{Definitions}
"""
filtered = []
for p in self.ports:
ref = qualify(p.binding, self.root, definitions.tns)
binding = definitions.bindings.get(ref)
if binding is None:
raise Exception("binding '%s', not-found" % p.binding)
if binding.soap is None:
log.debug('binding "%s" - not a soap, discarded', binding.name)
continue
p.binding = binding
filtered.append(p)
self.ports = filtered
def __gt__(self, other):
return True
class Factory:
"""
Simple WSDL object factory.
@cvar tags: Dictionary of tag->constructor mappings.
@type tags: dict
"""
tags =\
{
'import' : Import,
'types' : Types,
'message' : Message,
'portType' : PortType,
'binding' : Binding,
'service' : Service,
}
@classmethod
def create(cls, root, definitions):
"""
Create an object based on the root tag name.
@param root: An XML root element.
@type root: L{Element}
@param definitions: A definitions object.
@type definitions: L{Definitions}
@return: The created object.
@rtype: L{WObject}
"""
fn = cls.tags.get(root.name)
if fn is not None:
return fn(root, definitions)
else:
return None
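# A minimal usage sketch for loading a WSDL and walking its soap ports. The
# file URL is hypothetical, and a transport must be assigned explicitly since
# Options defaults it to None (HttpAuthenticated is just one possible choice).
if __name__ == '__main__':
    from suds.options import Options
    from suds.transport.https import HttpAuthenticated
    options = Options()
    options.transport = HttpAuthenticated()
    wsdl = Definitions('file:///tmp/service.wsdl', options)  # hypothetical WSDL
    for service in wsdl.services:
        for port in service.ports:
            print port.name, port.location, sorted(port.methods.keys())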
# File: CouchPotato_CouchPotatoServer/libs/suds/reader.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains xml document reader classes.
"""
from suds.sax.parser import Parser
from suds.transport import Request
from suds.cache import Cache, NoCache
from suds.store import DocumentStore
from suds.plugin import PluginContainer
from logging import getLogger
log = getLogger(__name__)
class Reader:
"""
    The reader provides integration with the cache.
@ivar options: An options object.
@type options: I{Options}
"""
def __init__(self, options):
"""
@param options: An options object.
@type options: I{Options}
"""
self.options = options
self.plugins = PluginContainer(options.plugins)
def mangle(self, name, x):
"""
Mangle the name by hashing the I{name} and appending I{x}.
@return: the mangled name.
"""
h = abs(hash(name))
return '%s-%s' % (h, x)
class DocumentReader(Reader):
"""
The XML document reader provides an integration
between the SAX L{Parser} and the document cache.
"""
def open(self, url):
"""
Open an XML document at the specified I{url}.
        First, an attempt is made to retrieve the document from
the I{object cache}. If not found, it is downloaded and
parsed using the SAX parser. The result is added to the
cache for the next open().
@param url: A document url.
@type url: str.
@return: The specified XML document.
@rtype: I{Document}
"""
cache = self.cache()
id = self.mangle(url, 'document')
d = cache.get(id)
if d is None:
d = self.download(url)
cache.put(id, d)
self.plugins.document.parsed(url=url, document=d.root())
return d
def download(self, url):
"""
        Download the document.
        @param url: A document url.
        @type url: str.
        @return: A file pointer to the document.
@rtype: file-like
"""
store = DocumentStore()
fp = store.open(url)
if fp is None:
fp = self.options.transport.open(Request(url))
content = fp.read()
fp.close()
ctx = self.plugins.document.loaded(url=url, document=content)
content = ctx.document
sax = Parser()
return sax.parse(string=content)
def cache(self):
"""
Get the cache.
        @return: The I{options} cache when I{cachingpolicy} = B{0}, else a L{NoCache}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 0:
return self.options.cache
else:
return NoCache()
class DefinitionsReader(Reader):
"""
The WSDL definitions reader provides an integration
between the Definitions and the object cache.
@ivar fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
def __init__(self, options, fn):
"""
@param options: An options object.
@type options: I{Options}
@param fn: A factory function (constructor) used to
create the object not found in the cache.
@type fn: I{Constructor}
"""
Reader.__init__(self, options)
self.fn = fn
def open(self, url):
"""
Open a WSDL at the specified I{url}.
        First, an attempt is made to retrieve the WSDL from
        the I{object cache}. After being unpickled from the cache, the
I{options} attribute is restored.
If not found, it is downloaded and instantiated using the
I{fn} constructor and added to the cache for the next open().
@param url: A WSDL url.
@type url: str.
@return: The WSDL object.
@rtype: I{Definitions}
"""
cache = self.cache()
id = self.mangle(url, 'wsdl')
d = cache.get(id)
if d is None:
d = self.fn(url, self.options)
cache.put(id, d)
else:
d.options = self.options
for imp in d.imports:
imp.imported.options = self.options
return d
def cache(self):
"""
Get the cache.
        @return: The I{options} cache when I{cachingpolicy} = B{1}, else a L{NoCache}.
@rtype: L{Cache}
"""
if self.options.cachingpolicy == 1:
return self.options.cache
else:
return NoCache()
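# A minimal usage sketch of DocumentReader. With the default cachingpolicy of
# 0, parsed XML is kept in the configured cache keyed by the mangled URL. The
# URL is hypothetical, and a transport must be assigned since Options defaults
# it to None.
if __name__ == '__main__':
    from suds.options import Options
    from suds.transport.https import HttpAuthenticated
    from suds.cache import DocumentCache
    options = Options()
    options.transport = HttpAuthenticated()
    options.cache = DocumentCache(days=1)    # replaces the default NoCache
    reader = DocumentReader(options)
    document = reader.open('http://example.com/schema.xsd')  # hypothetical URL
    print document.root()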
# File: CouchPotato_CouchPotatoServer/libs/suds/store.py
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains XML text for documents to be distributed
with the suds lib. Also, contains classes for accessing
these documents.
"""
from StringIO import StringIO
from logging import getLogger
log = getLogger(__name__)
#
# Soap section 5 encoding schema.
#
encoding = \
"""<?xml version="1.0" encoding="UTF-8"?>
<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" xmlns:tns="http://schemas.xmlsoap.org/soap/encoding/" targetNamespace="http://schemas.xmlsoap.org/soap/encoding/">
<xs:attribute name="root">
<xs:annotation>
<xs:documentation>
'root' can be used to distinguish serialization roots from other
elements that are present in a serialization but are not roots of
a serialized value graph
</xs:documentation>
</xs:annotation>
<xs:simpleType>
<xs:restriction base="xs:boolean">
<xs:pattern value="0|1"/>
</xs:restriction>
</xs:simpleType>
</xs:attribute>
<xs:attributeGroup name="commonAttributes">
<xs:annotation>
<xs:documentation>
Attributes common to all elements that function as accessors or
represent independent (multi-ref) values. The href attribute is
intended to be used in a manner like CONREF. That is, the element
content should be empty iff the href attribute appears
</xs:documentation>
</xs:annotation>
<xs:attribute name="id" type="xs:ID"/>
<xs:attribute name="href" type="xs:anyURI"/>
<xs:anyAttribute namespace="##other" processContents="lax"/>
</xs:attributeGroup>
<!-- Global Attributes. The following attributes are intended to be usable via qualified attribute names on any complex type referencing them. -->
<!-- Array attributes. Needed to give the type and dimensions of an array's contents, and the offset for partially-transmitted arrays. -->
<xs:simpleType name="arrayCoordinate">
<xs:restriction base="xs:string"/>
</xs:simpleType>
<xs:attribute name="arrayType" type="xs:string"/>
<xs:attribute name="offset" type="tns:arrayCoordinate"/>
<xs:attributeGroup name="arrayAttributes">
<xs:attribute ref="tns:arrayType"/>
<xs:attribute ref="tns:offset"/>
</xs:attributeGroup>
<xs:attribute name="position" type="tns:arrayCoordinate"/>
<xs:attributeGroup name="arrayMemberAttributes">
<xs:attribute ref="tns:position"/>
</xs:attributeGroup>
<xs:group name="Array">
<xs:sequence>
<xs:any namespace="##any" minOccurs="0" maxOccurs="unbounded" processContents="lax"/>
</xs:sequence>
</xs:group>
<xs:element name="Array" type="tns:Array"/>
<xs:complexType name="Array">
<xs:annotation>
<xs:documentation>
'Array' is a complex type for accessors identified by position
</xs:documentation>
</xs:annotation>
<xs:group ref="tns:Array" minOccurs="0"/>
<xs:attributeGroup ref="tns:arrayAttributes"/>
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:complexType>
<!-- 'Struct' is a complex type for accessors identified by name.
Constraint: No element may be have the same name as any other,
nor may any element have a maxOccurs > 1. -->
<xs:element name="Struct" type="tns:Struct"/>
<xs:group name="Struct">
<xs:sequence>
<xs:any namespace="##any" minOccurs="0" maxOccurs="unbounded" processContents="lax"/>
</xs:sequence>
</xs:group>
<xs:complexType name="Struct">
<xs:group ref="tns:Struct" minOccurs="0"/>
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:complexType>
<!-- 'Base64' can be used to serialize binary data using base64 encoding
as defined in RFC2045 but without the MIME line length limitation. -->
<xs:simpleType name="base64">
<xs:restriction base="xs:base64Binary"/>
</xs:simpleType>
<!-- Element declarations corresponding to each of the simple types in the
XML Schemas Specification. -->
<xs:element name="duration" type="tns:duration"/>
<xs:complexType name="duration">
<xs:simpleContent>
<xs:extension base="xs:duration">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="dateTime" type="tns:dateTime"/>
<xs:complexType name="dateTime">
<xs:simpleContent>
<xs:extension base="xs:dateTime">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NOTATION" type="tns:NOTATION"/>
<xs:complexType name="NOTATION">
<xs:simpleContent>
<xs:extension base="xs:QName">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="time" type="tns:time"/>
<xs:complexType name="time">
<xs:simpleContent>
<xs:extension base="xs:time">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="date" type="tns:date"/>
<xs:complexType name="date">
<xs:simpleContent>
<xs:extension base="xs:date">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gYearMonth" type="tns:gYearMonth"/>
<xs:complexType name="gYearMonth">
<xs:simpleContent>
<xs:extension base="xs:gYearMonth">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gYear" type="tns:gYear"/>
<xs:complexType name="gYear">
<xs:simpleContent>
<xs:extension base="xs:gYear">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gMonthDay" type="tns:gMonthDay"/>
<xs:complexType name="gMonthDay">
<xs:simpleContent>
<xs:extension base="xs:gMonthDay">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gDay" type="tns:gDay"/>
<xs:complexType name="gDay">
<xs:simpleContent>
<xs:extension base="xs:gDay">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="gMonth" type="tns:gMonth"/>
<xs:complexType name="gMonth">
<xs:simpleContent>
<xs:extension base="xs:gMonth">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="boolean" type="tns:boolean"/>
<xs:complexType name="boolean">
<xs:simpleContent>
<xs:extension base="xs:boolean">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="base64Binary" type="tns:base64Binary"/>
<xs:complexType name="base64Binary">
<xs:simpleContent>
<xs:extension base="xs:base64Binary">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="hexBinary" type="tns:hexBinary"/>
<xs:complexType name="hexBinary">
<xs:simpleContent>
<xs:extension base="xs:hexBinary">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="float" type="tns:float"/>
<xs:complexType name="float">
<xs:simpleContent>
<xs:extension base="xs:float">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="double" type="tns:double"/>
<xs:complexType name="double">
<xs:simpleContent>
<xs:extension base="xs:double">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="anyURI" type="tns:anyURI"/>
<xs:complexType name="anyURI">
<xs:simpleContent>
<xs:extension base="xs:anyURI">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="QName" type="tns:QName"/>
<xs:complexType name="QName">
<xs:simpleContent>
<xs:extension base="xs:QName">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="string" type="tns:string"/>
<xs:complexType name="string">
<xs:simpleContent>
<xs:extension base="xs:string">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="normalizedString" type="tns:normalizedString"/>
<xs:complexType name="normalizedString">
<xs:simpleContent>
<xs:extension base="xs:normalizedString">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="token" type="tns:token"/>
<xs:complexType name="token">
<xs:simpleContent>
<xs:extension base="xs:token">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="language" type="tns:language"/>
<xs:complexType name="language">
<xs:simpleContent>
<xs:extension base="xs:language">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="Name" type="tns:Name"/>
<xs:complexType name="Name">
<xs:simpleContent>
<xs:extension base="xs:Name">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NMTOKEN" type="tns:NMTOKEN"/>
<xs:complexType name="NMTOKEN">
<xs:simpleContent>
<xs:extension base="xs:NMTOKEN">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NCName" type="tns:NCName"/>
<xs:complexType name="NCName">
<xs:simpleContent>
<xs:extension base="xs:NCName">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="NMTOKENS" type="tns:NMTOKENS"/>
<xs:complexType name="NMTOKENS">
<xs:simpleContent>
<xs:extension base="xs:NMTOKENS">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="ID" type="tns:ID"/>
<xs:complexType name="ID">
<xs:simpleContent>
<xs:extension base="xs:ID">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="IDREF" type="tns:IDREF"/>
<xs:complexType name="IDREF">
<xs:simpleContent>
<xs:extension base="xs:IDREF">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="ENTITY" type="tns:ENTITY"/>
<xs:complexType name="ENTITY">
<xs:simpleContent>
<xs:extension base="xs:ENTITY">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="IDREFS" type="tns:IDREFS"/>
<xs:complexType name="IDREFS">
<xs:simpleContent>
<xs:extension base="xs:IDREFS">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="ENTITIES" type="tns:ENTITIES"/>
<xs:complexType name="ENTITIES">
<xs:simpleContent>
<xs:extension base="xs:ENTITIES">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="decimal" type="tns:decimal"/>
<xs:complexType name="decimal">
<xs:simpleContent>
<xs:extension base="xs:decimal">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="integer" type="tns:integer"/>
<xs:complexType name="integer">
<xs:simpleContent>
<xs:extension base="xs:integer">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="nonPositiveInteger" type="tns:nonPositiveInteger"/>
<xs:complexType name="nonPositiveInteger">
<xs:simpleContent>
<xs:extension base="xs:nonPositiveInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="negativeInteger" type="tns:negativeInteger"/>
<xs:complexType name="negativeInteger">
<xs:simpleContent>
<xs:extension base="xs:negativeInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="long" type="tns:long"/>
<xs:complexType name="long">
<xs:simpleContent>
<xs:extension base="xs:long">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="int" type="tns:int"/>
<xs:complexType name="int">
<xs:simpleContent>
<xs:extension base="xs:int">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="short" type="tns:short"/>
<xs:complexType name="short">
<xs:simpleContent>
<xs:extension base="xs:short">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="byte" type="tns:byte"/>
<xs:complexType name="byte">
<xs:simpleContent>
<xs:extension base="xs:byte">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="nonNegativeInteger" type="tns:nonNegativeInteger"/>
<xs:complexType name="nonNegativeInteger">
<xs:simpleContent>
<xs:extension base="xs:nonNegativeInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedLong" type="tns:unsignedLong"/>
<xs:complexType name="unsignedLong">
<xs:simpleContent>
<xs:extension base="xs:unsignedLong">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedInt" type="tns:unsignedInt"/>
<xs:complexType name="unsignedInt">
<xs:simpleContent>
<xs:extension base="xs:unsignedInt">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedShort" type="tns:unsignedShort"/>
<xs:complexType name="unsignedShort">
<xs:simpleContent>
<xs:extension base="xs:unsignedShort">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="unsignedByte" type="tns:unsignedByte"/>
<xs:complexType name="unsignedByte">
<xs:simpleContent>
<xs:extension base="xs:unsignedByte">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="positiveInteger" type="tns:positiveInteger"/>
<xs:complexType name="positiveInteger">
<xs:simpleContent>
<xs:extension base="xs:positiveInteger">
<xs:attributeGroup ref="tns:commonAttributes"/>
</xs:extension>
</xs:simpleContent>
</xs:complexType>
<xs:element name="anyType"/>
</xs:schema>
"""
class DocumentStore:
"""
The I{suds} document store provides a local repository
for xml documents.
@cvar protocol: The URL protocol for the store.
@type protocol: str
@cvar store: The mapping of URL location to documents.
@type store: dict
"""
protocol = 'suds'
store = {
'schemas.xmlsoap.org/soap/encoding/' : encoding
}
def open(self, url):
"""
Open a document at the specified url.
@param url: A document URL.
@type url: str
@return: A file pointer to the document.
@rtype: StringIO
"""
protocol, location = self.split(url)
if protocol == self.protocol:
return self.find(location)
else:
return None
def find(self, location):
"""
Find the specified location in the store.
@param location: The I{location} part of a URL.
@type location: str
@return: An input stream to the document.
@rtype: StringIO
"""
try:
content = self.store[location]
return StringIO(content)
except KeyError:
reason = 'location "%s" not in document store' % location
raise Exception, reason
def split(self, url):
"""
Split the url into I{protocol} and I{location}
@param url: A URL.
@type url: str
@return: A tuple of (I{protocol}, I{location}).
@rtype: tuple
"""
parts = url.split('://', 1)
if len(parts) == 2:
return parts
else:
return (None, url)
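# Example added for illustration (not part of the original file). The store
# serves documents addressed with the 'suds' pseudo-protocol straight from
# the in-memory mapping above, so no network access is involved; any other
# protocol falls through and open() returns None.
if __name__ == '__main__':
    store = DocumentStore()
    fp = store.open('suds://schemas.xmlsoap.org/soap/encoding/')
    print fp.read(80)   # first bytes of the bundled soap-enc schema
    print store.open('http://example.com/other.xsd')   # None: not our protocol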
| 18,425 | Python | .py | 515 | 30.201942 | 169 | 0.675075 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,326 | resolver.py | CouchPotato_CouchPotatoServer/libs/suds/resolver.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{resolver} module provides a collection of classes that
provide wsdl/xsd named type resolution.
"""
import re
from logging import getLogger
from suds import *
from suds.sax import splitPrefix, Namespace
from suds.sudsobject import Object
from suds.xsd.query import BlindQuery, TypeQuery, qualify
log = getLogger(__name__)
class Resolver:
"""
An I{abstract} schema-type resolver.
@ivar schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
self.schema = schema
def find(self, name, resolved=True):
"""
Get the definition object for the schema object by name.
@param name: The name of a schema object.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
log.debug('searching schema for (%s)', name)
qref = qualify(name, self.schema.root, self.schema.tns)
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
return None
log.debug('found (%s) as (%s)', name, Repr(result))
if resolved:
result = result.resolve()
return result
class PathResolver(Resolver):
"""
Resolves the definition object for the schema type located at the specified path.
The path may contain (.) dot notation to specify nested types.
@ivar wsdl: A wsdl object.
@type wsdl: L{wsdl.Definitions}
"""
def __init__(self, wsdl, ps='.'):
"""
@param wsdl: A schema object.
@type wsdl: L{wsdl.Definitions}
@param ps: The path separator character
@type ps: char
"""
Resolver.__init__(self, wsdl.schema)
self.wsdl = wsdl
self.altp = re.compile('({)(.+)(})(.+)')
self.splitp = re.compile('({.+})*[^\%s]+' % ps[0])
def find(self, path, resolved=True):
"""
Get the definition object for the schema type located at the specified path.
The path may contain (.) dot notation to specify nested types.
Actually, the path separator is usually a (.) but can be redefined
during construction.
@param path: A (.) separated path to a schema type.
@type path: basestring
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = None
parts = self.split(path)
try:
result = self.root(parts)
if len(parts) > 1:
result = result.resolve(nobuiltin=True)
result = self.branch(result, parts)
result = self.leaf(result, parts)
if resolved:
result = result.resolve(nobuiltin=True)
except PathResolver.BadPath:
log.error('path: "%s", not-found' % path)
return result
def root(self, parts):
"""
Find the path root.
@param parts: A list of path parts.
@type parts: [str,..]
@return: The root.
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = None
name = parts[0]
log.debug('searching schema for (%s)', name)
qref = self.qualify(parts[0])
query = BlindQuery(qref)
result = query.execute(self.schema)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
else:
log.debug('found (%s) as (%s)', name, Repr(result))
return result
def branch(self, root, parts):
"""
Traverse the path until the leaf is reached.
@param parts: A list of path parts.
@type parts: [str,..]
@param root: The root.
@type root: L{xsd.sxbase.SchemaObject}
@return: The end of the branch.
@rtype: L{xsd.sxbase.SchemaObject}
"""
result = root
for part in parts[1:-1]:
name = splitPrefix(part)[1]
log.debug('searching parent (%s) for (%s)', Repr(result), name)
result, ancestry = result.get_child(name)
if result is None:
log.error('(%s) not-found', name)
raise PathResolver.BadPath(name)
else:
result = result.resolve(nobuiltin=True)
log.debug('found (%s) as (%s)', name, Repr(result))
return result
def leaf(self, parent, parts):
"""
Find the leaf.
@param parts: A list of path parts.
@type parts: [str,..]
@param parent: The leaf's parent.
@type parent: L{xsd.sxbase.SchemaObject}
@return: The leaf.
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = splitPrefix(parts[-1])[1]
if name.startswith('@'):
result, path = parent.get_attribute(name[1:])
else:
result, ancestry = parent.get_child(name)
if result is None:
raise PathResolver.BadPath(name)
return result
def qualify(self, name):
"""
Qualify the name as either:
- plain name
- ns prefixed name (eg: ns0:Person)
- fully ns qualified name (eg: {http://myns-uri}Person)
@param name: The name of an object in the schema.
@type name: str
@return: A qualified name.
@rtype: qname
"""
m = self.altp.match(name)
if m is None:
return qualify(name, self.wsdl.root, self.wsdl.tns)
else:
return (m.group(4), m.group(2))
def split(self, s):
"""
Split the string on (.) while preserving any (.) inside the
'{}' alternate syntax for full ns qualification.
@param s: A plain or qualified name.
@type s: str
@return: A list of the name's parts.
@rtype: [str,..]
"""
parts = []
b = 0
while 1:
m = self.splitp.match(s, b)
if m is None:
break
b,e = m.span()
parts.append(s[b:e])
b = e+1
return parts
class BadPath(Exception): pass
class TreeResolver(Resolver):
"""
The tree resolver is a I{stateful} tree resolver
used to resolve each node in a tree. As such, it mirrors
the tree structure to ensure that nodes are resolved in
context.
@ivar stack: The context stack.
@type stack: list
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
Resolver.__init__(self, schema)
self.stack = Stack()
def reset(self):
"""
Reset the resolver's state.
"""
self.stack = Stack()
def push(self, x):
"""
Push an I{object} onto the stack.
@param x: An object to push.
@type x: L{Frame}
@return: The pushed frame.
@rtype: L{Frame}
"""
if isinstance(x, Frame):
frame = x
else:
frame = Frame(x)
self.stack.append(frame)
log.debug('push: (%s)\n%s', Repr(frame), Repr(self.stack))
return frame
def top(self):
"""
Get the I{frame} at the top of the stack.
@return: The top I{frame}, else None.
@rtype: L{Frame}
"""
if len(self.stack):
return self.stack[-1]
else:
return Frame.Empty()
def pop(self):
"""
Pop the frame at the top of the stack.
@return: The popped frame, else None.
@rtype: L{Frame}
"""
if len(self.stack):
popped = self.stack.pop()
log.debug('pop: (%s)\n%s', Repr(popped), Repr(self.stack))
return popped
else:
log.debug('stack empty, not-popped')
return None
def depth(self):
"""
Get the current stack depth.
@return: The current stack depth.
@rtype: int
"""
return len(self.stack)
def getchild(self, name, parent):
""" get a child by name """
log.debug('searching parent (%s) for (%s)', Repr(parent), name)
if name.startswith('@'):
return parent.get_attribute(name[1:])
else:
return parent.get_child(name)
class NodeResolver(TreeResolver):
"""
The node resolver is a I{stateful} XML document resolver
used to resolve each node in a tree. As such, it mirrors
the tree structure to ensure that nodes are resolved in
context.
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
TreeResolver.__init__(self, schema)
def find(self, node, resolved=False, push=True):
"""
@param node: An xml node to be resolved.
@type node: L{sax.element.Element}
@param resolved: A flag indicating that the fully resolved type should be
returned.
@type resolved: boolean
@param push: Indicates that the resolved type should be
pushed onto the stack.
@type push: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = node.name
parent = self.top().resolved
if parent is None:
result, ancestry = self.query(name, node)
else:
result, ancestry = self.getchild(name, parent)
known = self.known(node)
if result is None:
return result
if push:
frame = Frame(result, resolved=known, ancestry=ancestry)
pushed = self.push(frame)
if resolved:
result = result.resolve()
return result
def findattr(self, name, resolved=True):
"""
Find an attribute type definition.
@param name: An attribute name.
@type name: basestring
@param resolved: A flag indicating that the fully resolved type should be
returned.
@type resolved: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
name = '@%s'%name
parent = self.top().resolved
if parent is None:
# note: 'node' is not defined in this method, so this branch raises a
# NameError when the resolver stack has no parent frame.
result, ancestry = self.query(name, node)
else:
result, ancestry = self.getchild(name, parent)
if result is None:
return result
if resolved:
result = result.resolve()
return result
def query(self, name, node):
""" blindly query the schema by name """
log.debug('searching schema for (%s)', name)
qref = qualify(name, node, node.namespace())
query = BlindQuery(qref)
result = query.execute(self.schema)
return (result, [])
def known(self, node):
""" resolve type referenced by @xsi:type """
ref = node.get('type', Namespace.xsins)
if ref is None:
return None
qref = qualify(ref, node, node.namespace())
query = BlindQuery(qref)
return query.execute(self.schema)
class GraphResolver(TreeResolver):
"""
The graph resolver is a I{stateful} L{Object} graph resolver
used to resolve each node in a tree. As such, it mirrors
the tree structure to ensure that nodes are resolved in
context.
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
TreeResolver.__init__(self, schema)
def find(self, name, object, resolved=False, push=True):
"""
@param name: The name of the object to be resolved.
@type name: basestring
@param object: The name's value.
@type object: (any|L{Object})
@param resolved: A flag indicating that the fully resolved type
should be returned.
@type resolved: boolean
@param push: Indicates that the resolved type should be
pushed onto the stack.
@type push: boolean
@return: The found schema I{type}
@rtype: L{xsd.sxbase.SchemaObject}
"""
known = None
parent = self.top().resolved
if parent is None:
result, ancestry = self.query(name)
else:
result, ancestry = self.getchild(name, parent)
if result is None:
return None
if isinstance(object, Object):
known = self.known(object)
if push:
frame = Frame(result, resolved=known, ancestry=ancestry)
pushed = self.push(frame)
if resolved:
if known is None:
result = result.resolve()
else:
result = known
return result
def query(self, name):
""" blindly query the schema by name """
log.debug('searching schema for (%s)', name)
schema = self.schema
wsdl = self.wsdl()
if wsdl is None:
qref = qualify(name, schema.root, schema.tns)
else:
qref = qualify(name, wsdl.root, wsdl.tns)
query = BlindQuery(qref)
result = query.execute(schema)
return (result, [])
def wsdl(self):
""" get the wsdl """
container = self.schema.container
if container is None:
return None
else:
return container.wsdl
def known(self, object):
""" get the type specified in the object's metadata """
try:
md = object.__metadata__
known = md.sxtype
return known
except:
pass
class Frame:
def __init__(self, type, resolved=None, ancestry=()):
self.type = type
if resolved is None:
resolved = type.resolve()
self.resolved = resolved.resolve()
self.ancestry = ancestry
def __str__(self):
return '%s\n%s\n%s' % \
(Repr(self.type),
Repr(self.resolved),
[Repr(t) for t in self.ancestry])
class Empty:
def __getattr__(self, name):
if name == 'ancestry':
return ()
else:
return None
class Stack(list):
def __repr__(self):
result = []
for item in self:
result.append(repr(item))
return '\n'.join(result)
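# Illustration added at the end of the module: the path-splitting behaviour
# used by PathResolver.split(), shown standalone. The regular expression is
# the same one compiled in PathResolver.__init__ for the default '.'
# separator; dots inside the '{uri}' qualification form are preserved.
if __name__ == '__main__':
    splitp = re.compile('({.+})*[^\.]+')
    def _split(s):
        parts, b = [], 0
        while 1:
            m = splitp.match(s, b)
            if m is None:
                break
            b, e = m.span()
            parts.append(s[b:e])
            b = e + 1
        return parts
    print _split('Person.name.first')
    print _split('{http://example.com/ns}Person.name')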
| 15,814 | Python | .py | 450 | 25.897778 | 86 | 0.57564 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,327 | soaparray.py | CouchPotato_CouchPotatoServer/libs/suds/soaparray.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{soaparray} module provides XSD extensions for handling
soap (section 5) encoded arrays.
"""
from suds import *
from logging import getLogger
from suds.xsd.sxbasic import Factory as SXFactory
from suds.xsd.sxbasic import Attribute as SXAttribute
class Attribute(SXAttribute):
"""
Represents an XSD <attribute/> that handles special
attributes that are extensions for WSDLs.
@ivar aty: Array type information.
@type aty: The value of wsdl:arrayType.
"""
def __init__(self, schema, root, aty):
"""
@param aty: Array type information.
@type aty: The value of wsdl:arrayType.
"""
SXAttribute.__init__(self, schema, root)
if aty.endswith('[]'):
self.aty = aty[:-2]
else:
self.aty = aty
def autoqualified(self):
aqs = SXAttribute.autoqualified(self)
aqs.append('aty')
return aqs
def description(self):
d = SXAttribute.description(self)
d = d+('aty',)
return d
#
# Builder function, only builds Attribute when arrayType
# attribute is defined on root.
#
def __fn(x, y):
ns = (None, "http://schemas.xmlsoap.org/wsdl/")
aty = y.get('arrayType', ns=ns)
if aty is None:
return SXAttribute(x, y)
else:
return Attribute(x, y, aty)
#
# Remap <xs:attribute/> tags to the __fn() builder.
#
SXFactory.maptag('attribute', __fn)
| 2,262 | Python | .py | 63 | 31.460317 | 76 | 0.693437 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,328 | serviceproxy.py | CouchPotato_CouchPotatoServer/libs/suds/serviceproxy.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The service proxy provides access to web services.
Replaced by: L{client.Client}
"""
from logging import getLogger
from suds import *
from suds.client import Client
log = getLogger(__name__)
class ServiceProxy(object):
"""
A lightweight soap based web service proxy.
@ivar __client__: A client.
Everything is delegated to the 2nd generation API.
@type __client__: L{Client}
@note: Deprecated, replaced by L{Client}.
"""
def __init__(self, url, **kwargs):
"""
@param url: The URL for the WSDL.
@type url: str
@param kwargs: keyword arguments.
@keyword faults: Raise faults raised by server (default:True),
else return tuple from service method invocation as (http code, object).
@type faults: boolean
@keyword proxy: An http proxy to be specified on requests (default:{}).
The proxy is defined as {protocol:proxy,}
@type proxy: dict
"""
client = Client(url, **kwargs)
self.__client__ = client
def get_instance(self, name):
"""
Get an instance of a WSDL type by name
@param name: The name of a type defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def get_enum(self, name):
"""
Get an instance of an enumeration defined in the WSDL by name.
@param name: The name of an enumeration defined in the WSDL.
@type name: str
@return: An instance on success, else None
@rtype: L{sudsobject.Object}
"""
return self.__client__.factory.create(name)
def __str__(self):
return str(self.__client__)
def __unicode__(self):
return unicode(self.__client__)
def __getattr__(self, name):
builtin = name.startswith('__') and name.endswith('__')
if builtin:
return self.__dict__[name]
else:
return getattr(self.__client__.service, name)
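# Deprecated-usage sketch (illustration only; the URL and the getPersonById
# operation are placeholders, not a real service). New code should use
# suds.client.Client directly; the proxy merely delegates attribute access to
# client.service as shown in __getattr__ above.
#
#   proxy = ServiceProxy('http://example.com/service?wsdl')
#   person = proxy.get_instance('Person')   # client.factory.create('Person')
#   result = proxy.getPersonById(42)        # forwarded to client.service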
| 2,978 | Python | .py | 73 | 34.041096 | 88 | 0.65272 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,329 | servicedefinition.py | CouchPotato_CouchPotatoServer/libs/suds/servicedefinition.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{service definition} provides a textual representation of a service.
"""
from logging import getLogger
from suds import *
import suds.metrics as metrics
from suds.sax import Namespace
log = getLogger(__name__)
class ServiceDefinition:
"""
A service definition provides an object used to generate a textual description
of a service.
@ivar wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
@ivar service: The service object.
@type service: L{suds.wsdl.Service}
@ivar ports: A list of port-tuple: (port, [(method-name, pdef)])
@type ports: [port-tuple,..]
@ivar prefixes: A list of remapped prefixes.
@type prefixes: [(prefix,uri),..]
@ivar types: A list of type definitions
@type types: [I{Type},..]
"""
def __init__(self, wsdl, service):
"""
@param wsdl: A wsdl object
@type wsdl: L{Definitions}
@param service: A service B{name}.
@type service: str
"""
self.wsdl = wsdl
self.service = service
self.ports = []
self.params = []
self.types = []
self.prefixes = []
self.addports()
self.paramtypes()
self.publictypes()
self.getprefixes()
self.pushprefixes()
def pushprefixes(self):
"""
Add our prefixes to the wsdl so that when users invoke methods
and reference the prefixes, they will resolve properly.
"""
for ns in self.prefixes:
self.wsdl.root.addPrefix(ns[0], ns[1])
def addports(self):
"""
Look through the list of service ports and construct a list of tuples where
each tuple is used to describe a port and its list of methods as:
(port, [method]). Each method is a tuple: (name, [pdef,..]) where each pdef is
a tuple: (param-name, type).
"""
timer = metrics.Timer()
timer.start()
for port in self.service.ports:
p = self.findport(port)
for op in port.binding.operations.values():
m = p[0].method(op.name)
binding = m.binding.input
method = (m.name, binding.param_defs(m))
p[1].append(method)
metrics.log.debug("method '%s' created: %s", m.name, timer)
p[1].sort()
timer.stop()
def findport(self, port):
"""
Find and return a port tuple for the specified port.
Created and added when not found.
@param port: A port.
@type port: I{service.Port}
@return: A port tuple.
@rtype: (port, [method])
"""
for p in self.ports:
if p[0] == port: return p
p = (port, [])
self.ports.append(p)
return p
def getprefixes(self):
"""
Add prefixes for each namespace referenced by parameter types.
"""
namespaces = []
for l in (self.params, self.types):
for t,r in l:
ns = r.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
if Namespace.xs(ns) or Namespace.xsd(ns):
continue
namespaces.append(ns[1])
if t == r: continue
ns = t.namespace()
if ns[1] is None: continue
if ns[1] in namespaces: continue
namespaces.append(ns[1])
i = 0
namespaces.sort()
for u in namespaces:
p = self.nextprefix()
ns = (p, u)
self.prefixes.append(ns)
def paramtypes(self):
""" get all parameter types """
for m in [p[1] for p in self.ports]:
for p in [p[1] for p in m]:
for pd in p:
if pd[1] in self.params: continue
item = (pd[1], pd[1].resolve())
self.params.append(item)
def publictypes(self):
""" get all public types """
for t in self.wsdl.schema.types.values():
if t in self.params: continue
if t in self.types: continue
item = (t, t)
self.types.append(item)
tc = lambda x,y: cmp(x[0].name, y[0].name)
self.types.sort(cmp=tc)
def nextprefix(self):
"""
Get the next available prefix. This means a prefix starting with 'ns' with
a number appended as (ns0, ns1, ..) that is not already defined on the
wsdl document.
"""
used = [ns[0] for ns in self.prefixes]
used += [ns[0] for ns in self.wsdl.root.nsprefixes.items()]
for n in range(0,1024):
p = 'ns%d'%n
if p not in used:
return p
raise Exception('prefixes exhausted')
def getprefix(self, u):
"""
Get the prefix for the specified namespace (uri)
@param u: A namespace uri.
@type u: str
@return: The mapped prefix.
@rtype: str
"""
for ns in Namespace.all:
if u == ns[1]: return ns[0]
for ns in self.prefixes:
if u == ns[1]: return ns[0]
raise Exception('ns (%s) not mapped' % u)
def xlate(self, type):
"""
Get a (namespace) translated I{qualified} name for specified type.
@param type: A schema type.
@type type: I{suds.xsd.sxbasic.SchemaObject}
@return: A translated I{qualified} name.
@rtype: str
"""
resolved = type.resolve()
name = resolved.name
if type.unbounded():
name += '[]'
ns = resolved.namespace()
if ns[1] == self.wsdl.tns[1]:
return name
prefix = self.getprefix(ns[1])
return ':'.join((prefix, name))
def description(self):
"""
Get a textual description of the service that this object represents.
@return: A textual description.
@rtype: str
"""
s = []
indent = (lambda n : '\n%*s'%(n*3,' '))
s.append('Service ( %s ) tns="%s"' % (self.service.name, self.wsdl.tns[1]))
s.append(indent(1))
s.append('Prefixes (%d)' % len(self.prefixes))
for p in self.prefixes:
s.append(indent(2))
s.append('%s = "%s"' % p)
s.append(indent(1))
s.append('Ports (%d):' % len(self.ports))
for p in self.ports:
s.append(indent(2))
s.append('(%s)' % p[0].name)
s.append(indent(3))
s.append('Methods (%d):' % len(p[1]))
for m in p[1]:
sig = []
s.append(indent(4))
sig.append(m[0])
sig.append('(')
for p in m[1]:
sig.append(self.xlate(p[1]))
sig.append(' ')
sig.append(p[0])
sig.append(', ')
sig.append(')')
try:
s.append(''.join(sig))
except:
pass
s.append(indent(3))
s.append('Types (%d):' % len(self.types))
for t in self.types:
s.append(indent(4))
s.append(self.xlate(t[0]))
s.append('\n\n')
return ''.join(s)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
try:
return self.description()
except Exception, e:
log.exception(e)
return tostr(e)
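# Usage sketch (illustration only; the URL is a placeholder): the text built
# by description() above is what suds emits when a Client instance is
# printed, listing the service's prefixes, ports, methods and types.
#
#   from suds.client import Client
#   client = Client('http://example.com/service?wsdl')
#   print client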
| 8,478 | Python | .py | 231 | 26.480519 | 84 | 0.545689 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,330 | encoded.py | CouchPotato_CouchPotatoServer/libs/suds/umx/encoded.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides soap encoded unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.typed import Typed
from suds.sax import splitPrefix, Namespace
log = getLogger(__name__)
#
# Add encoded extensions
# aty = The soap (section 5) encoded array type.
#
Content.extensions.append('aty')
class Encoded(Typed):
"""
A SOAP section (5) encoding unmarshaller.
This marshaller supports rpc/encoded soap styles.
"""
def start(self, content):
#
# Grab the array type and continue
#
self.setaty(content)
Typed.start(self, content)
def end(self, content):
#
# Squash soap encoded arrays into python lists. This is
# also where we ensure that empty arrays are represented
# as empty python lists.
#
aty = content.aty
if aty is not None:
self.promote(content)
return Typed.end(self, content)
def postprocess(self, content):
#
# Ensure proper rendering of empty arrays.
#
if content.aty is None:
return Typed.postprocess(self, content)
else:
return content.data
def setaty(self, content):
"""
Grab the (aty) soap-enc:arrayType and attach it to the
content for proper array processing later in end().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: self
@rtype: L{Encoded}
"""
name = 'arrayType'
ns = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
aty = content.node.get(name, ns)
if aty is not None:
content.aty = aty
parts = aty.split('[')
ref = parts[0]
if len(parts) == 2:
self.applyaty(content, ref)
else:
pass # multi-dimensional array; member type not applied
return self
def applyaty(self, content, xty):
"""
Apply the type referenced in the I{arrayType} to the content
(child nodes) of the array. Each element (node) in the array
that does not have an explicit xsi:type attribute is given one
based on the I{arrayType}.
@param content: An array content.
@type content: L{Content}
@param xty: The XSI type reference.
@type xty: str
@return: self
@rtype: L{Encoded}
"""
name = 'type'
ns = Namespace.xsins
parent = content.node
for child in parent.getChildren():
ref = child.get(name, ns)
if ref is None:
parent.addPrefix(ns[0], ns[1])
attr = ':'.join((ns[0], name))
child.set(attr, xty)
return self
def promote(self, content):
"""
Promote (replace) the content.data with the first attribute
of the current content.data that is a I{list}. Note: the
content.data may be empty or contain only _x attributes.
In either case, the content.data is assigned an empty list.
@param content: An array content.
@type content: L{Content}
"""
for n,v in content.data:
if isinstance(v, list):
content.data = v
return
content.data = []
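# Illustration added at the end of the module: the arrayType parsing done in
# setaty() above, shown on plain strings. The member type is everything
# before the first '[', and only single-dimension values (two split parts)
# have that type applied to the array members.
if __name__ == '__main__':
    for aty in ('xs:string[4]', 'ns0:Person[2][3]'):
        parts = aty.split('[')
        if len(parts) == 2:
            print aty, '->', parts[0], '(single dimension)'
        else:
            print aty, '->', parts[0], '(multi-dimensional, members untyped)'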
| 4,197 | Python | .py | 116 | 28.474138 | 76 | 0.625555 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,331 | typed.py | CouchPotato_CouchPotatoServer/libs/suds/umx/typed.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides typed unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.core import Core
from suds.resolver import NodeResolver, Frame
from suds.sudsobject import Factory
log = getLogger(__name__)
#
# Add typed extensions
# type = The expected xsd type
# real = The 'true' XSD type
#
Content.extensions.append('type')
Content.extensions.append('real')
class Typed(Core):
"""
A I{typed} XML unmarshaller
@ivar resolver: A schema type resolver.
@type resolver: L{NodeResolver}
"""
def __init__(self, schema):
"""
@param schema: A schema object.
@type schema: L{xsd.schema.Schema}
"""
self.resolver = NodeResolver(schema)
def process(self, node, type):
"""
Process an object graph representation of the xml L{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@param type: The I{optional} schema type.
@type type: L{xsd.sxbase.SchemaObject}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
content.type = type
return Core.process(self, content)
def reset(self):
log.debug('reset')
self.resolver.reset()
def start(self, content):
#
# Resolve to the schema type; build an object and setup metadata.
#
if content.type is None:
found = self.resolver.find(content.node)
if found is None:
log.error(self.resolver.schema)
raise TypeNotFound(content.node.qname())
content.type = found
else:
known = self.resolver.known(content.node)
frame = Frame(content.type, resolved=known)
self.resolver.push(frame)
real = self.resolver.top().resolved
content.real = real
cls_name = real.name
if cls_name is None:
cls_name = content.node.name
content.data = Factory.object(cls_name)
md = content.data.__metadata__
md.sxtype = real
def end(self, content):
self.resolver.pop()
def unbounded(self, content):
return content.type.unbounded()
def nillable(self, content):
resolved = content.type.resolve()
return ( content.type.nillable or \
(resolved.builtin() and resolved.nillable ) )
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
type = self.resolver.findattr(name)
if type is None:
log.warn('attribute (%s) type, not-found', name)
else:
value = self.translated(value, type)
Core.append_attribute(self, name, value, content)
def append_text(self, content):
"""
Append text nodes into L{Content.data}
Here is where the I{true} type is used to translate the value
into the proper python type.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
Core.append_text(self, content)
known = self.resolver.top().resolved
content.text = self.translated(content.text, known)
def translated(self, value, type):
""" translate using the schema type """
if value is not None:
resolved = type.resolve()
return resolved.translate(value)
else:
return value
| 4,646 | Python | .py | 124 | 29.943548 | 76 | 0.64764 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,332 | attrlist.py | CouchPotato_CouchPotatoServer/libs/suds/umx/attrlist.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides filtered attribute list classes.
"""
from suds import *
from suds.umx import *
from suds.sax import Namespace
class AttrList:
"""
A filtered attribute list.
Items are included during iteration if they are in either the (xs) or
(xml) namespaces.
@ivar raw: The I{raw} attribute list.
@type raw: list
"""
def __init__(self, attributes):
"""
@param attributes: A list of attributes
@type attributes: list
"""
self.raw = attributes
def real(self):
"""
Get list of I{real} attributes which exclude xs and xml attributes.
@return: A list of I{real} attributes.
@rtype: I{generator}
"""
for a in self.raw:
if self.skip(a): continue
yield a
def rlen(self):
"""
Get the number of I{real} attributes which exclude xs and xml attributes.
@return: A count of I{real} attributes.
@rtype: L{int}
"""
n = 0
for a in self.real():
n += 1
return n
def lang(self):
"""
Get the value of the I{xml:lang} attribute, when present.
@return: The xml:lang value, else None.
@rtype: basestring
"""
for a in self.raw:
if a.qname() == 'xml:lang':
return a.value
return None
def skip(self, attr):
"""
Get whether to skip (filter-out) the specified attribute.
@param attr: An attribute.
@type attr: I{Attribute}
@return: True if should be skipped.
@rtype: bool
"""
ns = attr.namespace()
skip = (
Namespace.xmlns[1],
'http://schemas.xmlsoap.org/soap/encoding/',
'http://schemas.xmlsoap.org/soap/envelope/',
'http://www.w3.org/2003/05/soap-envelope',
)
return ( Namespace.xs(ns) or ns[1] in skip )
| 2,811 | Python | .py | 80 | 27.8625 | 81 | 0.623188 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,333 | basic.py | CouchPotato_CouchPotatoServer/libs/suds/umx/basic.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides basic unmarshaller classes.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.core import Core
class Basic(Core):
"""
An object builder (unmarshaller).
"""
def process(self, node):
"""
Process an object graph representation of the xml I{node}.
@param node: An XML tree.
@type node: L{sax.element.Element}
@return: A suds object.
@rtype: L{Object}
"""
content = Content(node)
return Core.process(self, content)
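# Example added for illustration (requires only suds itself): unmarshal a
# small XML fragment into a suds object with the Basic unmarshaller above.
# Untyped unmarshalling returns text values as plain strings; no schema is
# consulted.
if __name__ == '__main__':
    from suds.sax.parser import Parser
    doc = Parser().parse(string='<person><name>Jeff</name><age>34</age></person>')
    person = Basic().process(doc.root())
    print person.name, person.age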
| 1,394 | Python | .py | 36 | 34.916667 | 76 | 0.719911 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,334 | __init__.py | CouchPotato_CouchPotatoServer/libs/suds/umx/__init__.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support
unmarshalling (XML).
"""
from suds.sudsobject import Object
class Content(Object):
"""
@ivar node: The content source node.
@type node: L{sax.element.Element}
@ivar data: The (optional) content data.
@type data: L{Object}
@ivar text: The (optional) content (xml) text.
@type text: basestring
"""
extensions = []
def __init__(self, node, **kwargs):
Object.__init__(self)
self.node = node
self.data = None
self.text = None
for k,v in kwargs.items():
setattr(self, k, v)
def __getattr__(self, name):
if name not in self.__dict__:
if name in self.extensions:
v = None
setattr(self, name, v)
else:
raise AttributeError, \
'Content has no attribute %s' % name
else:
v = self.__dict__[name]
return v
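# Illustration added at the end of the module: names registered in
# Content.extensions silently default to None on first access (see
# __getattr__ above); anything unregistered raises AttributeError as usual.
if __name__ == '__main__':
    Content.extensions.append('aty')
    c = Content(node=None)
    print c.aty          # None: registered extension, created on demand
    try:
        c.bogus
    except AttributeError, e:
        print e          # Content has no attribute bogus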
| 1,811 | Python | .py | 48 | 31.8125 | 76 | 0.656963 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,335 | core.py | CouchPotato_CouchPotatoServer/libs/suds/umx/core.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides base classes for XML->object I{unmarshalling}.
"""
from logging import getLogger
from suds import *
from suds.umx import *
from suds.umx.attrlist import AttrList
from suds.sax.text import Text
from suds.sudsobject import Factory, merge
log = getLogger(__name__)
reserved = { 'class':'cls', 'def':'dfn', }
class Core:
"""
The abstract XML I{node} unmarshaller. This class provides the
I{core} unmarshalling functionality.
"""
def process(self, content):
"""
Process an object graph representation of the xml I{node}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A suds object.
@rtype: L{Object}
"""
self.reset()
return self.append(content)
def append(self, content):
"""
Process the specified node and convert the XML document into
a I{suds} L{object}.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A I{append-result} tuple as: (L{Object}, I{value})
@rtype: I{append-result}
@note: This is not the proper entry point.
@see: L{process()}
"""
self.start(content)
self.append_attributes(content)
self.append_children(content)
self.append_text(content)
self.end(content)
return self.postprocess(content)
def postprocess(self, content):
"""
Perform final processing of the resulting data structure as follows:
- Mixed values (children and text) will have a result of the I{content.node}.
- Semi-simple values (attributes, no-children and text) will have a result of a
property object.
- Simple values (no-attributes, no-children with text nodes) will have a string
result equal to the value of the content.node.getText().
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: The post-processed result.
@rtype: I{any}
"""
node = content.node
if len(node.children) and node.hasText():
return node
attributes = AttrList(node.attributes)
if attributes.rlen() and \
not len(node.children) and \
node.hasText():
p = Factory.property(node.name, node.getText())
return merge(content.data, p)
if len(content.data):
return content.data
lang = attributes.lang()
if content.node.isnil():
return None
if not len(node.children) and content.text is None:
if self.nillable(content):
return None
else:
return Text('', lang=lang)
if isinstance(content.text, basestring):
return Text(content.text, lang=lang)
else:
return content.text
def append_attributes(self, content):
"""
Append attribute nodes into L{Content.data}.
Attributes in the I{schema} or I{xml} namespaces are skipped.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
attributes = AttrList(content.node.attributes)
for attr in attributes.real():
name = attr.name
value = attr.value
self.append_attribute(name, value, content)
def append_attribute(self, name, value, content):
"""
Append an attribute name/value into L{Content.data}.
@param name: The attribute name
@type name: basestring
@param value: The attribute's value
@type value: basestring
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
key = name
key = '_%s' % reserved.get(key, key)
setattr(content.data, key, value)
def append_children(self, content):
"""
Append child nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
for child in content.node:
cont = Content(child)
cval = self.append(cont)
key = reserved.get(child.name, child.name)
if key in content.data:
v = getattr(content.data, key)
if isinstance(v, list):
v.append(cval)
else:
setattr(content.data, key, [v, cval])
continue
if self.unbounded(cont):
if cval is None:
setattr(content.data, key, [])
else:
setattr(content.data, key, [cval,])
else:
setattr(content.data, key, cval)
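    # Note on append_children() above (added for clarity; not in the original source):
    # repeated child elements are collapsed into a list, e.g. two hypothetical <item/>
    # children yield content.data.item == [value1, value2], while a child whose schema
    # type is unbounded starts out as a list even when it occurs only once.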
def append_text(self, content):
"""
Append text nodes into L{Content.data}
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
if content.node.hasText():
content.text = content.node.getText()
def reset(self):
pass
def start(self, content):
"""
Processing on I{node} has started. Build and return
the proper object.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: A subclass of Object.
@rtype: L{Object}
"""
content.data = Factory.object(content.node.name)
def end(self, content):
"""
Processing on I{node} has ended.
@param content: The current content being unmarshalled.
@type content: L{Content}
"""
pass
def bounded(self, content):
"""
Get whether the content is bounded (not a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if bounded, else False
@rtype: boolean
'"""
return ( not self.unbounded(content) )
def unbounded(self, content):
"""
Get whether the object is unbounded (a list).
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if unbounded, else False
@rtype: boolean
'"""
return False
def nillable(self, content):
"""
Get whether the object is nillable.
@param content: The current content being unmarshalled.
@type content: L{Content}
@return: True if nillable, else False
@rtype: boolean
'"""
return False
| 7,575 | Python | .py | 197 | 29.228426 | 90 | 0.614286 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,336 | sxbuiltin.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/sxbuiltin.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbuiltin} module provides classes that represent
XSD I{builtin} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.sax.date import *
from suds.xsd.sxbase import XBuiltin
import datetime as dt
log = getLogger(__name__)
class XString(XBuiltin):
"""
Represents an (xsd) <xs:string/> node
"""
pass
class XAny(XBuiltin):
"""
Represents an (xsd) <any/> node
"""
def __init__(self, schema, name):
XBuiltin.__init__(self, schema, name)
self.nillable = False
def get_child(self, name):
child = XAny(self.schema, name)
return (child, [])
def any(self):
return True
class XBoolean(XBuiltin):
"""
Represents an (xsd) boolean builtin type.
"""
translation = (
{ '1':True,'true':True,'0':False,'false':False },
{ True:'true',1:'true',False:'false',0:'false' },
)
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring):
return XBoolean.translation[0].get(value)
else:
return None
else:
if isinstance(value, (bool,int)):
return XBoolean.translation[1].get(value)
else:
return value
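# Illustrative note (added; not in the original source). Given the translation
# tables above, an XBoolean instance created via Factory.create(schema, 'boolean')
# behaves roughly as:
#   translate('true')               -> True      (xml -> python)
#   translate('0')                  -> False
#   translate(True, topython=False) -> 'true'    (python -> xml)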
class XInteger(XBuiltin):
"""
Represents an (xsd) xs:int builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return int(value)
else:
return None
else:
if isinstance(value, int):
return str(value)
else:
return value
class XLong(XBuiltin):
"""
Represents an (xsd) xs:long builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return long(value)
else:
return None
else:
if isinstance(value, (int,long)):
return str(value)
else:
return value
class XFloat(XBuiltin):
"""
Represents an (xsd) xs:float builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return float(value)
else:
return None
else:
if isinstance(value, float):
return str(value)
else:
return value
class XDate(XBuiltin):
"""
Represents an (xsd) xs:date builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return Date(value).date
else:
return None
else:
if isinstance(value, dt.date):
return str(Date(value))
else:
return value
class XTime(XBuiltin):
"""
Represents an (xsd) xs:time builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return Time(value).time
else:
return None
else:
if isinstance(value, dt.date):
return str(Time(value))
else:
return value
class XDateTime(XBuiltin):
"""
Represents an (xsd) xs:datetime builtin type.
"""
def translate(self, value, topython=True):
if topython:
if isinstance(value, basestring) and len(value):
return DateTime(value).datetime
else:
return None
else:
if isinstance(value, dt.date):
return str(DateTime(value))
else:
return value
class Factory:
tags =\
{
# any
'anyType' : XAny,
# strings
'string' : XString,
'normalizedString' : XString,
'ID' : XString,
'Name' : XString,
'QName' : XString,
'NCName' : XString,
'anySimpleType' : XString,
'anyURI' : XString,
'NOTATION' : XString,
'token' : XString,
'language' : XString,
'IDREFS' : XString,
'ENTITIES' : XString,
'IDREF' : XString,
'ENTITY' : XString,
'NMTOKEN' : XString,
'NMTOKENS' : XString,
# binary
'hexBinary' : XString,
'base64Binary' : XString,
# integers
'int' : XInteger,
'integer' : XInteger,
'unsignedInt' : XInteger,
'positiveInteger' : XInteger,
'negativeInteger' : XInteger,
'nonPositiveInteger' : XInteger,
'nonNegativeInteger' : XInteger,
# longs
'long' : XLong,
'unsignedLong' : XLong,
# shorts
'short' : XInteger,
'unsignedShort' : XInteger,
'byte' : XInteger,
'unsignedByte' : XInteger,
# floats
'float' : XFloat,
'double' : XFloat,
'decimal' : XFloat,
# dates & times
'date' : XDate,
'time' : XTime,
'dateTime': XDateTime,
'duration': XString,
'gYearMonth' : XString,
'gYear' : XString,
'gMonthDay' : XString,
'gDay' : XString,
'gMonth' : XString,
# boolean
'boolean' : XBoolean,
}
@classmethod
def maptag(cls, tag, fn):
"""
Map (override) tag => I{class} mapping.
@param tag: An xsd tag name.
@type tag: str
@param fn: A function or class.
@type fn: fn|class.
"""
cls.tags[tag] = fn
@classmethod
def create(cls, schema, name):
"""
Create an object based on the root tag name.
@param schema: A schema object.
@type schema: L{schema.Schema}
@param name: The name.
@type name: str
@return: The created object.
@rtype: L{XBuiltin}
"""
fn = cls.tags.get(name)
if fn is not None:
return fn(schema, name)
else:
return XBuiltin(schema, name)
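# Illustrative usage sketch (added; not part of the original module). Assuming a
# schema instance named `schema`, the factory maps builtin tag names to classes:
#   Factory.create(schema, 'int')      -> XInteger instance
#   Factory.create(schema, 'dateTime') -> XDateTime instance
#   Factory.create(schema, 'unknown')  -> plain XBuiltin fallback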
| 7,297 | Python | .py | 237 | 21.607595 | 76 | 0.56338 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,337 | deplist.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/deplist.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{deplist} module defines a class for performing dependency solving.
"""
from logging import getLogger
from suds import *
log = getLogger(__name__)
class DepList:
"""
    Dependency solving list.
    Items are tuples: (object, (deps,))
    @ivar unsorted: The raw (unsorted) items.
    @type unsorted: list
    @ivar index: The index of (unsorted) items keyed by each item's key.
    @type index: dict
@ivar stack: The sorting stack.
@type stack: list
@ivar pushed: The I{pushed} set tracks items that have been
processed.
@type pushed: set
@ivar sorted: The sorted list of items.
@type sorted: list
"""
def __init__(self):
""" """
self.unsorted = []
self.index = {}
self.stack = []
self.pushed = set()
self.sorted = None
def add(self, *items):
"""
Add items to be sorted.
@param items: One or more items to be added.
@type items: I{item}
@return: self
@rtype: L{DepList}
"""
for item in items:
self.unsorted.append(item)
key = item[0]
self.index[key] = item
return self
def sort(self):
"""
        Sort the list based on dependencies.
@return: The sorted items.
@rtype: list
"""
self.sorted = list()
self.pushed = set()
for item in self.unsorted:
popped = []
self.push(item)
while len(self.stack):
try:
top = self.top()
ref = top[1].next()
refd = self.index.get(ref)
if refd is None:
log.debug('"%s" not found, skipped', Repr(ref))
continue
self.push(refd)
except StopIteration:
popped.append(self.pop())
continue
for p in popped:
self.sorted.append(p)
self.unsorted = self.sorted
return self.sorted
def top(self):
"""
Get the item at the top of the stack.
@return: The top item.
@rtype: (item, iter)
"""
return self.stack[-1]
def push(self, item):
"""
        Push an item onto the sorting stack.
@param item: An item to push.
@type item: I{item}
        @return: None. The item is ignored if it has already been pushed.
        @rtype: None
"""
if item in self.pushed:
return
frame = (item, iter(item[1]))
self.stack.append(frame)
self.pushed.add(item)
def pop(self):
"""
Pop the top item off the stack and append
it to the sorted list.
@return: The popped item.
@rtype: I{item}
"""
try:
frame = self.stack.pop()
return frame[0]
except:
pass
if __name__ == '__main__':
a = ('a', ('x',))
b = ('b', ('a',))
c = ('c', ('a','b'))
d = ('d', ('c',))
e = ('e', ('d','a'))
f = ('f', ('e','c','d','a'))
x = ('x', ())
L = DepList()
L.add(c, e, d, b, f, a, x)
print [x[0] for x in L.sort()]
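    # Expected output of the demo above (computed from the dependencies declared
    # there; comment added, not in the original source):
    #   ['x', 'a', 'b', 'c', 'd', 'e', 'f']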
| 4,052 | Python | .py | 127 | 23.433071 | 76 | 0.548005 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,338 | __init__.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/__init__.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides an intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
transparent referenced type resolution and targeted denormalization.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace, splitPrefix
log = getLogger(__name__)
def qualify(ref, resolvers, defns=Namespace.default):
"""
Get a reference that is I{qualified} by namespace.
@param ref: A referenced schema type name.
@type ref: str
@param resolvers: A list of objects to be used to resolve types.
@type resolvers: [L{sax.element.Element},]
@param defns: An optional target namespace used to qualify references
when no prefix is specified.
@type defns: A default namespace I{tuple: (prefix,uri)} used when ref not prefixed.
@return: A qualified reference.
@rtype: (name, namespace-uri)
"""
ns = None
p, n = splitPrefix(ref)
if p is not None:
if not isinstance(resolvers, (list, tuple)):
resolvers = (resolvers,)
for r in resolvers:
resolved = r.resolvePrefix(p)
if resolved[1] is not None:
ns = resolved
break
if ns is None:
raise Exception('prefix (%s) not resolved' % p)
else:
ns = defns
return (n, ns[1])
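# Illustrative example (added; the element and namespace are hypothetical). Given
# an element that resolves prefix 'tns' to 'http://example.org/ns':
#   qualify('tns:Person', [element]) -> ('Person', 'http://example.org/ns')
# An unprefixed reference falls back to the defns argument:
#   qualify('Person', [element], ('tns', 'http://example.org/ns'))
#       -> ('Person', 'http://example.org/ns')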
def isqref(object):
"""
Get whether the object is a I{qualified reference}.
@param object: An object to be tested.
@type object: I{any}
@rtype: boolean
@see: L{qualify}
"""
return (\
isinstance(object, tuple) and \
len(object) == 2 and \
isinstance(object[0], basestring) and \
isinstance(object[1], basestring))
class Filter:
def __init__(self, inclusive=False, *items):
self.inclusive = inclusive
self.items = items
def __contains__(self, x):
if self.inclusive:
result = ( x in self.items )
else:
result = ( x not in self.items )
return result
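# Behavioural note (added; not in the original source): Filter defaults to an
# I{exclusion} list - "x in Filter(False, a, b)" is True for anything except a
# and b - while Filter(True, a, b) inverts that and acts as an inclusion list.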
| 3,007 | Python | .py | 77 | 33.766234 | 87 | 0.683328 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,339 | query.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/query.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{query} module defines a class for performing schema queries.
"""
from logging import getLogger
from suds import *
from suds.sudsobject import *
from suds.xsd import qualify, isqref
from suds.xsd.sxbuiltin import Factory
log = getLogger(__name__)
class Query(Object):
"""
Schema query base class.
"""
def __init__(self, ref=None):
"""
@param ref: The schema reference being queried.
@type ref: qref
"""
Object.__init__(self)
self.id = objid(self)
self.ref = ref
self.history = []
self.resolved = False
if not isqref(self.ref):
raise Exception('%s, must be qref' % tostr(self.ref))
def execute(self, schema):
"""
Execute this query using the specified schema.
@param schema: The schema associated with the query. The schema
is used by the query to search for items.
@type schema: L{schema.Schema}
@return: The item matching the search criteria.
@rtype: L{sxbase.SchemaObject}
"""
raise Exception, 'not-implemented by subclass'
def filter(self, result):
"""
Filter the specified result based on query criteria.
@param result: A potential result.
@type result: L{sxbase.SchemaObject}
@return: True if result should be excluded.
@rtype: boolean
"""
if result is None:
return True
reject = ( result in self.history )
if reject:
log.debug('result %s, rejected by\n%s', Repr(result), self)
return reject
def result(self, result):
"""
Query result post processing.
@param result: A query result.
@type result: L{sxbase.SchemaObject}
"""
if result is None:
log.debug('%s, not-found', self.ref)
return
if self.resolved:
result = result.resolve()
log.debug('%s, found as: %s', self.ref, Repr(result))
self.history.append(result)
return result
class BlindQuery(Query):
"""
Schema query class that I{blindly} searches for a reference in
the specified schema. It may be used to find Elements and Types but
will match on an Element first. This query will also find builtins.
"""
def execute(self, schema):
if schema.builtin(self.ref):
name = self.ref[0]
b = Factory.create(schema, name)
log.debug('%s, found builtin (%s)', self.id, name)
return b
result = None
for d in (schema.elements, schema.types):
result = d.get(self.ref)
if self.filter(result):
result = None
else:
break
if result is None:
eq = ElementQuery(self.ref)
eq.history = self.history
result = eq.execute(schema)
return self.result(result)
class TypeQuery(Query):
"""
Schema query class that searches for Type references in
the specified schema. Matches on root types only.
"""
def execute(self, schema):
if schema.builtin(self.ref):
name = self.ref[0]
b = Factory.create(schema, name)
log.debug('%s, found builtin (%s)', self.id, name)
return b
result = schema.types.get(self.ref)
if self.filter(result):
result = None
return self.result(result)
class GroupQuery(Query):
"""
Schema query class that searches for Group references in
the specified schema.
"""
def execute(self, schema):
result = schema.groups.get(self.ref)
if self.filter(result):
result = None
return self.result(result)
class AttrQuery(Query):
"""
Schema query class that searches for Attribute references in
the specified schema. Matches on root Attribute by qname first, then searches
deep into the document.
"""
def execute(self, schema):
result = schema.attributes.get(self.ref)
if self.filter(result):
result = self.__deepsearch(schema)
return self.result(result)
def __deepsearch(self, schema):
from suds.xsd.sxbasic import Attribute
result = None
for e in schema.all:
result = e.find(self.ref, (Attribute,))
if self.filter(result):
result = None
else:
break
return result
class AttrGroupQuery(Query):
"""
Schema query class that searches for attributeGroup references in
the specified schema.
"""
def execute(self, schema):
result = schema.agrps.get(self.ref)
if self.filter(result):
result = None
return self.result(result)
class ElementQuery(Query):
"""
Schema query class that searches for Element references in
the specified schema. Matches on root Elements by qname first, then searches
deep into the document.
"""
def execute(self, schema):
result = schema.elements.get(self.ref)
if self.filter(result):
result = self.__deepsearch(schema)
return self.result(result)
def __deepsearch(self, schema):
from suds.xsd.sxbasic import Element
result = None
for e in schema.all:
result = e.find(self.ref, (Element,))
if self.filter(result):
result = None
else:
break
return result
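# Illustrative usage sketch (added; not part of the original module). Assuming a
# built Schema instance named `schema` and a qualified reference such as the
# hypothetical ('Person', 'http://example.org/ns'):
#   query = ElementQuery(('Person', 'http://example.org/ns'))
#   person = query.execute(schema)   # root <element/> lookup, then a deep search
# TypeQuery and BlindQuery are constructed the same way; BlindQuery also resolves
# xsd builtins via the sxbuiltin Factory.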
| 6,451 | Python | .py | 179 | 27.77095 | 82 | 0.625568 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,340 | sxbase.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/sxbase.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbase} module provides I{base} classes that represent
schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.sax.element import Element
from suds.sax import Namespace
log = getLogger(__name__)
class SchemaObject(object):
"""
    A schema object is an extension to the object class
    with schema awareness.
@ivar root: The XML root element.
@type root: L{Element}
@ivar schema: The schema containing this object.
@type schema: L{schema.Schema}
    @ivar form_qualified: A flag that indicates that @elementFormDefault
        has a value of I{qualified}.
    @type form_qualified: boolean
    @ivar nillable: A flag that indicates that @nillable
        has a value of I{true}.
    @type nillable: boolean
    @ivar default: The default value.
    @type default: object
    @ivar rawchildren: A raw list of all children.
@type rawchildren: [L{SchemaObject},...]
"""
@classmethod
def prepend(cls, d, s, filter=Filter()):
"""
        Prepend schema objects from the B{s}ource list to
the B{d}estination list while applying the filter.
@param d: The destination list.
@type d: list
@param s: The source list.
@type s: list
@param filter: A filter that allows items to be prepended.
@type filter: L{Filter}
"""
i = 0
for x in s:
if x in filter:
d.insert(i, x)
i += 1
@classmethod
def append(cls, d, s, filter=Filter()):
"""
        Append schema objects from the B{s}ource list to
the B{d}estination list while applying the filter.
@param d: The destination list.
@type d: list
@param s: The source list.
@type s: list
@param filter: A filter that allows items to be appended.
@type filter: L{Filter}
"""
for item in s:
if item in filter:
d.append(item)
def __init__(self, schema, root):
"""
@param schema: The containing schema.
@type schema: L{schema.Schema}
@param root: The xml root node.
@type root: L{Element}
"""
self.schema = schema
self.root = root
self.id = objid(self)
self.name = root.get('name')
self.qname = (self.name, schema.tns[1])
self.min = root.get('minOccurs')
self.max = root.get('maxOccurs')
self.type = root.get('type')
self.ref = root.get('ref')
self.form_qualified = schema.form_qualified
self.nillable = False
self.default = root.get('default')
self.rawchildren = []
self.cache = {}
def attributes(self, filter=Filter()):
"""
Get only the attribute content.
@param filter: A filter to constrain the result.
@type filter: L{Filter}
@return: A list of tuples (attr, ancestry)
@rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
"""
result = []
for child, ancestry in self:
if child.isattr() and child in filter:
result.append((child, ancestry))
return result
def children(self, filter=Filter()):
"""
Get only the I{direct} or non-attribute content.
@param filter: A filter to constrain the result.
@type filter: L{Filter}
        @return: A list of tuples: (child, ancestry)
@rtype: [(L{SchemaObject}, [L{SchemaObject},..]),..]
"""
result = []
for child, ancestry in self:
if not child.isattr() and child in filter:
result.append((child, ancestry))
return result
def get_attribute(self, name):
"""
        Get (find) an I{attribute} by name.
        @param name: An attribute name.
@type name: str
@return: A tuple: the requested (attribute, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
"""
for child, ancestry in self.attributes():
if child.name == name:
return (child, ancestry)
return (None, [])
def get_child(self, name):
"""
Get (find) a I{non-attribute} child by name.
@param name: A child name.
@type name: str
@return: A tuple: the requested (child, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
"""
for child, ancestry in self.children():
if child.any() or child.name == name:
return (child, ancestry)
return (None, [])
def namespace(self, prefix=None):
"""
        Get this property's namespace.
@param prefix: The default prefix.
@type prefix: str
@return: The schema's target namespace
@rtype: (I{prefix},I{URI})
"""
ns = self.schema.tns
if ns[0] is None:
ns = (prefix, ns[1])
return ns
def default_namespace(self):
return self.root.defaultNamespace()
def unbounded(self):
"""
Get whether this node is unbounded I{(a collection)}
@return: True if unbounded, else False.
@rtype: boolean
"""
max = self.max
if max is None:
max = '1'
if max.isdigit():
return (int(max) > 1)
else:
return ( max == 'unbounded' )
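        # Added note: a missing maxOccurs defaults to '1', so only
        # maxOccurs="unbounded" or a numeric value greater than 1 makes this True.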
def optional(self):
"""
Get whether this type is optional.
@return: True if optional, else False
@rtype: boolean
"""
min = self.min
if min is None:
min = '1'
return ( min == '0' )
def required(self):
"""
Get whether this type is required.
@return: True if required, else False
@rtype: boolean
"""
return ( not self.optional() )
def resolve(self, nobuiltin=False):
"""
        Resolve and return the node's true self.
@param nobuiltin: Flag indicates that resolution must
not continue to include xsd builtins.
@return: The resolved (true) type.
@rtype: L{SchemaObject}
"""
return self.cache.get(nobuiltin, self)
def sequence(self):
"""
Get whether this is an <xs:sequence/>
@return: True if <xs:sequence/>, else False
@rtype: boolean
"""
return False
def xslist(self):
"""
Get whether this is an <xs:list/>
@return: True if any, else False
@rtype: boolean
"""
return False
def all(self):
"""
Get whether this is an <xs:all/>
@return: True if any, else False
@rtype: boolean
"""
return False
def choice(self):
"""
        Get whether this is an <xs:choice/>
@return: True if any, else False
@rtype: boolean
"""
return False
def any(self):
"""
Get whether this is an <xs:any/>
@return: True if any, else False
@rtype: boolean
"""
return False
def builtin(self):
"""
Get whether this is a schema-instance (xs) type.
@return: True if any, else False
@rtype: boolean
"""
return False
def enum(self):
"""
Get whether this is a simple-type containing an enumeration.
@return: True if any, else False
@rtype: boolean
"""
return False
def isattr(self):
"""
Get whether the object is a schema I{attribute} definition.
@return: True if an attribute, else False.
@rtype: boolean
"""
return False
def extension(self):
"""
Get whether the object is an extension of another type.
@return: True if an extension, else False.
@rtype: boolean
"""
return False
def restriction(self):
"""
        Get whether the object is a restriction of another type.
        @return: True if a restriction, else False.
@rtype: boolean
"""
return False
def mixed(self):
"""
        Get whether this is I{mixed} content.
"""
return False
def find(self, qref, classes=()):
"""
Find a referenced type in self or children.
@param qref: A qualified reference.
@type qref: qref
@param classes: A list of classes used to qualify the match.
@type classes: [I{class},...]
@return: The referenced type.
@rtype: L{SchemaObject}
@see: L{qualify()}
"""
if not len(classes):
classes = (self.__class__,)
if self.qname == qref and self.__class__ in classes:
return self
for c in self.rawchildren:
p = c.find(qref, classes)
if p is not None:
return p
return None
def translate(self, value, topython=True):
"""
Translate a value (type) to/from a python type.
@param value: A value to translate.
@return: The converted I{language} type.
"""
return value
def childtags(self):
"""
Get a list of valid child tag names.
@return: A list of child tag names.
@rtype: [str,...]
"""
return ()
def dependencies(self):
"""
        Get a list of dependencies for dereferencing.
        @return: A merge dependency index and a list of dependencies.
@rtype: (int, [L{SchemaObject},...])
"""
return (None, [])
def autoqualified(self):
"""
The list of I{auto} qualified attribute values.
Qualification means to convert values into I{qref}.
        @return: A list of attribute names.
@rtype: list
"""
return ['type', 'ref']
def qualify(self):
"""
Convert attribute values, that are references to other
        objects, into I{qref}. Qualified using the default document namespace.
Since many wsdls are written improperly: when the document does
not define a default namespace, the schema target namespace is used
to qualify references.
"""
defns = self.root.defaultNamespace()
if Namespace.none(defns):
defns = self.schema.tns
for a in self.autoqualified():
ref = getattr(self, a)
if ref is None:
continue
if isqref(ref):
continue
qref = qualify(ref, self.root, defns)
log.debug('%s, convert %s="%s" to %s', self.id, a, ref, qref)
setattr(self, a, qref)
def merge(self, other):
"""
Merge another object as needed.
"""
other.qualify()
for n in ('name',
'qname',
'min',
'max',
'default',
'type',
'nillable',
'form_qualified',):
if getattr(self, n) is not None:
continue
v = getattr(other, n)
if v is None:
continue
setattr(self, n, v)
def content(self, collection=None, filter=Filter(), history=None):
"""
Get a I{flattened} list of this nodes contents.
@param collection: A list to fill.
@type collection: list
@param filter: A filter used to constrain the result.
@type filter: L{Filter}
@param history: The history list used to prevent cyclic dependency.
@type history: list
@return: The filled list.
@rtype: list
"""
if collection is None:
collection = []
if history is None:
history = []
if self in history:
return collection
history.append(self)
if self in filter:
collection.append(self)
for c in self.rawchildren:
c.content(collection, filter, history[:])
return collection
def str(self, indent=0, history=None):
"""
Get a string representation of this object.
@param indent: The indent.
@type indent: int
@return: A string.
@rtype: str
"""
if history is None:
history = []
if self in history:
return '%s ...' % Repr(self)
history.append(self)
tab = '%*s'%(indent*3, '')
result = []
result.append('%s<%s' % (tab, self.id))
for n in self.description():
if not hasattr(self, n):
continue
v = getattr(self, n)
if v is None:
continue
result.append(' %s="%s"' % (n, v))
if len(self):
result.append('>')
for c in self.rawchildren:
result.append('\n')
result.append(c.str(indent+1, history[:]))
if c.isattr():
result.append('@')
result.append('\n%s' % tab)
result.append('</%s>' % self.__class__.__name__)
else:
result.append(' />')
return ''.join(result)
def description(self):
"""
Get the names used for str() and repr() description.
        @return: A list of relevant attribute names.
@rtype: [str,...]
"""
return ()
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return unicode(self.str())
def __repr__(self):
s = []
s.append('<%s' % self.id)
for n in self.description():
if not hasattr(self, n):
continue
v = getattr(self, n)
if v is None:
continue
s.append(' %s="%s"' % (n, v))
s.append(' />')
myrep = ''.join(s)
return myrep.encode('utf-8')
def __len__(self):
n = 0
for x in self: n += 1
return n
def __iter__(self):
return Iter(self)
def __getitem__(self, index):
i = 0
for c in self:
if i == index:
return c
class Iter:
"""
The content iterator - used to iterate the L{Content} children. The iterator
provides a I{view} of the children that is free of container elements
such as <sequence/> and <choice/>.
@ivar stack: A stack used to control nesting.
@type stack: list
"""
class Frame:
""" A content iterator frame. """
def __init__(self, sx):
"""
@param sx: A schema object.
@type sx: L{SchemaObject}
"""
self.sx = sx
self.items = sx.rawchildren
self.index = 0
def next(self):
"""
Get the I{next} item in the frame's collection.
@return: The next item or None
@rtype: L{SchemaObject}
"""
if self.index < len(self.items):
result = self.items[self.index]
self.index += 1
return result
def __init__(self, sx):
"""
@param sx: A schema object.
@type sx: L{SchemaObject}
"""
self.stack = []
self.push(sx)
def push(self, sx):
"""
Create a frame and push the specified object.
@param sx: A schema object to push.
@type sx: L{SchemaObject}
"""
self.stack.append(Iter.Frame(sx))
def pop(self):
"""
Pop the I{top} frame.
@return: The popped frame.
@rtype: L{Frame}
@raise StopIteration: when stack is empty.
"""
if len(self.stack):
return self.stack.pop()
else:
raise StopIteration()
def top(self):
"""
Get the I{top} frame.
@return: The top frame.
@rtype: L{Frame}
@raise StopIteration: when stack is empty.
"""
if len(self.stack):
return self.stack[-1]
else:
raise StopIteration()
def next(self):
"""
Get the next item.
@return: A tuple: the next (child, ancestry).
@rtype: (L{SchemaObject}, [L{SchemaObject},..])
        @raise StopIteration: At the end.
"""
frame = self.top()
while True:
result = frame.next()
if result is None:
self.pop()
return self.next()
if isinstance(result, Content):
ancestry = [f.sx for f in self.stack]
return (result, ancestry)
self.push(result)
return self.next()
def __iter__(self):
return self
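# Added note on the iterator above: iterating a SchemaObject yields
# (child, ancestry) tuples for Content nodes only; container frames such as
# <sequence/> and <choice/> are pushed onto the stack and therefore show up
# in the ancestry rather than as children.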
class XBuiltin(SchemaObject):
"""
Represents an (xsd) schema <xs:*/> node
"""
def __init__(self, schema, name):
"""
@param schema: The containing schema.
@type schema: L{schema.Schema}
"""
root = Element(name)
SchemaObject.__init__(self, schema, root)
self.name = name
self.nillable = True
def namespace(self, prefix=None):
return Namespace.xsdns
def builtin(self):
return True
def resolve(self, nobuiltin=False):
return self
class Content(SchemaObject):
"""
This class represents those schema objects that represent
real XML document content.
"""
pass
class NodeFinder:
"""
    Find nodes based on flexible criteria. The I{matcher}
    may be any object that implements a match(n) method.
@ivar matcher: An object used as criteria for match.
@type matcher: I{any}.match(n)
@ivar limit: Limit the number of matches. 0=unlimited.
@type limit: int
"""
def __init__(self, matcher, limit=0):
"""
@param matcher: An object used as criteria for match.
@type matcher: I{any}.match(n)
@param limit: Limit the number of matches. 0=unlimited.
@type limit: int
"""
self.matcher = matcher
self.limit = limit
def find(self, node, list):
"""
Traverse the tree looking for matches.
@param node: A node to match on.
@type node: L{SchemaObject}
@param list: A list to fill.
@type list: list
"""
if self.matcher.match(node):
list.append(node)
self.limit -= 1
if self.limit == 0:
return
for c in node.rawchildren:
self.find(c, list)
return self
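# Illustrative usage sketch (added; not part of the original module): the matcher
# protocol is just an object with a match(n) method. For example, the
# RestrictionMatcher defined later in this package uses it as:
#   matches = []
#   NodeFinder(RestrictionMatcher(), 1).find(node, matches)
# which stops after the first <restriction/> found under the given node.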
| 19,777 | Python | .py | 600 | 23.303333 | 81 | 0.550898 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,341 | sxbasic.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/sxbasic.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{sxbasic} module provides classes that represent
I{basic} schema objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
from suds.xsd.sxbase import *
from suds.xsd.query import *
from suds.sax import splitPrefix, Namespace
from suds.transport import TransportError
from suds.reader import DocumentReader
from urlparse import urljoin
log = getLogger(__name__)
class RestrictionMatcher:
"""
For use with L{NodeFinder} to match restriction.
"""
def match(self, n):
return isinstance(n, Restriction)
class TypedContent(Content):
"""
Represents any I{typed} content.
"""
def resolve(self, nobuiltin=False):
qref = self.qref()
if qref is None:
return self
key = 'resolved:nb=%s' % nobuiltin
cached = self.cache.get(key)
if cached is not None:
return cached
result = self
query = TypeQuery(qref)
query.history = [self]
log.debug('%s, resolving: %s\n using:%s', self.id, qref, query)
resolved = query.execute(self.schema)
if resolved is None:
log.debug(self.schema)
raise TypeNotFound(qref)
self.cache[key] = resolved
if resolved.builtin():
if nobuiltin:
result = self
else:
result = resolved
else:
result = resolved.resolve(nobuiltin)
return result
def qref(self):
"""
Get the I{type} qualified reference to the referenced xsd type.
This method takes into account simple types defined through
        restriction, which are detected by determining that self is simple
(len=0) and by finding a restriction child.
@return: The I{type} qualified reference.
@rtype: qref
"""
qref = self.type
if qref is None and len(self) == 0:
ls = []
m = RestrictionMatcher()
finder = NodeFinder(m, 1)
finder.find(self, ls)
if len(ls):
return ls[0].ref
return qref
class Complex(SchemaObject):
"""
Represents an (xsd) schema <xs:complexType/> node.
@cvar childtags: A list of valid child node names
@type childtags: (I{str},...)
"""
def childtags(self):
return (
'attribute',
'attributeGroup',
'sequence',
'all',
'choice',
'complexContent',
'simpleContent',
'any',
'group')
def description(self):
return ('name',)
def extension(self):
for c in self.rawchildren:
if c.extension():
return True
return False
def mixed(self):
for c in self.rawchildren:
if isinstance(c, SimpleContent) and c.mixed():
return True
return False
class Group(SchemaObject):
"""
Represents an (xsd) schema <xs:group/> node.
@cvar childtags: A list of valid child node names
@type childtags: (I{str},...)
"""
def childtags(self):
return ('sequence', 'all', 'choice')
def dependencies(self):
deps = []
midx = None
if self.ref is not None:
query = GroupQuery(self.ref)
g = query.execute(self.schema)
if g is None:
log.debug(self.schema)
raise TypeNotFound(self.ref)
deps.append(g)
midx = 0
return (midx, deps)
def merge(self, other):
SchemaObject.merge(self, other)
self.rawchildren = other.rawchildren
def description(self):
return ('name', 'ref',)
class AttributeGroup(SchemaObject):
"""
Represents an (xsd) schema <xs:attributeGroup/> node.
@cvar childtags: A list of valid child node names
@type childtags: (I{str},...)
"""
def childtags(self):
return ('attribute', 'attributeGroup')
def dependencies(self):
deps = []
midx = None
if self.ref is not None:
query = AttrGroupQuery(self.ref)
ag = query.execute(self.schema)
if ag is None:
log.debug(self.schema)
raise TypeNotFound(self.ref)
deps.append(ag)
midx = 0
return (midx, deps)
def merge(self, other):
SchemaObject.merge(self, other)
self.rawchildren = other.rawchildren
def description(self):
return ('name', 'ref',)
class Simple(SchemaObject):
"""
Represents an (xsd) schema <xs:simpleType/> node
"""
def childtags(self):
return ('restriction', 'any', 'list',)
def enum(self):
for child, ancestry in self.children():
if isinstance(child, Enumeration):
return True
return False
def mixed(self):
return len(self)
def description(self):
return ('name',)
def extension(self):
for c in self.rawchildren:
if c.extension():
return True
return False
def restriction(self):
for c in self.rawchildren:
if c.restriction():
return True
return False
class List(SchemaObject):
"""
Represents an (xsd) schema <xs:list/> node
"""
def childtags(self):
return ()
def description(self):
return ('name',)
def xslist(self):
return True
class Restriction(SchemaObject):
"""
Represents an (xsd) schema <xs:restriction/> node
"""
def __init__(self, schema, root):
SchemaObject.__init__(self, schema, root)
self.ref = root.get('base')
def childtags(self):
return ('enumeration', 'attribute', 'attributeGroup')
def dependencies(self):
deps = []
midx = None
if self.ref is not None:
query = TypeQuery(self.ref)
super = query.execute(self.schema)
if super is None:
log.debug(self.schema)
raise TypeNotFound(self.ref)
if not super.builtin():
deps.append(super)
midx = 0
return (midx, deps)
def restriction(self):
return True
def merge(self, other):
SchemaObject.merge(self, other)
filter = Filter(False, self.rawchildren)
self.prepend(self.rawchildren, other.rawchildren, filter)
def description(self):
return ('ref',)
class Collection(SchemaObject):
"""
Represents an (xsd) schema collection node:
- sequence
- choice
- all
"""
def childtags(self):
return ('element', 'sequence', 'all', 'choice', 'any', 'group')
class Sequence(Collection):
"""
Represents an (xsd) schema <xs:sequence/> node.
"""
def sequence(self):
return True
class All(Collection):
"""
Represents an (xsd) schema <xs:all/> node.
"""
def all(self):
return True
class Choice(Collection):
"""
Represents an (xsd) schema <xs:choice/> node.
"""
def choice(self):
return True
class ComplexContent(SchemaObject):
"""
Represents an (xsd) schema <xs:complexContent/> node.
"""
def childtags(self):
return ('attribute', 'attributeGroup', 'extension', 'restriction')
def extension(self):
for c in self.rawchildren:
if c.extension():
return True
return False
def restriction(self):
for c in self.rawchildren:
if c.restriction():
return True
return False
class SimpleContent(SchemaObject):
"""
Represents an (xsd) schema <xs:simpleContent/> node.
"""
def childtags(self):
return ('extension', 'restriction')
def extension(self):
for c in self.rawchildren:
if c.extension():
return True
return False
def restriction(self):
for c in self.rawchildren:
if c.restriction():
return True
return False
def mixed(self):
return len(self)
class Enumeration(Content):
"""
Represents an (xsd) schema <xs:enumeration/> node
"""
def __init__(self, schema, root):
Content.__init__(self, schema, root)
self.name = root.get('value')
def enum(self):
return True
class Element(TypedContent):
"""
Represents an (xsd) schema <xs:element/> node.
"""
def __init__(self, schema, root):
TypedContent.__init__(self, schema, root)
a = root.get('form')
if a is not None:
self.form_qualified = ( a == 'qualified' )
a = self.root.get('nillable')
if a is not None:
self.nillable = ( a in ('1', 'true') )
self.implany()
def implany(self):
"""
Set the type as any when implicit.
        An implicit <xs:any/> is when an element has no
body and no type defined.
@return: self
@rtype: L{Element}
"""
if self.type is None and \
self.ref is None and \
self.root.isempty():
self.type = self.anytype()
return self
def childtags(self):
return ('attribute', 'simpleType', 'complexType', 'any',)
def extension(self):
for c in self.rawchildren:
if c.extension():
return True
return False
def restriction(self):
for c in self.rawchildren:
if c.restriction():
return True
return False
def dependencies(self):
deps = []
midx = None
if self.ref is not None:
query = ElementQuery(self.ref)
e = query.execute(self.schema)
if e is None:
log.debug(self.schema)
raise TypeNotFound(self.ref)
deps.append(e)
midx = 0
return (midx, deps)
def merge(self, other):
SchemaObject.merge(self, other)
self.rawchildren = other.rawchildren
def description(self):
return ('name', 'ref', 'type')
def anytype(self):
""" create an xsd:anyType reference """
p,u = Namespace.xsdns
mp = self.root.findPrefix(u)
if mp is None:
mp = p
self.root.addPrefix(p, u)
return ':'.join((mp, 'anyType'))
class Extension(SchemaObject):
"""
Represents an (xsd) schema <xs:extension/> node.
"""
def __init__(self, schema, root):
SchemaObject.__init__(self, schema, root)
self.ref = root.get('base')
def childtags(self):
return ('attribute',
'attributeGroup',
'sequence',
'all',
'choice',
'group')
def dependencies(self):
deps = []
midx = None
if self.ref is not None:
query = TypeQuery(self.ref)
super = query.execute(self.schema)
if super is None:
log.debug(self.schema)
raise TypeNotFound(self.ref)
if not super.builtin():
deps.append(super)
midx = 0
return (midx, deps)
def merge(self, other):
SchemaObject.merge(self, other)
filter = Filter(False, self.rawchildren)
self.prepend(self.rawchildren, other.rawchildren, filter)
def extension(self):
return ( self.ref is not None )
def description(self):
return ('ref',)
class Import(SchemaObject):
"""
Represents an (xsd) schema <xs:import/> node
@cvar locations: A dictionary of namespace locations.
@type locations: dict
@ivar ns: The imported namespace.
@type ns: str
@ivar location: The (optional) location.
@type location: namespace-uri
@ivar opened: Opened and I{imported} flag.
@type opened: boolean
"""
locations = {}
@classmethod
def bind(cls, ns, location=None):
"""
Bind a namespace to a schema location (URI).
This is used for imports that don't specify a schemaLocation.
@param ns: A namespace-uri.
@type ns: str
@param location: The (optional) schema location for the
namespace. (default=ns).
@type location: str
"""
if location is None:
location = ns
cls.locations[ns] = location
def __init__(self, schema, root):
SchemaObject.__init__(self, schema, root)
self.ns = (None, root.get('namespace'))
self.location = root.get('schemaLocation')
if self.location is None:
self.location = self.locations.get(self.ns[1])
self.opened = False
def open(self, options):
"""
        Open and import the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
"""
if self.opened:
return
self.opened = True
log.debug('%s, importing ns="%s", location="%s"', self.id, self.ns[1], self.location)
result = self.locate()
if result is None:
if self.location is None:
log.debug('imported schema (%s) not-found', self.ns[1])
else:
result = self.download(options)
log.debug('imported:\n%s', result)
return result
def locate(self):
""" find the schema locally """
if self.ns[1] == self.schema.tns[1]:
return None
else:
return self.schema.locate(self.ns)
def download(self, options):
""" download the schema """
url = self.location
try:
if '://' not in url:
url = urljoin(self.schema.baseurl, url)
reader = DocumentReader(options)
d = reader.open(url)
root = d.root()
root.set('url', url)
return self.schema.instance(root, url, options)
except TransportError:
msg = 'imported schema (%s) at (%s), failed' % (self.ns[1], url)
log.error('%s, %s', self.id, msg, exc_info=True)
raise Exception(msg)
def description(self):
return ('ns', 'location')
class Include(SchemaObject):
"""
Represents an (xsd) schema <xs:include/> node
@ivar location: The (optional) location.
@type location: namespace-uri
@ivar opened: Opened and I{imported} flag.
@type opened: boolean
"""
locations = {}
def __init__(self, schema, root):
SchemaObject.__init__(self, schema, root)
self.location = root.get('schemaLocation')
if self.location is None:
self.location = self.locations.get(self.ns[1])
self.opened = False
def open(self, options):
"""
        Open and include the referenced schema.
@param options: An options dictionary.
@type options: L{options.Options}
@return: The referenced schema.
@rtype: L{Schema}
"""
if self.opened:
return
self.opened = True
log.debug('%s, including location="%s"', self.id, self.location)
result = self.download(options)
log.debug('included:\n%s', result)
return result
def download(self, options):
""" download the schema """
url = self.location
try:
if '://' not in url:
url = urljoin(self.schema.baseurl, url)
reader = DocumentReader(options)
d = reader.open(url)
root = d.root()
root.set('url', url)
self.__applytns(root)
return self.schema.instance(root, url, options)
except TransportError:
msg = 'include schema at (%s), failed' % url
log.error('%s, %s', self.id, msg, exc_info=True)
raise Exception(msg)
def __applytns(self, root):
""" make sure included schema has same tns. """
TNS = 'targetNamespace'
tns = root.get(TNS)
if tns is None:
tns = self.schema.tns[1]
root.set(TNS, tns)
else:
if self.schema.tns[1] != tns:
raise Exception, '%s mismatch' % TNS
def description(self):
        return ('location',)
class Attribute(TypedContent):
"""
Represents an (xsd) <attribute/> node
"""
def __init__(self, schema, root):
TypedContent.__init__(self, schema, root)
self.use = root.get('use', default='')
def childtags(self):
return ('restriction',)
def isattr(self):
return True
def get_default(self):
"""
Gets the <xs:attribute default=""/> attribute value.
@return: The default value for the attribute
@rtype: str
"""
return self.root.get('default', default='')
def optional(self):
return ( self.use != 'required' )
def dependencies(self):
deps = []
midx = None
if self.ref is not None:
query = AttrQuery(self.ref)
a = query.execute(self.schema)
if a is None:
log.debug(self.schema)
raise TypeNotFound(self.ref)
deps.append(a)
midx = 0
return (midx, deps)
def description(self):
return ('name', 'ref', 'type')
class Any(Content):
"""
Represents an (xsd) <any/> node
"""
def get_child(self, name):
root = self.root.clone()
root.set('note', 'synthesized (any) child')
child = Any(self.schema, root)
return (child, [])
def get_attribute(self, name):
root = self.root.clone()
root.set('note', 'synthesized (any) attribute')
attribute = Any(self.schema, root)
return (attribute, [])
def any(self):
return True
class Factory:
"""
@cvar tags: A factory to create object objects based on tag.
@type tags: {tag:fn,}
"""
tags =\
{
'import' : Import,
'include' : Include,
'complexType' : Complex,
'group' : Group,
'attributeGroup' : AttributeGroup,
'simpleType' : Simple,
'list' : List,
'element' : Element,
'attribute' : Attribute,
'sequence' : Sequence,
'all' : All,
'choice' : Choice,
'complexContent' : ComplexContent,
'simpleContent' : SimpleContent,
'restriction' : Restriction,
'enumeration' : Enumeration,
'extension' : Extension,
'any' : Any,
}
@classmethod
def maptag(cls, tag, fn):
"""
Map (override) tag => I{class} mapping.
@param tag: An xsd tag name.
@type tag: str
@param fn: A function or class.
@type fn: fn|class.
"""
cls.tags[tag] = fn
@classmethod
def create(cls, root, schema):
"""
Create an object based on the root tag name.
@param root: An XML root element.
@type root: L{Element}
@param schema: A schema object.
@type schema: L{schema.Schema}
@return: The created object.
@rtype: L{SchemaObject}
"""
fn = cls.tags.get(root.name)
if fn is not None:
return fn(schema, root)
else:
return None
@classmethod
def build(cls, root, schema, filter=('*',)):
"""
Build an xsobject representation.
@param root: An schema XML root.
@type root: L{sax.element.Element}
@param filter: A tag filter.
@type filter: [str,...]
@return: A schema object graph.
@rtype: L{sxbase.SchemaObject}
"""
children = []
for node in root.getChildren(ns=Namespace.xsdns):
if '*' in filter or node.name in filter:
child = cls.create(node, schema)
if child is None:
continue
children.append(child)
c = cls.build(node, schema, child.childtags())
child.rawchildren = c
return children
@classmethod
def collate(cls, children):
imports = []
elements = {}
attributes = {}
types = {}
groups = {}
agrps = {}
for c in children:
if isinstance(c, (Import, Include)):
imports.append(c)
continue
if isinstance(c, Attribute):
attributes[c.qname] = c
continue
if isinstance(c, Element):
elements[c.qname] = c
continue
if isinstance(c, Group):
groups[c.qname] = c
continue
if isinstance(c, AttributeGroup):
agrps[c.qname] = c
continue
types[c.qname] = c
for i in imports:
children.remove(i)
return (children, imports, attributes, elements, types, groups, agrps)
#######################################################
# Static Import Bindings :-(
#######################################################
Import.bind(
'http://schemas.xmlsoap.org/soap/encoding/',
'suds://schemas.xmlsoap.org/soap/encoding/')
Import.bind(
'http://www.w3.org/XML/1998/namespace',
'http://www.w3.org/2001/xml.xsd')
Import.bind(
'http://www.w3.org/2001/XMLSchema',
'http://www.w3.org/2001/XMLSchema.xsd')
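# Added note: Import.bind(ns) with no explicit location uses the namespace URI
# itself as the schemaLocation; the two-argument form above is only needed when
# the schema document lives at a different URL than the namespace.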
| 22,829 | Python | .py | 692 | 23.656069 | 93 | 0.565794 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,342 | schema.py | CouchPotato_CouchPotatoServer/libs/suds/xsd/schema.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{schema} module provides an intelligent representation of
an XSD schema. The I{raw} model is the XML tree and the I{model}
is the denormalized, objectified and intelligent view of the schema.
Most of the I{value-add} provided by the model is centered around
transparent referenced type resolution and targeted denormalization.
"""
import suds.metrics
from suds import *
from suds.xsd import *
from suds.xsd.sxbuiltin import *
from suds.xsd.sxbasic import Factory as BasicFactory
from suds.xsd.sxbuiltin import Factory as BuiltinFactory
from suds.xsd.sxbase import SchemaObject
from suds.xsd.deplist import DepList
from suds.sax.element import Element
from suds.sax import splitPrefix, Namespace
from logging import getLogger
log = getLogger(__name__)
class SchemaCollection:
"""
A collection of schema objects. This class is needed because WSDLs
    may contain more than one <schema/> node.
@ivar wsdl: A wsdl object.
@type wsdl: L{suds.wsdl.Definitions}
@ivar children: A list contained schemas.
@type children: [L{Schema},...]
@ivar namespaces: A dictionary of contained schemas by namespace.
@type namespaces: {str:L{Schema}}
"""
def __init__(self, wsdl):
"""
@param wsdl: A wsdl object.
@type wsdl: L{suds.wsdl.Definitions}
"""
self.wsdl = wsdl
self.children = []
self.namespaces = {}
def add(self, schema):
"""
Add a schema node to the collection. Schema(s) within the same target
namespace are consolidated.
@param schema: A schema object.
@type schema: (L{Schema})
"""
key = schema.tns[1]
existing = self.namespaces.get(key)
if existing is None:
self.children.append(schema)
self.namespaces[key] = schema
else:
existing.root.children += schema.root.children
existing.root.nsprefixes.update(schema.root.nsprefixes)
def load(self, options):
"""
Load the schema objects for the root nodes.
- de-references schemas
- merge schemas
@param options: An options dictionary.
@type options: L{options.Options}
@return: The merged schema.
@rtype: L{Schema}
"""
if options.autoblend:
self.autoblend()
for child in self.children:
child.build()
for child in self.children:
child.open_imports(options)
for child in self.children:
child.dereference()
log.debug('loaded:\n%s', self)
merged = self.merge()
log.debug('MERGED:\n%s', merged)
return merged
def autoblend(self):
"""
Ensure that all schemas within the collection
import each other which has a blending effect.
@return: self
@rtype: L{SchemaCollection}
"""
namespaces = self.namespaces.keys()
for s in self.children:
for ns in namespaces:
tns = s.root.get('targetNamespace')
if tns == ns:
continue
for imp in s.root.getChildren('import'):
if imp.get('namespace') == ns:
continue
imp = Element('import', ns=Namespace.xsdns)
imp.set('namespace', ns)
s.root.append(imp)
return self
def locate(self, ns):
"""
Find a schema by namespace. Only the URI portion of
the namespace is compared to each schema's I{targetNamespace}
@param ns: A namespace.
@type ns: (prefix,URI)
        @return: The schema matching the namespace, else None.
@rtype: L{Schema}
"""
return self.namespaces.get(ns[1])
def merge(self):
"""
Merge the contained schemas into one.
@return: The merged schema.
@rtype: L{Schema}
"""
if len(self):
schema = self.children[0]
for s in self.children[1:]:
schema.merge(s)
return schema
else:
return None
def __len__(self):
return len(self.children)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
result = ['\nschema collection']
for s in self.children:
result.append(s.str(1))
return '\n'.join(result)
class Schema:
"""
The schema is an objectification of a <schema/> (xsd) definition.
It provides inspection, lookup and type resolution.
@ivar root: The root node.
@type root: L{sax.element.Element}
@ivar baseurl: The I{base} URL for this schema.
@type baseurl: str
@ivar container: A schema collection containing this schema.
@type container: L{SchemaCollection}
@ivar children: A list of direct top level children.
@type children: [L{SchemaObject},...]
@ivar all: A list of all (includes imported) top level children.
@type all: [L{SchemaObject},...]
@ivar types: A schema types cache.
@type types: {name:L{SchemaObject}}
@ivar imports: A list of import objects.
@type imports: [L{SchemaObject},...]
@ivar elements: A list of <element/> objects.
@type elements: [L{SchemaObject},...]
@ivar attributes: A list of <attribute/> objects.
@type attributes: [L{SchemaObject},...]
@ivar groups: A list of group objects.
@type groups: [L{SchemaObject},...]
@ivar agrps: A list of attribute group objects.
@type agrps: [L{SchemaObject},...]
@ivar form_qualified: The flag indicating:
(@elementFormDefault).
@type form_qualified: bool
"""
Tag = 'schema'
def __init__(self, root, baseurl, options, container=None):
"""
@param root: The xml root.
@type root: L{sax.element.Element}
@param baseurl: The base url used for importing.
@type baseurl: basestring
@param options: An options dictionary.
@type options: L{options.Options}
@param container: An optional container.
@type container: L{SchemaCollection}
"""
self.root = root
self.id = objid(self)
self.tns = self.mktns()
self.baseurl = baseurl
self.container = container
self.children = []
self.all = []
self.types = {}
self.imports = []
self.elements = {}
self.attributes = {}
self.groups = {}
self.agrps = {}
if options.doctor is not None:
options.doctor.examine(root)
form = self.root.get('elementFormDefault')
if form is None:
self.form_qualified = False
else:
self.form_qualified = ( form == 'qualified' )
if container is None:
self.build()
self.open_imports(options)
log.debug('built:\n%s', self)
self.dereference()
log.debug('dereferenced:\n%s', self)
def mktns(self):
"""
Make the schema's target namespace.
@return: The namespace representation of the schema's
targetNamespace value.
@rtype: (prefix, uri)
"""
tns = [None, self.root.get('targetNamespace')]
if tns[1] is not None:
tns[0] = self.root.findPrefix(tns[1])
return tuple(tns)
def build(self):
"""
Build the schema (object graph) using the root node
using the factory.
- Build the graph.
- Collate the children.
"""
self.children = BasicFactory.build(self.root, self)
collated = BasicFactory.collate(self.children)
self.children = collated[0]
self.attributes = collated[2]
self.imports = collated[1]
self.elements = collated[3]
self.types = collated[4]
self.groups = collated[5]
self.agrps = collated[6]
def merge(self, schema):
"""
Merge the contents from the schema. Only objects not already contained
in this schema's collections are merged. This provides for bidirectional
imports which produce cyclic includes.
@returns: self
@rtype: L{Schema}
"""
for item in schema.attributes.items():
if item[0] in self.attributes:
continue
self.all.append(item[1])
self.attributes[item[0]] = item[1]
for item in schema.elements.items():
if item[0] in self.elements:
continue
self.all.append(item[1])
self.elements[item[0]] = item[1]
for item in schema.types.items():
if item[0] in self.types:
continue
self.all.append(item[1])
self.types[item[0]] = item[1]
for item in schema.groups.items():
if item[0] in self.groups:
continue
self.all.append(item[1])
self.groups[item[0]] = item[1]
for item in schema.agrps.items():
if item[0] in self.agrps:
continue
self.all.append(item[1])
self.agrps[item[0]] = item[1]
schema.merged = True
return self
def open_imports(self, options):
"""
Instruct all contained L{sxbasic.Import} children to import
the schema's which they reference. The contents of the
imported schema are I{merged} in.
@param options: An options dictionary.
@type options: L{options.Options}
"""
for imp in self.imports:
imported = imp.open(options)
if imported is None:
continue
imported.open_imports(options)
log.debug('imported:\n%s', imported)
self.merge(imported)
def dereference(self):
"""
Instruct all children to perform dereferencing.
"""
all = []
indexes = {}
for child in self.children:
child.content(all)
deplist = DepList()
for x in all:
x.qualify()
midx, deps = x.dependencies()
item = (x, tuple(deps))
deplist.add(item)
indexes[x] = midx
for x, deps in deplist.sort():
midx = indexes.get(x)
if midx is None: continue
d = deps[midx]
log.debug('(%s) merging %s <== %s', self.tns[1], Repr(x), Repr(d))
x.merge(d)
def locate(self, ns):
"""
Find a schema by namespace. Only the URI portion of
the namespace is compared to each schema's I{targetNamespace}.
The request is passed to the container.
@param ns: A namespace.
@type ns: (prefix,URI)
@return: The schema matching the namespace, else None.
@rtype: L{Schema}
"""
if self.container is not None:
return self.container.locate(ns)
else:
return None
def custom(self, ref, context=None):
"""
Get whether the specified reference is B{not} an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if B{not} a builtin, else False.
@rtype: bool
"""
if ref is None:
return True
else:
return ( not self.builtin(ref, context) )
def builtin(self, ref, context=None):
"""
Get whether the specified reference is an (xs) builtin.
@param ref: A str or qref.
@type ref: (str|qref)
@return: True if builtin, else False.
@rtype: bool
"""
w3 = 'http://www.w3.org'
try:
if isqref(ref):
ns = ref[1]
return ( ref[0] in Factory.tags and ns.startswith(w3) )
if context is None:
context = self.root
prefix = splitPrefix(ref)[0]
prefixes = context.findPrefixes(w3, 'startswith')
return ( prefix in prefixes and ref[0] in Factory.tags )
except:
return False
def instance(self, root, baseurl, options):
"""
Create and return a new schema object using the
specified I{root} and I{url}.
@param root: A schema root node.
@type root: L{sax.element.Element}
@param baseurl: A base URL.
@type baseurl: str
@param options: An options dictionary.
@type options: L{options.Options}
@return: The newly created schema object.
@rtype: L{Schema}
@note: This is only used by Import children.
"""
return Schema(root, baseurl, options)
def str(self, indent=0):
tab = '%*s'%(indent*3, '')
result = []
result.append('%s%s' % (tab, self.id))
result.append('%s(raw)' % tab)
result.append(self.root.str(indent+1))
result.append('%s(model)' % tab)
for c in self.children:
result.append(c.str(indent+1))
result.append('')
return '\n'.join(result)
def __repr__(self):
myrep = '<%s tns="%s"/>' % (self.id, self.tns[1])
return myrep.encode('utf-8')
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.str()
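
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: build a Schema
    # directly from a tiny inline XSD.  Assumes the stock suds Options and
    # sax Parser (imported below) are available; normally a Schema is built
    # for you while a WSDL is loaded.
    from suds.options import Options
    from suds.sax.parser import Parser
    xsd = (
        '<xs:schema xmlns:xs="http://www.w3.org/2001/XMLSchema" '
        'targetNamespace="http://example.com/ns" '
        'elementFormDefault="qualified">'
        '<xs:element name="Person" type="xs:string"/>'
        '</xs:schema>')
    root = Parser().parse(string=xsd).root()
    schema = Schema(root, '', Options())
    print schema.tns             # (None, 'http://example.com/ns')
    print schema.form_qualified  # True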
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/xsd/doctor.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The I{doctor} module provides classes for fixing broken (sick)
schema(s).
"""
from logging import getLogger
from suds.sax import splitPrefix, Namespace
from suds.sax.element import Element
from suds.plugin import DocumentPlugin, DocumentContext
log = getLogger(__name__)
class Doctor:
"""
Schema Doctor.
"""
def examine(self, root):
"""
Examine and repair the schema (if necessary).
@param root: A schema root element.
@type root: L{Element}
"""
pass
class Practice(Doctor):
"""
A collection of doctors.
@ivar doctors: A list of doctors.
@type doctors: list
"""
def __init__(self):
self.doctors = []
def add(self, doctor):
"""
Add a doctor to the practice
@param doctor: A doctor to add.
@type doctor: L{Doctor}
"""
self.doctors.append(doctor)
def examine(self, root):
for d in self.doctors:
d.examine(root)
return root
class TnsFilter:
"""
Target Namespace filter.
@ivar tns: A list of target namespaces.
@type tns: [str,...]
"""
def __init__(self, *tns):
"""
@param tns: A list of target namespaces.
@type tns: [str,...]
"""
self.tns = []
self.add(*tns)
def add(self, *tns):
"""
Add I{targetNamespaces} to the filter.
@param tns: A list of target namespaces.
@type tns: [str,...]
"""
self.tns += tns
def match(self, root, ns):
"""
Match by I{targetNamespace} excluding those that
are equal to the specified namespace to prevent
adding an import to itself.
@param root: A schema root.
@type root: L{Element}
"""
tns = root.get('targetNamespace')
if len(self.tns):
matched = ( tns in self.tns )
else:
matched = 1
itself = ( ns == tns )
return ( matched and not itself )
class Import:
"""
An <xs:import/> to be applied.
@cvar xsdns: The XSD namespace.
@type xsdns: (p,u)
@ivar ns: An import namespace.
@type ns: str
@ivar location: An optional I{schemaLocation}.
@type location: str
@ivar filter: A filter used to restrict application to
a particular schema.
@type filter: L{TnsFilter}
"""
xsdns = Namespace.xsdns
def __init__(self, ns, location=None):
"""
@param ns: An import namespace.
@type ns: str
@param location: An optional I{schemaLocation}.
@type location: str
"""
self.ns = ns
self.location = location
self.filter = TnsFilter()
def setfilter(self, filter):
"""
Set the filter.
@param filter: A filter to set.
@type filter: L{TnsFilter}
"""
self.filter = filter
def apply(self, root):
"""
Apply the import (rule) to the specified schema.
If the schema does not already contain an import for the
I{namespace} specified here, it is added.
@param root: A schema root.
@type root: L{Element}
"""
if not self.filter.match(root, self.ns):
return
if self.exists(root):
return
node = Element('import', ns=self.xsdns)
node.set('namespace', self.ns)
if self.location is not None:
node.set('schemaLocation', self.location)
log.debug('inserting: %s', node)
root.insert(node)
def add(self, root):
"""
Add an <xs:import/> to the specified schema root.
@param root: A schema root.
@type root: L{Element}
"""
node = Element('import', ns=self.xsdns)
node.set('namespace', self.ns)
if self.location is not None:
node.set('schemaLocation', self.location)
log.debug('%s inserted', node)
root.insert(node)
def exists(self, root):
"""
Check to see if the <xs:import/> already exists
in the specified schema root by matching I{namespace}.
@param root: A schema root.
@type root: L{Element}
"""
for node in root.children:
if node.name != 'import':
continue
ns = node.get('namespace')
if self.ns == ns:
return 1
return 0
class ImportDoctor(Doctor, DocumentPlugin):
"""
Doctor used to fix missing imports.
@ivar imports: A list of imports to apply.
@type imports: [L{Import},...]
"""
def __init__(self, *imports):
"""
"""
self.imports = []
self.add(*imports)
def add(self, *imports):
"""
Add a namespace to be checked.
@param imports: A list of L{Import} objects.
@type imports: [L{Import},..]
"""
self.imports += imports
def examine(self, node):
for imp in self.imports:
imp.apply(node)
def parsed(self, context):
node = context.document
# xsd root
if node.name == 'schema' and Namespace.xsd(node.namespace()):
self.examine(node)
return
# look deeper
context = DocumentContext()
for child in node:
context.document = child
self.parsed(context)
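
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: repair a schema
    # that is missing an <xs:import/> of the SOAP encoding namespace.  The
    # target namespace used here is only a placeholder.
    root = Element('schema', ns=Namespace.xsdns)
    root.set('targetNamespace', 'http://tempuri.org/broken')
    imp = Import('http://schemas.xmlsoap.org/soap/encoding/')
    imp.setfilter(TnsFilter('http://tempuri.org/broken'))
    ImportDoctor(imp).examine(root)
    print root.str()
    # When loading a WSDL, the same doctor is normally handed to the client,
    # e.g. Client(url, doctor=ImportDoctor(imp)).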
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/sax/attribute.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{attribute} classes.
"""
import suds.sax
from logging import getLogger
from suds import *
from suds.sax import *
from suds.sax.text import Text
log = getLogger(__name__)
class Attribute:
"""
An XML attribute object.
@ivar parent: The node containing this attribute
@type parent: L{element.Element}
@ivar prefix: The I{optional} namespace prefix.
@type prefix: basestring
@ivar name: The I{unqualified} name of the attribute
@type name: basestring
@ivar value: The attribute's value
@type value: basestring
"""
def __init__(self, name, value=None):
"""
@param name: The attribute's name with I{optional} namespace prefix.
@type name: basestring
@param value: The attribute's value
@type value: basestring
"""
self.parent = None
self.prefix, self.name = splitPrefix(name)
self.setValue(value)
def clone(self, parent=None):
"""
Clone this object.
@param parent: The parent for the clone.
@type parent: L{element.Element}
@return: A copy of this object assigned to the new parent.
@rtype: L{Attribute}
"""
a = Attribute(self.qname(), self.value)
a.parent = parent
return a
def qname(self):
"""
Get the B{fully} qualified name of this attribute
@return: The fully qualified name.
@rtype: basestring
"""
if self.prefix is None:
return self.name
else:
return ':'.join((self.prefix, self.name))
def setValue(self, value):
"""
Set the attribute's value.
@param value: The new value (may be None)
@type value: basestring
@return: self
@rtype: L{Attribute}
"""
if isinstance(value, Text):
self.value = value
else:
self.value = Text(value)
return self
def getValue(self, default=Text('')):
"""
Get the attribute's value with optional default.
@param default: An optional value to be returned when the
attribute's value has not been set.
@type default: basestring
@return: The attribute's value, or I{default}
@rtype: L{Text}
"""
if self.hasText():
return self.value
else:
return default
def hasText(self):
"""
Get whether the attribute has I{text} and that it is not an empty
(zero length) string.
@return: True when has I{text}.
@rtype: boolean
"""
return ( self.value is not None and len(self.value) )
def namespace(self):
"""
Get the attribute's namespace. This may either be the namespace
defined by an optional prefix, or its parent's namespace.
@return: The attribute's namespace
@rtype: (I{prefix}, I{name})
"""
if self.prefix is None:
return Namespace.default
else:
return self.resolvePrefix(self.prefix)
def resolvePrefix(self, prefix):
"""
Resolve the specified prefix to a known namespace.
@param prefix: A declared prefix
@type prefix: basestring
@return: The namespace that has been mapped to I{prefix}
@rtype: (I{prefix}, I{name})
"""
ns = Namespace.default
if self.parent is not None:
ns = self.parent.resolvePrefix(prefix)
return ns
def match(self, name=None, ns=None):
"""
Match by (optional) name and/or (optional) namespace.
@param name: The optional attribute tag name.
@type name: str
@param ns: An optional namespace.
@type ns: (I{prefix}, I{name})
@return: True if matched.
@rtype: boolean
"""
if name is None:
byname = True
else:
byname = ( self.name == name )
if ns is None:
byns = True
else:
byns = ( self.namespace()[1] == ns[1] )
return ( byname and byns )
def __eq__(self, rhs):
""" equals operator """
return rhs is not None and \
isinstance(rhs, Attribute) and \
self.prefix == rhs.prefix and \
self.name == rhs.name
def __repr__(self):
""" get a string representation """
return \
'attr (prefix=%s, name=%s, value=(%s))' %\
(self.prefix, self.name, self.value)
def __str__(self):
""" get an xml string representation """
return unicode(self).encode('utf-8')
def __unicode__(self):
""" get an xml string representation """
n = self.qname()
if self.hasText():
v = self.value.escape()
else:
v = self.value
return u'%s="%s"' % (n, v)
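
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: basic Attribute
    # usage.  The 'xsi' prefix resolves to the default (None, None) namespace
    # here because the attribute has no parent element to consult.
    a = Attribute('xsi:type', 'tns:Person')
    print a.qname()       # xsi:type
    print a.getValue()    # tns:Person
    print a.namespace()   # (None, None)
    print unicode(a)      # xsi:type="tns:Person"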
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/sax/element.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{element} classes.
"""
from logging import getLogger
from suds import *
from suds.sax import *
from suds.sax.text import Text
from suds.sax.attribute import Attribute
import sys
if sys.version_info < (2, 4, 0):
from sets import Set as set
del sys
log = getLogger(__name__)
class Element:
"""
An XML element object.
@ivar parent: The node containing this attribute
@type parent: L{Element}
@ivar prefix: The I{optional} namespace prefix.
@type prefix: basestring
@ivar name: The I{unqualified} name of the attribute
@type name: basestring
@ivar expns: An explicit namespace (xmlns="...").
@type expns: (I{prefix}, I{name})
@ivar nsprefixes: A mapping of prefixes to namespaces.
@type nsprefixes: dict
@ivar attributes: A list of XML attributes.
@type attributes: [I{Attribute},]
@ivar text: The element's I{text} content.
@type text: basestring
@ivar children: A list of child elements.
@type children: [I{Element},]
@cvar matcher: A collection of I{lambda} for string matching.
@cvar specialprefixes: A dictionary of builtin-special prefixes.
"""
matcher = \
{
'eq': lambda a,b: a == b,
'startswith' : lambda a,b: a.startswith(b),
'endswith' : lambda a,b: a.endswith(b),
'contains' : lambda a,b: b in a
}
specialprefixes = { Namespace.xmlns[0] : Namespace.xmlns[1] }
@classmethod
def buildPath(self, parent, path):
"""
Build the specified path as a/b/c where missing intermediate nodes are built
automatically.
@param parent: A parent element on which the path is built.
@type parent: I{Element}
@param path: A simple path separated by (/).
@type path: basestring
@return: The leaf node of I{path}.
@rtype: L{Element}
"""
for tag in path.split('/'):
child = parent.getChild(tag)
if child is None:
child = Element(tag, parent)
parent = child
return child
def __init__(self, name, parent=None, ns=None):
"""
@param name: The element's (tag) name. May contain a prefix.
@type name: basestring
@param parent: An optional parent element.
@type parent: I{Element}
@param ns: An optional namespace
@type ns: (I{prefix}, I{name})
"""
self.rename(name)
self.expns = None
self.nsprefixes = {}
self.attributes = []
self.text = None
if parent is not None:
if isinstance(parent, Element):
self.parent = parent
else:
raise Exception('parent (%s) not-valid', parent.__class__.__name__)
else:
self.parent = None
self.children = []
self.applyns(ns)
def rename(self, name):
"""
Rename the element.
@param name: A new name for the element.
@type name: basestring
"""
if name is None:
raise Exception('name (%s) not-valid' % name)
else:
self.prefix, self.name = splitPrefix(name)
def setPrefix(self, p, u=None):
"""
Set the element namespace prefix.
@param p: A new prefix for the element.
@type p: basestring
@param u: A namespace URI to be mapped to the prefix.
@type u: basestring
@return: self
@rtype: L{Element}
"""
self.prefix = p
if p is not None and u is not None:
self.addPrefix(p, u)
return self
def qname(self):
"""
Get the B{fully} qualified name of this element
@return: The fully qualified name.
@rtype: basestring
"""
if self.prefix is None:
return self.name
else:
return '%s:%s' % (self.prefix, self.name)
def getRoot(self):
"""
Get the root (top) node of the tree.
@return: The I{top} node of this tree.
@rtype: I{Element}
"""
if self.parent is None:
return self
else:
return self.parent.getRoot()
def clone(self, parent=None):
"""
Deep clone of this element and children.
@param parent: An optional parent for the copied fragment.
@type parent: I{Element}
@return: A deep copy parented by I{parent}
@rtype: I{Element}
"""
root = Element(self.qname(), parent, self.namespace())
for a in self.attributes:
root.append(a.clone(self))
for c in self.children:
root.append(c.clone(self))
for item in self.nsprefixes.items():
root.addPrefix(item[0], item[1])
return root
def detach(self):
"""
Detach from parent.
@return: This element removed from its parent's
child list and I{parent}=I{None}
@rtype: L{Element}
"""
if self.parent is not None:
if self in self.parent.children:
self.parent.children.remove(self)
self.parent = None
return self
def set(self, name, value):
"""
Set an attribute's value.
@param name: The name of the attribute.
@type name: basestring
@param value: The attribute value.
@type value: basestring
@see: __setitem__()
"""
attr = self.getAttribute(name)
if attr is None:
attr = Attribute(name, value)
self.append(attr)
else:
attr.setValue(value)
def unset(self, name):
"""
Unset (remove) an attribute.
@param name: The attribute name.
@type name: str
@return: self
@rtype: L{Element}
"""
try:
attr = self.getAttribute(name)
self.attributes.remove(attr)
except:
pass
return self
def get(self, name, ns=None, default=None):
"""
Get the value of an attribute by name.
@param name: The name of the attribute.
@type name: basestring
@param ns: The optional attribute's namespace.
@type ns: (I{prefix}, I{name})
@param default: An optional value to be returned when either
the attribute does not exist or has no value.
@type default: basestring
@return: The attribute's value or I{default}
@rtype: basestring
@see: __getitem__()
"""
attr = self.getAttribute(name, ns)
if attr is None or attr.value is None:
return default
else:
return attr.getValue()
def setText(self, value):
"""
Set the element's L{Text} content.
@param value: The element's text value.
@type value: basestring
@return: self
@rtype: I{Element}
"""
if isinstance(value, Text):
self.text = value
else:
self.text = Text(value)
return self
def getText(self, default=None):
"""
Get the element's L{Text} content with optional default
@param default: A value to be returned when no text content exists.
@type default: basestring
@return: The text content, or I{default}
@rtype: L{Text}
"""
if self.hasText():
return self.text
else:
return default
def trim(self):
"""
Trim leading and trailing whitespace.
@return: self
@rtype: L{Element}
"""
if self.hasText():
self.text = self.text.trim()
return self
def hasText(self):
"""
Get whether the element has I{text} and that it is not an empty
(zero length) string.
@return: True when has I{text}.
@rtype: boolean
"""
return ( self.text is not None and len(self.text) )
def namespace(self):
"""
Get the element's namespace.
@return: The element's namespace by resolving the prefix, the explicit
namespace or the inherited namespace.
@rtype: (I{prefix}, I{name})
"""
if self.prefix is None:
return self.defaultNamespace()
else:
return self.resolvePrefix(self.prefix)
def defaultNamespace(self):
"""
Get the default (unqualified namespace).
This is the expns of the first node (looking up the tree)
that has it set.
@return: The namespace of a node when not qualified.
@rtype: (I{prefix}, I{name})
"""
p = self
while p is not None:
if p.expns is not None:
return (None, p.expns)
else:
p = p.parent
return Namespace.default
def append(self, objects):
"""
Append the specified child based on whether it is an
element or an attribute.
@param objects: A (single|collection) of attribute(s) or element(s)
to be added as children.
@type objects: (L{Element}|L{Attribute})
@return: self
@rtype: L{Element}
"""
if not isinstance(objects, (list, tuple)):
objects = (objects,)
for child in objects:
if isinstance(child, Element):
self.children.append(child)
child.parent = self
continue
if isinstance(child, Attribute):
self.attributes.append(child)
child.parent = self
continue
raise Exception('append %s not-valid' % child.__class__.__name__)
return self
def insert(self, objects, index=0):
"""
Insert an L{Element} content at the specified index.
@param objects: A (single|collection) of attribute(s) or element(s)
to be added as children.
@type objects: (L{Element}|L{Attribute})
@param index: The position in the list of children to insert.
@type index: int
@return: self
@rtype: L{Element}
"""
if not isinstance(objects, (list, tuple)):
    objects = (objects,)
for child in objects:
if isinstance(child, Element):
self.children.insert(index, child)
child.parent = self
else:
raise Exception('insert %s not-valid' % child.__class__.__name__)
return self
def remove(self, child):
"""
Remove the specified child element or attribute.
@param child: A child to remove.
@type child: L{Element}|L{Attribute}
@return: The detached I{child} when I{child} is an element, else None.
@rtype: L{Element}|None
"""
if isinstance(child, Element):
return child.detach()
if isinstance(child, Attribute):
self.attributes.remove(child)
return None
def replaceChild(self, child, content):
"""
Replace I{child} with the specified I{content}.
@param child: A child element.
@type child: L{Element}
@param content: An element or collection of elements.
@type content: L{Element} or [L{Element},]
"""
if child not in self.children:
raise Exception('child not-found')
index = self.children.index(child)
self.remove(child)
if not isinstance(content, (list, tuple)):
content = (content,)
for node in content:
self.children.insert(index, node.detach())
node.parent = self
index += 1
def getAttribute(self, name, ns=None, default=None):
"""
Get an attribute by name and (optional) namespace
@param name: The name of a contained attribute (may contain prefix).
@type name: basestring
@param ns: An optional namespace
@type ns: (I{prefix}, I{name})
@param default: Returned when attribute not-found.
@type default: L{Attribute}
@return: The requested attribute object.
@rtype: L{Attribute}
"""
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.resolvePrefix(prefix)
for a in self.attributes:
if a.match(name, ns):
return a
return default
def getChild(self, name, ns=None, default=None):
"""
Get a child by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@param default: Returned when child not-found.
@type default: L{Element}
@return: The requested child, or I{default} when not-found.
@rtype: L{Element}
"""
if ns is None:
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.resolvePrefix(prefix)
for c in self.children:
if c.match(name, ns):
return c
return default
def childAtPath(self, path):
"""
Get a child at I{path} where I{path} is a (/) separated
list of element names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The leaf node at the end of I{path}
@rtype: L{Element}
"""
result = None
node = self
for name in [p for p in path.split('/') if len(p) > 0]:
ns = None
prefix, name = splitPrefix(name)
if prefix is not None:
ns = node.resolvePrefix(prefix)
result = node.getChild(name, ns)
if result is None:
break
else:
node = result
return result
def childrenAtPath(self, path):
"""
Get a list of children at I{path} where I{path} is a (/) separated
list of element names that are expected to be children.
@param path: A (/) separated list of element names.
@type path: basestring
@return: The collection leaf nodes at the end of I{path}
@rtype: [L{Element},...]
"""
parts = [p for p in path.split('/') if len(p) > 0]
if len(parts) == 1:
result = self.getChildren(path)
else:
result = self.__childrenAtPath(parts)
return result
def getChildren(self, name=None, ns=None):
"""
Get a list of children by (optional) name and/or (optional) namespace.
@param name: The name of a child element (may contain prefix).
@type name: basestring
@param ns: An optional namespace used to match the child.
@type ns: (I{prefix}, I{name})
@return: The list of matching children.
@rtype: [L{Element},...]
"""
if ns is None:
if name is None:
return self.children
prefix, name = splitPrefix(name)
if prefix is None:
ns = None
else:
ns = self.resolvePrefix(prefix)
return [c for c in self.children if c.match(name, ns)]
def detachChildren(self):
"""
Detach and return this element's children.
@return: The element's children (detached).
@rtype: [L{Element},...]
"""
detached = self.children
self.children = []
for child in detached:
child.parent = None
return detached
def resolvePrefix(self, prefix, default=Namespace.default):
"""
Resolve the specified prefix to a namespace. The I{nsprefixes} is
searched. If not found, it walks up the tree until either resolved or
the top of the tree is reached. Searching up the tree provides for
inherited mappings.
@param prefix: A namespace prefix to resolve.
@type prefix: basestring
@param default: An optional value to be returned when the prefix
cannot be resolved.
@type default: (I{prefix},I{URI})
@return: The namespace that is mapped to I{prefix} in this context.
@rtype: (I{prefix},I{URI})
"""
n = self
while n is not None:
if prefix in n.nsprefixes:
return (prefix, n.nsprefixes[prefix])
if prefix in self.specialprefixes:
return (prefix, self.specialprefixes[prefix])
n = n.parent
return default
def addPrefix(self, p, u):
"""
Add or update a prefix mapping.
@param p: A prefix.
@type p: basestring
@param u: A namespace URI.
@type u: basestring
@return: self
@rtype: L{Element}
"""
self.nsprefixes[p] = u
return self
def updatePrefix(self, p, u):
"""
Update (redefine) a prefix mapping for the branch.
@param p: A prefix.
@type p: basestring
@param u: A namespace URI.
@type u: basestring
@return: self
@rtype: L{Element}
@note: This method traverses down the entire branch!
"""
if p in self.nsprefixes:
self.nsprefixes[p] = u
for c in self.children:
c.updatePrefix(p, u)
return self
def clearPrefix(self, prefix):
"""
Clear the specified prefix from the prefix mappings.
@param prefix: A prefix to clear.
@type prefix: basestring
@return: self
@rtype: L{Element}
"""
if prefix in self.nsprefixes:
del self.nsprefixes[prefix]
return self
def findPrefix(self, uri, default=None):
"""
Find the first prefix that has been mapped to a namespace URI.
The local mapping is searched, then it walks up the tree until
it reaches the top or finds a match.
@param uri: A namespace URI.
@type uri: basestring
@param default: A default prefix when not found.
@type default: basestring
@return: A mapped prefix.
@rtype: basestring
"""
for item in self.nsprefixes.items():
if item[1] == uri:
prefix = item[0]
return prefix
for item in self.specialprefixes.items():
if item[1] == uri:
prefix = item[0]
return prefix
if self.parent is not None:
return self.parent.findPrefix(uri, default)
else:
return default
def findPrefixes(self, uri, match='eq'):
"""
Find all prefixes that has been mapped to a namespace URI.
The local mapping is searched, then it walks up the tree until
it reaches the top collecting all matches.
@param uri: A namespace URI.
@type uri: basestring
@param match: A matching function L{Element.matcher}.
@type match: basestring
@return: A list of mapped prefixes.
@rtype: [basestring,...]
"""
result = []
for item in self.nsprefixes.items():
if self.matcher[match](item[1], uri):
prefix = item[0]
result.append(prefix)
for item in self.specialprefixes.items():
if self.matcher[match](item[1], uri):
prefix = item[0]
result.append(prefix)
if self.parent is not None:
result += self.parent.findPrefixes(uri, match)
return result
def promotePrefixes(self):
"""
Push prefix declarations up the tree as far as possible. A prefix
mapping is pushed to the parent unless the parent already has the
prefix mapped to another URI or uses the prefix itself.
This is propagated up the tree until the top is reached.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.promotePrefixes()
if self.parent is None:
return
for p,u in self.nsprefixes.items():
if p in self.parent.nsprefixes:
pu = self.parent.nsprefixes[p]
if pu == u:
del self.nsprefixes[p]
continue
if p != self.parent.prefix:
self.parent.nsprefixes[p] = u
del self.nsprefixes[p]
return self
def refitPrefixes(self):
"""
Refit namespace qualification by replacing prefixes
with explicit namespaces. Also purges prefix mapping table.
@return: self
@rtype: L{Element}
"""
for c in self.children:
c.refitPrefixes()
if self.prefix is not None:
ns = self.resolvePrefix(self.prefix)
if ns[1] is not None:
self.expns = ns[1]
self.prefix = None
self.nsprefixes = {}
return self
def normalizePrefixes(self):
"""
Normalize the namespace prefixes.
This generates unique prefixes for all namespaces, then refits all
prefixes and prefix mappings. It also refits attribute values that
contain a (:) qualified reference.
@return: self
@rtype: L{Element}
"""
PrefixNormalizer.apply(self)
return self
def isempty(self, content=True):
"""
Get whether the element has no children.
@param content: Test content (children & text) only.
@type content: boolean
@return: True when the element has no children.
@rtype: boolean
"""
noattrs = not len(self.attributes)
nochildren = not len(self.children)
notext = ( self.text is None )
nocontent = ( nochildren and notext )
if content:
return nocontent
else:
return ( nocontent and noattrs )
def isnil(self):
"""
Get whether the element is I{nil} as defined by having
the attribute I{xsi:nil="true"} set.
@return: True if I{nil}, else False
@rtype: boolean
"""
nilattr = self.getAttribute('nil', ns=Namespace.xsins)
if nilattr is None:
return False
else:
return ( nilattr.getValue().lower() == 'true' )
def setnil(self, flag=True):
"""
Set this node to I{nil} as defined by having an
attribute I{xsi:nil}=I{flag}.
@param flag: A flag indicating how I{xsi:nil} will be set.
@type flag: boolean
@return: self
@rtype: L{Element}
"""
p, u = Namespace.xsins
name = ':'.join((p, 'nil'))
self.set(name, str(flag).lower())
self.addPrefix(p, u)
if flag:
self.text = None
return self
def applyns(self, ns):
"""
Apply the namespace to this node. If the prefix is I{None} then
this element's explicit namespace I{expns} is set to the
URI defined by I{ns}. Otherwise, the I{ns} is simply mapped.
@param ns: A namespace.
@type ns: (I{prefix},I{URI})
"""
if ns is None:
return
if not isinstance(ns, (tuple,list)):
raise Exception('namespace must be tuple')
if ns[0] is None:
self.expns = ns[1]
else:
self.prefix = ns[0]
self.nsprefixes[ns[0]] = ns[1]
def str(self, indent=0):
"""
Get a string representation of this XML fragment.
@param indent: The indent to be used in formatting the output.
@type indent: int
@return: A I{pretty} string.
@rtype: basestring
"""
tab = '%*s'%(indent*3,'')
result = []
result.append('%s<%s' % (tab, self.qname()))
result.append(self.nsdeclarations())
for a in [unicode(a) for a in self.attributes]:
result.append(' %s' % a)
if self.isempty():
result.append('/>')
return ''.join(result)
result.append('>')
if self.hasText():
result.append(self.text.escape())
for c in self.children:
result.append('\n')
result.append(c.str(indent+1))
if len(self.children):
result.append('\n%s' % tab)
result.append('</%s>' % self.qname())
result = ''.join(result)
return result
def plain(self):
"""
Get a string representation of this XML fragment.
@return: A I{plain} string.
@rtype: basestring
"""
result = []
result.append('<%s' % self.qname())
result.append(self.nsdeclarations())
for a in [unicode(a) for a in self.attributes]:
result.append(' %s' % a)
if self.isempty():
result.append('/>')
return ''.join(result)
result.append('>')
if self.hasText():
result.append(self.text.escape())
for c in self.children:
result.append(c.plain())
result.append('</%s>' % self.qname())
result = ''.join(result)
return result
def nsdeclarations(self):
"""
Get a string representation for all namespace declarations
as xmlns="" and xmlns:p="".
@return: A separated list of declarations.
@rtype: basestring
"""
s = []
myns = (None, self.expns)
if self.parent is None:
pns = Namespace.default
else:
pns = (None, self.parent.expns)
if myns[1] != pns[1]:
if self.expns is not None:
d = ' xmlns="%s"' % self.expns
s.append(d)
for item in self.nsprefixes.items():
(p,u) = item
if self.parent is not None:
ns = self.parent.resolvePrefix(p)
if ns[1] == u: continue
d = ' xmlns:%s="%s"' % (p, u)
s.append(d)
return ''.join(s)
def match(self, name=None, ns=None):
"""
Match by (optional) name and/or (optional) namespace.
@param name: The optional element tag name.
@type name: str
@param ns: An optional namespace.
@type ns: (I{prefix}, I{name})
@return: True if matched.
@rtype: boolean
"""
if name is None:
byname = True
else:
byname = ( self.name == name )
if ns is None:
byns = True
else:
byns = ( self.namespace()[1] == ns[1] )
return ( byname and byns )
def branch(self):
"""
Get a flattened representation of the branch.
@return: A flat list of nodes.
@rtype: [L{Element},..]
"""
branch = [self]
for c in self.children:
branch += c.branch()
return branch
def ancestors(self):
"""
Get a list of ancestors.
@return: A list of ancestors.
@rtype: [L{Element},..]
"""
ancestors = []
p = self.parent
while p is not None:
ancestors.append(p)
p = p.parent
return ancestors
def walk(self, visitor):
"""
Walk the branch and call the visitor function
on each node.
@param visitor: A function.
@return: self
@rtype: L{Element}
"""
visitor(self)
for c in self.children:
c.walk(visitor)
return self
def prune(self):
"""
Prune the branch of empty nodes.
"""
pruned = []
for c in self.children:
c.prune()
if c.isempty(False):
pruned.append(c)
for p in pruned:
self.children.remove(p)
def __childrenAtPath(self, parts):
result = []
node = self
last = len(parts)-1
ancestors = parts[:last]
leaf = parts[last]
for name in ancestors:
ns = None
prefix, name = splitPrefix(name)
if prefix is not None:
ns = node.resolvePrefix(prefix)
child = node.getChild(name, ns)
if child is None:
break
else:
node = child
if child is not None:
ns = None
prefix, leaf = splitPrefix(leaf)
if prefix is not None:
ns = node.resolvePrefix(prefix)
result = child.getChildren(leaf)
return result
def __len__(self):
return len(self.children)
def __getitem__(self, index):
if isinstance(index, basestring):
return self.get(index)
else:
if index < len(self.children):
return self.children[index]
else:
return None
def __setitem__(self, index, value):
if isinstance(index, basestring):
self.set(index, value)
else:
if index < len(self.children) and \
isinstance(value, Element):
self.children.insert(index, value)
def __eq__(self, rhs):
return rhs is not None and \
isinstance(rhs, Element) and \
self.name == rhs.name and \
self.namespace()[1] == rhs.namespace()[1]
def __repr__(self):
return \
'Element (prefix=%s, name=%s)' % (self.prefix, self.name)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.str()
def __iter__(self):
return NodeIterator(self)
class NodeIterator:
"""
The L{Element} child node iterator.
@ivar pos: The current position
@type pos: int
@ivar children: A list of a child nodes.
@type children: [L{Element},..]
"""
def __init__(self, parent):
"""
@param parent: An element to iterate.
@type parent: L{Element}
"""
self.pos = 0
self.children = parent.children
def next(self):
"""
Get the next child.
@return: The next child.
@rtype: L{Element}
@raise StopIteration: At the end.
"""
try:
child = self.children[self.pos]
self.pos += 1
return child
except:
raise StopIteration()
class PrefixNormalizer:
"""
The prefix normalizer provides namespace prefix normalization.
@ivar node: A node to normalize.
@type node: L{Element}
@ivar branch: The nodes flattened branch.
@type branch: [L{Element},..]
@ivar namespaces: A unique list of namespaces (URI).
@type namespaces: [str,]
@ivar prefixes: A reverse dict of prefixes.
@type prefixes: {u, p}
"""
@classmethod
def apply(cls, node):
"""
Normalize the specified node.
@param node: A node to normalize.
@type node: L{Element}
@return: The normalized node.
@rtype: L{Element}
"""
pn = PrefixNormalizer(node)
return pn.refit()
def __init__(self, node):
"""
@param node: A node to normalize.
@type node: L{Element}
"""
self.node = node
self.branch = node.branch()
self.namespaces = self.getNamespaces()
self.prefixes = self.genPrefixes()
def getNamespaces(self):
"""
Get the I{unique} set of namespaces referenced in the branch.
@return: A set of namespaces.
@rtype: set
"""
s = set()
for n in self.branch + self.node.ancestors():
if self.permit(n.expns):
s.add(n.expns)
s = s.union(self.pset(n))
return s
def pset(self, n):
"""
Convert the node's nsprefixes into a set.
@param n: A node.
@type n: L{Element}
@return: A set of namespaces.
@rtype: set
"""
s = set()
for ns in n.nsprefixes.items():
if self.permit(ns):
s.add(ns[1])
return s
def genPrefixes(self):
"""
Generate a I{reverse} mapping of unique prefixes for all namespaces.
@return: A reverse dict of prefixes.
@rtype: {u, p}
"""
prefixes = {}
n = 0
for u in self.namespaces:
p = 'ns%d' % n
prefixes[u] = p
n += 1
return prefixes
def refit(self):
"""
Refit (normalize) the prefixes in the node.
"""
self.refitNodes()
self.refitMappings()
def refitNodes(self):
"""
Refit (normalize) all of the nodes in the branch.
"""
for n in self.branch:
if n.prefix is not None:
ns = n.namespace()
if self.permit(ns):
n.prefix = self.prefixes[ns[1]]
self.refitAttrs(n)
def refitAttrs(self, n):
"""
Refit (normalize) all of the attributes in the node.
@param n: A node.
@type n: L{Element}
"""
for a in n.attributes:
self.refitAddr(a)
def refitAddr(self, a):
"""
Refit (normalize) the attribute.
@param a: An attribute.
@type a: L{Attribute}
"""
if a.prefix is not None:
ns = a.namespace()
if self.permit(ns):
a.prefix = self.prefixes[ns[1]]
self.refitValue(a)
def refitValue(self, a):
"""
Refit (normalize) the attribute's value.
@param a: An attribute.
@type a: L{Attribute}
"""
p,name = splitPrefix(a.getValue())
if p is None: return
ns = a.resolvePrefix(p)
if self.permit(ns):
u = ns[1]
p = self.prefixes[u]
a.setValue(':'.join((p, name)))
def refitMappings(self):
"""
Refit (normalize) all of the nsprefix mappings.
"""
for n in self.branch:
n.nsprefixes = {}
n = self.node
for u, p in self.prefixes.items():
n.addPrefix(p, u)
def permit(self, ns):
"""
Get whether the I{ns} is to be normalized.
@param ns: A namespace.
@type ns: (p,u)
@return: True if to be included.
@rtype: boolean
"""
return not self.skip(ns)
def skip(self, ns):
"""
Get whether the I{ns} is to B{not} be normalized.
@param ns: A namespace.
@type ns: (p,u)
@return: True if to be skipped.
@rtype: boolean
"""
return ns is None or \
( ns == Namespace.default ) or \
( ns == Namespace.xsdns ) or \
( ns == Namespace.xsins) or \
( ns == Namespace.xmlns )
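
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: build a small
    # fragment, then navigate it by path.  The names and namespaces are
    # placeholders, not part of any real schema.
    root = Element('Envelope', ns=('soap', 'http://schemas.xmlsoap.org/soap/envelope/'))
    body = Element('soap:Body')
    root.append(body)
    body.append(Element('Echo').setText('hello'))
    print root.str()
    print root.childAtPath('Body/Echo').getText()   # hello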
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/sax/date.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Nathan Van Gheem (vangheem@gmail.com)
"""
The I{xdate} module provides classes for conversion
between XML dates and python objects.
"""
from logging import getLogger
from suds import *
from suds.xsd import *
import time
import datetime as dt
import re
log = getLogger(__name__)
class Date:
"""
An XML date object.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
@ivar date: The object value.
@type date: B{datetime}.I{date}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (date|str)
@raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.date):
self.date = date
return
if isinstance(date, basestring):
self.date = self.__parse(date)
return
raise ValueError, type(date)
def year(self):
"""
Get the I{year} component.
@return: The year.
@rtype: int
"""
return self.date.year
def month(self):
"""
Get the I{month} component.
@return: The month.
@rtype: int
"""
return self.date.month
def day(self):
"""
Get the I{day} component.
@return: The day.
@rtype: int
"""
return self.date.day
def __parse(self, s):
"""
Parse the string date.
Supported formats:
- YYYY-MM-DD
- YYYY-MM-DD(z|Z)
- YYYY-MM-DD+06:00
- YYYY-MM-DD-06:00
The TZ component is ignored because it is meaningless
without a time.
@param s: A date string.
@type s: str
@return: A date object.
@rtype: I{date}
"""
try:
year, month, day = s[:10].split('-', 2)
year = int(year)
month = int(month)
day = int(day)
return dt.date(year, month, day)
except:
log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __str__(self):
return unicode(self)
def __unicode__(self):
return self.date.isoformat()
class Time:
"""
An XML time object.
Supported formats:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@ivar tz: The timezone
@type tz: L{Timezone}
@ivar date: The object value.
@type date: B{datetime}.I{time}
"""
def __init__(self, time, adjusted=True):
"""
@param time: The value of the object.
@type time: (time|str)
@param adjusted: Adjust for I{local} Timezone.
@type adjusted: boolean
@raise ValueError: When I{time} is invalid.
"""
self.tz = Timezone()
if isinstance(time, dt.time):
self.time = time
return
if isinstance(time, basestring):
self.time = self.__parse(time)
if adjusted:
self.__adjust()
return
raise ValueError, type(time)
def hour(self):
"""
Get the I{hour} component.
@return: The hour.
@rtype: int
"""
return self.time.hour
def minute(self):
"""
Get the I{minute} component.
@return: The minute.
@rtype: int
"""
return self.time.minute
def second(self):
"""
Get the I{seconds} component.
@return: The seconds.
@rtype: int
"""
return self.time.second
def microsecond(self):
"""
Get the I{microsecond} component.
@return: The microsecond.
@rtype: int
"""
return self.time.microsecond
def __adjust(self):
"""
Adjust for TZ offset.
"""
if hasattr(self, 'offset'):
today = dt.date.today()
delta = self.tz.adjustment(self.offset)
d = dt.datetime.combine(today, self.time)
d = ( d + delta )
self.time = d.time()
def __parse(self, s):
"""
Parse the string date.
Patterns:
- HH:MI:SS
- HH:MI:SS(z|Z)
- HH:MI:SS.ms
- HH:MI:SS.ms(z|Z)
- HH:MI:SS(+|-)06:00
- HH:MI:SS.ms(+|-)06:00
@param s: A time string.
@type s: str
@return: A time object.
@rtype: B{datetime}.I{time}
"""
try:
offset = None
part = Timezone.split(s)
hour, minute, second = part[0].split(':', 2)
hour = int(hour)
minute = int(minute)
second, ms = self.__second(second)
if len(part) == 2:
self.offset = self.__offset(part[1])
if ms is None:
return dt.time(hour, minute, second)
else:
return dt.time(hour, minute, second, ms)
except:
log.debug(s, exc_info=True)
raise ValueError, 'Invalid format "%s"' % s
def __second(self, s):
"""
Parse the seconds and microseconds.
The microseconds are truncated to 999999 due to a restriction in
the python datetime.datetime object.
@param s: A string representation of the seconds.
@type s: str
@return: Tuple of (sec,ms)
@rtype: tuple.
"""
part = s.split('.')
if len(part) > 1:
return (int(part[0]), int(part[1][:6]))
else:
return (int(part[0]), None)
def __offset(self, s):
"""
Parse the TZ offset.
@param s: A string representation of the TZ offset.
@type s: str
@return: The signed offset in hours.
@rtype: int
"""
if len(s) == len('-00:00'):
return int(s[:3])
if len(s) == 0:
return self.tz.local
if len(s) == 1:
return 0
raise Exception()
def __str__(self):
return unicode(self)
def __unicode__(self):
time = self.time.isoformat()
if self.tz.local:
return '%s%+.2d:00' % (time, self.tz.local)
else:
return '%sZ' % time
class DateTime(Date,Time):
"""
An XML time object.
Supported formats:
- YYYY-MM-DDB{T}HH:MI:SS
- YYYY-MM-DDB{T}HH:MI:SS(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS.ms
- YYYY-MM-DDB{T}HH:MI:SS.ms(z|Z)
- YYYY-MM-DDB{T}HH:MI:SS(+|-)06:00
- YYYY-MM-DDB{T}HH:MI:SS.ms(+|-)06:00
@ivar datetime: The object value.
@type datetime: B{datetime}.I{datetime}
"""
def __init__(self, date):
"""
@param date: The value of the object.
@type date: (datetime|str)
@raise ValueError: When I{date} is invalid.
"""
if isinstance(date, dt.datetime):
Date.__init__(self, date.date())
Time.__init__(self, date.time())
self.datetime = \
dt.datetime.combine(self.date, self.time)
return
if isinstance(date, basestring):
part = date.split('T')
Date.__init__(self, part[0])
Time.__init__(self, part[1], 0)
self.datetime = \
dt.datetime.combine(self.date, self.time)
self.__adjust()
return
raise ValueError, type(date)
def __adjust(self):
"""
Adjust for TZ offset.
"""
if not hasattr(self, 'offset'):
return
delta = self.tz.adjustment(self.offset)
try:
d = ( self.datetime + delta )
self.datetime = d
self.date = d.date()
self.time = d.time()
except OverflowError:
log.warn('"%s" caused overflow, not-adjusted', self.datetime)
def __str__(self):
return unicode(self)
def __unicode__(self):
s = []
s.append(Date.__unicode__(self))
s.append(Time.__unicode__(self))
return 'T'.join(s)
class UTC(DateTime):
"""
Represents current UTC time.
"""
def __init__(self, date=None):
if date is None:
date = dt.datetime.utcnow()
DateTime.__init__(self, date)
self.tz.local = 0
class Timezone:
"""
Timezone object used to do TZ conversions
@cvar local: The local TZ offset.
@type local: int
@cvar pattern: The regex pattern used to match the TZ.
@type pattern: re.Pattern
"""
pattern = re.compile('([zZ])|([\-\+][0-9]{2}:[0-9]{2})')
LOCAL = ( 0-time.timezone/60/60 )
def __init__(self, offset=None):
if offset is None:
offset = self.LOCAL
self.local = offset
@classmethod
def split(cls, s):
"""
Split the TZ from string.
@param s: A string containing a timezone
@type s: basestring
@return: The split parts.
@rtype: tuple
"""
m = cls.pattern.search(s)
if m is None:
return (s,)
x = m.start(0)
return (s[:x], s[x:])
def adjustment(self, offset):
"""
Get the adjustment to the I{local} TZ.
@return: The delta between I{offset} and local TZ.
@rtype: B{datetime}.I{timedelta}
"""
delta = ( self.local - offset )
return dt.timedelta(hours=delta)
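
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: parsing the
    # supported XML date/time formats.  The DateTime value is adjusted to
    # the local timezone, so the printed result depends on where it runs.
    d = Date('2010-11-25')
    print d                                 # 2010-11-25
    t = Time('10:30:22Z', adjusted=False)
    print t.hour(), t.minute(), t.second()  # 10 30 22
    ts = DateTime('2010-11-25T10:30:22.000Z')
    print ts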
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/sax/text.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains XML text classes.
"""
from suds import *
from suds.sax import *
class Text(unicode):
"""
An XML text object used to represent text content.
@ivar lang: The (optional) language.
@type lang: basestring
@ivar escaped: The (optional) XML special character escaped flag.
@type escaped: bool
"""
__slots__ = ('lang', 'escaped',)
@classmethod
def __valid(cls, *args):
return ( len(args) and args[0] is not None )
def __new__(cls, *args, **kwargs):
if cls.__valid(*args):
lang = kwargs.pop('lang', None)
escaped = kwargs.pop('escaped', False)
result = super(Text, cls).__new__(cls, *args, **kwargs)
result.lang = lang
result.escaped = escaped
else:
result = None
return result
def escape(self):
"""
Encode (escape) special XML characters.
@return: The text with XML special characters escaped.
@rtype: L{Text}
"""
if not self.escaped:
post = sax.encoder.encode(self)
escaped = ( post != self )
return Text(post, lang=self.lang, escaped=escaped)
return self
def unescape(self):
"""
Decode (unescape) special XML characters.
@return: The text with escaped XML special characters decoded.
@rtype: L{Text}
"""
if self.escaped:
post = sax.encoder.decode(self)
return Text(post, lang=self.lang)
return self
def trim(self):
post = self.strip()
return Text(post, lang=self.lang, escaped=self.escaped)
def __add__(self, other):
joined = u''.join((self, other))
result = Text(joined, lang=self.lang, escaped=self.escaped)
if isinstance(other, Text):
result.escaped = ( self.escaped or other.escaped )
return result
def __repr__(self):
s = [self]
if self.lang is not None:
s.append(' [%s]' % self.lang)
if self.escaped:
s.append(' <escaped>')
return ''.join(s)
def __getstate__(self):
state = {}
for k in self.__slots__:
state[k] = getattr(self, k)
return state
def __setstate__(self, state):
for k in self.__slots__:
setattr(self, k, state[k])
class Raw(Text):
"""
Raw text which is not XML escaped.
This may include I{string} XML.
"""
def escape(self):
return self
def unescape(self):
return self
def __add__(self, other):
joined = u''.join((self, other))
return Raw(joined, lang=self.lang)
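
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: escaping and
    # unescaping of XML special characters, and Raw text passing through
    # untouched.
    t = Text('a < b & c')
    e = t.escape()
    print e                         # a &lt; b &amp; c
    print e.unescape()              # a < b & c
    print Raw('<keep/>').escape()   # <keep/>  (not escaped)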
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/sax/__init__.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
@var encoder: A I{pluggable} XML special character processor used to
encode/decode strings.
@type encoder: L{Encoder}
"""
from suds.sax.enc import Encoder
#
# pluggable XML special character encoder.
#
encoder = Encoder()
def splitPrefix(name):
"""
Split the name into a tuple (I{prefix}, I{name}). The first element in
the tuple is I{None} when the name doesn't have a prefix.
@param name: A node name containing an optional prefix.
@type name: basestring
@return: A tuple containing the (2) parts of I{name}
@rtype: (I{prefix}, I{name})
"""
if isinstance(name, basestring) \
and ':' in name:
return tuple(name.split(':', 1))
else:
return (None, name)
class Namespace:
"""
The namespace class represents XML namespaces.
"""
default = (None, None)
xmlns = ('xml', 'http://www.w3.org/XML/1998/namespace')
xsdns = ('xs', 'http://www.w3.org/2001/XMLSchema')
xsins = ('xsi', 'http://www.w3.org/2001/XMLSchema-instance')
all = (xsdns, xsins)
@classmethod
def create(cls, p=None, u=None):
return (p, u)
@classmethod
def none(cls, ns):
return ( ns == cls.default )
@classmethod
def xsd(cls, ns):
try:
return cls.w3(ns) and ns[1].endswith('XMLSchema')
except:
pass
return False
@classmethod
def xsi(cls, ns):
try:
return cls.w3(ns) and ns[1].endswith('XMLSchema-instance')
except:
pass
return False
@classmethod
def xs(cls, ns):
return ( cls.xsd(ns) or cls.xsi(ns) )
@classmethod
def w3(cls, ns):
try:
return ns[1].startswith('http://www.w3.org')
except:
pass
return False
@classmethod
def isns(cls, ns):
try:
return isinstance(ns, tuple) and len(ns) == len(cls.default)
except:
pass
return False
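
if __name__ == '__main__':
    # Illustrative sketch, not part of the original module: the prefix
    # splitting helper and a few Namespace predicates.
    print splitPrefix('tns:Person')                      # ('tns', 'Person')
    print splitPrefix('Person')                          # (None, 'Person')
    print Namespace.xsd(Namespace.xsdns)                 # True
    print Namespace.w3(('x', 'http://example.com/ns'))   # False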
# ----- file: CouchPotato_CouchPotatoServer/libs/suds/sax/document.py -----
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{document} classes.
"""
from logging import getLogger
from suds import *
from suds.sax import *
from suds.sax.element import Element
log = getLogger(__name__)
class Document(Element):
""" simple document """
DECL = '<?xml version="1.0" encoding="UTF-8"?>'
def __init__(self, root=None):
Element.__init__(self, 'document')
if root is not None:
self.append(root)
def root(self):
if len(self.children):
return self.children[0]
else:
return None
def str(self):
s = []
s.append(self.DECL)
s.append('\n')
s.append(self.root().str())
return ''.join(s)
def plain(self):
s = []
s.append(self.DECL)
s.append(self.root().plain())
return ''.join(s)
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.str()
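#
# Usage sketch (added for illustration; assumes the bundled suds package is
# importable): wrap an element in a Document and serialize it.
#
if __name__ == '__main__':
    root = Element('person')
    root.set('id', '1')
    print(Document(root).str())   # XML declaration followed by <person id="1"/>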
| 1,810 | Python | .py | 50 | 30.66 | 76 | 0.663182 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,350 | parser.py | CouchPotato_CouchPotatoServer/libs/suds/sax/parser.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
The sax module contains a collection of classes that provide a
(D)ocument (O)bject (M)odel representation of an XML document.
The goal is to provide an easy, intuitive interface for managing XML
documents. Although the term DOM is used above, this model is
B{far} better.
XML namespaces in suds are represented using a (2) element tuple
containing the prefix and the URI. Eg: I{('tns', 'http://myns')}
"""
from logging import getLogger
import suds.metrics
from suds import *
from suds.sax import *
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sax.text import Text
from suds.sax.attribute import Attribute
from xml.sax import make_parser, InputSource, ContentHandler
from xml.sax.handler import feature_external_ges
from cStringIO import StringIO
log = getLogger(__name__)
class Handler(ContentHandler):
""" sax hanlder """
def __init__(self):
self.nodes = [Document()]
def startElement(self, name, attrs):
top = self.top()
node = Element(unicode(name), parent=top)
for a in attrs.getNames():
n = unicode(a)
v = unicode(attrs.getValue(a))
attribute = Attribute(n,v)
if self.mapPrefix(node, attribute):
continue
node.append(attribute)
node.charbuffer = []
top.append(node)
self.push(node)
def mapPrefix(self, node, attribute):
skip = False
if attribute.name == 'xmlns':
if len(attribute.value):
node.expns = unicode(attribute.value)
skip = True
elif attribute.prefix == 'xmlns':
prefix = attribute.name
node.nsprefixes[prefix] = unicode(attribute.value)
skip = True
return skip
def endElement(self, name):
name = unicode(name)
current = self.top()
if len(current.charbuffer):
current.text = Text(u''.join(current.charbuffer))
del current.charbuffer
if len(current):
current.trim()
currentqname = current.qname()
if name == currentqname:
self.pop()
else:
raise Exception('malformed document')
def characters(self, content):
text = unicode(content)
node = self.top()
node.charbuffer.append(text)
def push(self, node):
self.nodes.append(node)
return node
def pop(self):
return self.nodes.pop()
def top(self):
return self.nodes[len(self.nodes)-1]
class Parser:
""" SAX Parser """
@classmethod
def saxparser(cls):
p = make_parser()
p.setFeature(feature_external_ges, 0)
h = Handler()
p.setContentHandler(h)
return (p, h)
def parse(self, file=None, string=None):
"""
SAX parse XML text.
@param file: Parse a python I{file-like} object.
@type file: I{file-like} object.
@param string: Parse string XML.
@type string: str
"""
timer = metrics.Timer()
timer.start()
sax, handler = self.saxparser()
if file is not None:
sax.parse(file)
timer.stop()
metrics.log.debug('sax (%s) duration: %s', file, timer)
return handler.nodes[0]
if string is not None:
source = InputSource(None)
source.setByteStream(StringIO(string))
sax.parse(source)
timer.stop()
metrics.log.debug('%s\nsax duration: %s', string, timer)
return handler.nodes[0]
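#
# Usage sketch (added for illustration; assumes the bundled suds package is
# importable): parse an XML string and walk the resulting element tree.
#
if __name__ == '__main__':
    document = Parser().parse(string='<a><b>hello</b></a>')
    a = document.root()
    print(a.name)                     # a
    print(a.getChild('b').getText())  # hello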
| 4,461 | Python | .py | 120 | 29.816667 | 76 | 0.644703 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,351 | enc.py | CouchPotato_CouchPotatoServer/libs/suds/sax/enc.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides XML I{special character} encoder classes.
"""
import re
class Encoder:
"""
An XML special character encoder/decoder.
@cvar encodings: A mapping of special characters encoding.
@type encodings: [(str,str)]
@cvar decodings: A mapping of special characters decoding.
@type decodings: [(str,str)]
@cvar special: A list of special characters
@type special: [char]
"""
    encodings = \
        (( '&(?!(amp|lt|gt|quot|apos);)', '&amp;' ),( '<', '&lt;' ),( '>', '&gt;' ),( '"', '&quot;' ),("'", '&apos;' ))
    decodings = \
        (( '&lt;', '<' ),( '&gt;', '>' ),( '&quot;', '"' ),( '&apos;', "'" ),( '&amp;', '&' ))
special = \
('&', '<', '>', '"', "'")
def needsEncoding(self, s):
"""
Get whether string I{s} contains special characters.
@param s: A string to check.
@type s: str
@return: True if needs encoding.
@rtype: boolean
"""
if isinstance(s, basestring):
for c in self.special:
if c in s:
return True
return False
def encode(self, s):
"""
Encode special characters found in string I{s}.
@param s: A string to encode.
@type s: str
@return: The encoded string.
@rtype: str
"""
if isinstance(s, basestring) and self.needsEncoding(s):
for x in self.encodings:
s = re.sub(x[0], x[1], s)
return s
def decode(self, s):
"""
Decode special characters encodings found in string I{s}.
@param s: A string to decode.
@type s: str
@return: The decoded string.
@rtype: str
"""
if isinstance(s, basestring) and '&' in s:
for x in self.decodings:
s = s.replace(x[0], x[1])
return s
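#
# Usage sketch (added for illustration): round-trip a string containing
# XML special characters through the encoder defined above.
#
if __name__ == '__main__':
    enc = Encoder()
    encoded = enc.encode('a < b & "c"')
    print(encoded)              # a &lt; b &amp; &quot;c&quot;
    print(enc.decode(encoded))  # a < b & "c"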
| 2,720 | Python | .py | 72 | 30.833333 | 119 | 0.593524 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,352 | binding.py | CouchPotato_CouchPotatoServer/libs/suds/bindings/binding.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for (WS) SOAP bindings.
"""
from logging import getLogger
from suds import *
from suds.sax import Namespace
from suds.sax.parser import Parser
from suds.sax.document import Document
from suds.sax.element import Element
from suds.sudsobject import Factory, Object
from suds.mx import Content
from suds.mx.literal import Literal as MxLiteral
from suds.umx.basic import Basic as UmxBasic
from suds.umx.typed import Typed as UmxTyped
from suds.bindings.multiref import MultiRef
from suds.xsd.query import TypeQuery, ElementQuery
from suds.xsd.sxbasic import Element as SchemaElement
from suds.options import Options
from suds.plugin import PluginContainer
from copy import deepcopy
log = getLogger(__name__)
envns = ('SOAP-ENV', 'http://schemas.xmlsoap.org/soap/envelope/')
class Binding:
"""
    The soap binding class used to process outgoing and incoming
soap messages per the WSDL port binding.
@cvar replyfilter: The reply filter function.
@type replyfilter: (lambda s,r: r)
@ivar wsdl: The wsdl.
@type wsdl: L{suds.wsdl.Definitions}
@ivar schema: The collective schema contained within the wsdl.
@type schema: L{xsd.schema.Schema}
    @ivar options: A dictionary of options.
@type options: L{Options}
"""
replyfilter = (lambda s,r: r)
def __init__(self, wsdl):
"""
@param wsdl: A wsdl.
@type wsdl: L{wsdl.Definitions}
"""
self.wsdl = wsdl
self.multiref = MultiRef()
def schema(self):
return self.wsdl.schema
def options(self):
return self.wsdl.options
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxTyped(self.schema())
else:
return UmxBasic()
def marshaller(self):
"""
Get the appropriate XML encoder.
@return: An L{MxLiteral} marshaller.
@rtype: L{MxLiteral}
"""
return MxLiteral(self.schema(), self.options().xstq)
def param_defs(self, method):
"""
Get parameter definitions.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
        @param method: A service method.
@type method: I{service.Method}
@return: A collection of parameter definitions
@rtype: [I{pdef},..]
"""
raise Exception, 'not implemented'
def get_message(self, method, args, kwargs):
"""
Get the soap message for the specified method, args and soapheaders.
This is the entry point for creating the outbound soap message.
@param method: The method being invoked.
@type method: I{service.Method}
@param args: A list of args for the method invoked.
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The soap envelope.
@rtype: L{Document}
"""
content = self.headercontent(method)
header = self.header(content)
content = self.bodycontent(method, args, kwargs)
body = self.body(content)
env = self.envelope(header, body)
if self.options().prefixes:
body.normalizePrefixes()
env.promotePrefixes()
else:
env.refitPrefixes()
return Document(env)
def get_reply(self, method, reply):
"""
Process the I{reply} for the specified I{method} by sax parsing the I{reply}
and then unmarshalling into python object(s).
@param method: The name of the invoked method.
@type method: str
@param reply: The reply XML received after invoking the specified method.
@type reply: str
        @return: The unmarshalled reply. The returned value is an L{Object} or a
        I{list} depending on whether the service returns a single object or a
        collection.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
replyroot = sax.parse(string=reply)
plugins = PluginContainer(self.options().plugins)
plugins.message.parsed(reply=replyroot)
soapenv = replyroot.getChild('Envelope')
soapenv.promotePrefixes()
soapbody = soapenv.getChild('Body')
self.detect_fault(soapbody)
soapbody = self.multiref.process(soapbody)
nodes = self.replycontent(method, soapbody)
rtypes = self.returned_types(method)
if len(rtypes) > 1:
result = self.replycomposite(rtypes, nodes)
return (replyroot, result)
if len(rtypes) == 1:
if rtypes[0].unbounded():
result = self.replylist(rtypes[0], nodes)
return (replyroot, result)
if len(nodes):
unmarshaller = self.unmarshaller()
resolved = rtypes[0].resolve(nobuiltin=True)
result = unmarshaller.process(nodes[0], resolved)
return (replyroot, result)
return (replyroot, None)
def detect_fault(self, body):
"""
Detect I{hidden} soapenv:Fault element in the soap body.
@param body: The soap envelope body.
@type body: L{Element}
@raise WebFault: When found.
"""
fault = body.getChild('Fault', envns)
if fault is None:
return
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, fault)
return self
def replylist(self, rt, nodes):
"""
        Construct a I{list} reply. This method is called when it has been detected
that the reply is a list.
@param rt: The return I{type}.
@type rt: L{suds.xsd.sxbase.SchemaObject}
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: A list of I{unmarshalled} objects.
@rtype: [L{Object},...]
"""
result = []
resolved = rt.resolve(nobuiltin=True)
unmarshaller = self.unmarshaller()
for node in nodes:
sobject = unmarshaller.process(node, resolved)
result.append(sobject)
return result
def replycomposite(self, rtypes, nodes):
"""
Construct a I{composite} reply. This method is called when it has been
detected that the reply has multiple root nodes.
@param rtypes: A list of known return I{types}.
@type rtypes: [L{suds.xsd.sxbase.SchemaObject},...]
@param nodes: A collection of XML nodes.
@type nodes: [L{Element},...]
@return: The I{unmarshalled} composite object.
@rtype: L{Object},...
"""
dictionary = {}
for rt in rtypes:
dictionary[rt.name] = rt
unmarshaller = self.unmarshaller()
composite = Factory.object('reply')
for node in nodes:
tag = node.name
rt = dictionary.get(tag, None)
if rt is None:
if node.get('id') is None:
raise Exception('<%s/> not mapped to message part' % tag)
else:
continue
resolved = rt.resolve(nobuiltin=True)
sobject = unmarshaller.process(node, resolved)
value = getattr(composite, tag, None)
if value is None:
if rt.unbounded():
value = []
setattr(composite, tag, value)
value.append(sobject)
else:
setattr(composite, tag, sobject)
else:
if not isinstance(value, list):
value = [value,]
setattr(composite, tag, value)
value.append(sobject)
return composite
def get_fault(self, reply):
"""
Extract the fault from the specified soap reply. If I{faults} is True, an
exception is raised. Otherwise, the I{unmarshalled} fault L{Object} is
returned. This method is called when the server raises a I{web fault}.
@param reply: A soap reply message.
@type reply: str
@return: A fault object.
@rtype: tuple ( L{Element}, L{Object} )
"""
reply = self.replyfilter(reply)
sax = Parser()
faultroot = sax.parse(string=reply)
soapenv = faultroot.getChild('Envelope')
soapbody = soapenv.getChild('Body')
fault = soapbody.getChild('Fault')
unmarshaller = self.unmarshaller(False)
p = unmarshaller.process(fault)
if self.options().faults:
raise WebFault(p, faultroot)
return (faultroot, p.detail)
def mkparam(self, method, pdef, object):
"""
Builds a parameter for the specified I{method} using the parameter
definition (pdef) and the specified value (object).
@param method: A method name.
@type method: str
@param pdef: A parameter definition.
@type pdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The parameter value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
content = \
Content(tag=pdef[0],
value=object,
type=pdef[1],
real=pdef[1].resolve())
return marshaller.process(content)
def mkheader(self, method, hdef, object):
"""
Builds a soapheader for the specified I{method} using the header
definition (hdef) and the specified value (object).
@param method: A method name.
@type method: str
@param hdef: A header definition.
@type hdef: tuple: (I{name}, L{xsd.sxbase.SchemaObject})
@param object: The header value.
@type object: any
@return: The parameter fragment.
@rtype: L{Element}
"""
marshaller = self.marshaller()
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkheader(method, hdef, item))
return tags
content = Content(tag=hdef[0], value=object, type=hdef[1])
return marshaller.process(content)
def envelope(self, header, body):
"""
        Build the B{<Envelope/>} for an outbound soap message.
@param header: The soap message B{header}.
@type header: L{Element}
@param body: The soap message B{body}.
@type body: L{Element}
@return: The soap envelope containing the body and header.
@rtype: L{Element}
"""
env = Element('Envelope', ns=envns)
env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
env.append(header)
env.append(body)
return env
def header(self, content):
"""
        Build the B{<Header/>} for an outbound soap message.
        @param content: The header content.
        @type content: L{Element}
        @return: the soap header fragment.
@rtype: L{Element}
"""
header = Element('Header', ns=envns)
header.append(content)
return header
def bodycontent(self, method, args, kwargs):
"""
Get the content for the soap I{body} node.
@param method: A service method.
@type method: I{service.Method}
@param args: method parameter values
@type args: list
@param kwargs: Named (keyword) args for the method invoked.
@type kwargs: dict
@return: The xml content for the <body/>
@rtype: [L{Element},..]
"""
raise Exception, 'not implemented'
def headercontent(self, method):
"""
Get the content for the soap I{Header} node.
@param method: A service method.
@type method: I{service.Method}
        @return: The xml content for the <Header/>
@rtype: [L{Element},..]
"""
n = 0
content = []
wsse = self.options().wsse
if wsse is not None:
content.append(wsse.xml())
headers = self.options().soapheaders
if not isinstance(headers, (tuple,list,dict)):
headers = (headers,)
if len(headers) == 0:
return content
pts = self.headpart_types(method)
if isinstance(headers, (tuple,list)):
for header in headers:
if isinstance(header, Element):
content.append(deepcopy(header))
continue
if len(pts) == n: break
h = self.mkheader(method, pts[n], header)
ns = pts[n][1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
n += 1
else:
for pt in pts:
header = headers.get(pt[0])
if header is None:
continue
h = self.mkheader(method, pt, header)
ns = pt[1].namespace('ns0')
h.setPrefix(ns[0], ns[1])
content.append(h)
return content
def replycontent(self, method, body):
"""
Get the reply body content.
@param method: A service method.
@type method: I{service.Method}
@param body: The soap body
@type body: L{Element}
@return: the body content
@rtype: [L{Element},...]
"""
raise Exception, 'not implemented'
def body(self, content):
"""
        Build the B{<Body/>} for an outbound soap message.
@param content: The body content.
@type content: L{Element}
@return: the soap body fragment.
@rtype: L{Element}
"""
body = Element('Body', ns=envns)
body.append(content)
return body
def bodypart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
parts = method.soap.input.body.parts
else:
parts = method.soap.output.body.parts
for p in parts:
if p.element is not None:
query = ElementQuery(p.element)
else:
query = TypeQuery(p.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if p.type is not None:
pt = PartElement(p.name, pt)
if input:
if pt.name is None:
result.append((p.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def headpart_types(self, method, input=True):
"""
Get a list of I{parameter definitions} (pdef) defined for the specified method.
Each I{pdef} is a tuple (I{name}, L{xsd.sxbase.SchemaObject})
@param method: A service method.
@type method: I{service.Method}
@param input: Defines input/output message.
@type input: boolean
@return: A list of parameter definitions
@rtype: [I{pdef},]
"""
result = []
if input:
headers = method.soap.input.headers
else:
headers = method.soap.output.headers
for header in headers:
part = header.part
if part.element is not None:
query = ElementQuery(part.element)
else:
query = TypeQuery(part.type)
pt = query.execute(self.schema())
if pt is None:
raise TypeNotFound(query.ref)
if part.type is not None:
pt = PartElement(part.name, pt)
if input:
if pt.name is None:
result.append((part.name, pt))
else:
result.append((pt.name, pt))
else:
result.append(pt)
return result
def returned_types(self, method):
"""
Get the L{xsd.sxbase.SchemaObject} returned by the I{method}.
@param method: A service method.
@type method: I{service.Method}
        @return: The types returned by the method.
@rtype: [I{rtype},..]
"""
result = []
for rt in self.bodypart_types(method, input=False):
result.append(rt)
return result
class PartElement(SchemaElement):
"""
A part used to represent a message part when the part
    references a schema type and is thus assumed to be an element.
@ivar resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
def __init__(self, name, resolved):
"""
@param name: The part name.
@type name: str
@param resolved: The part type.
@type resolved: L{suds.xsd.sxbase.SchemaObject}
"""
root = Element('element', ns=Namespace.xsdns)
SchemaElement.__init__(self, resolved.schema, root)
self.__resolved = resolved
self.name = name
self.form_qualified = False
def implany(self):
return self
def optional(self):
return True
def namespace(self, prefix=None):
return Namespace.default
def resolve(self, nobuiltin=False):
if nobuiltin and self.__resolved.builtin():
return self
else:
return self.__resolved
| 19,047 | Python | .py | 498 | 28.307229 | 87 | 0.590805 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,353 | __init__.py | CouchPotato_CouchPotatoServer/libs/suds/bindings/__init__.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support Web Services (SOAP)
bindings.
"""
| 917 | Python | .py | 19 | 47.210526 | 76 | 0.773942 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,354 | rpc.py | CouchPotato_CouchPotatoServer/libs/suds/bindings/rpc.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for the (WS) SOAP I{rpc/literal} and I{rpc/encoded} bindings.
"""
from logging import getLogger
from suds import *
from suds.mx.encoded import Encoded as MxEncoded
from suds.umx.encoded import Encoded as UmxEncoded
from suds.bindings.binding import Binding, envns
from suds.sax.element import Element
log = getLogger(__name__)
encns = ('SOAP-ENC', 'http://schemas.xmlsoap.org/soap/encoding/')
class RPC(Binding):
"""
RPC/Literal binding style.
"""
def param_defs(self, method):
return self.bodypart_types(method)
def envelope(self, header, body):
env = Binding.envelope(self, header, body)
env.addPrefix(encns[0], encns[1])
env.set('%s:encodingStyle' % envns[0],
'http://schemas.xmlsoap.org/soap/encoding/')
return env
def bodycontent(self, method, args, kwargs):
n = 0
root = self.method(method)
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
p = self.mkparam(method, pd, value)
if p is not None:
root.append(p)
n += 1
return root
def replycontent(self, method, body):
return body[0].children
def method(self, method):
"""
Get the document root. For I{rpc/(literal|encoded)}, this is the
        name of the method qualified by the schema tns.
@param method: A service method.
@type method: I{service.Method}
@return: A root element.
@rtype: L{Element}
"""
ns = method.soap.input.body.namespace
if ns[0] is None:
ns = ('ns0', ns[1])
method = Element(method.name, ns=ns)
return method
class Encoded(RPC):
"""
RPC/Encoded (section 5) binding style.
"""
def marshaller(self):
return MxEncoded(self.schema())
def unmarshaller(self, typed=True):
"""
Get the appropriate XML decoder.
@return: Either the (basic|typed) unmarshaller.
@rtype: L{UmxTyped}
"""
if typed:
return UmxEncoded(self.schema())
else:
return RPC.unmarshaller(self, typed)
| 3,141 | Python | .py | 83 | 30.759036 | 78 | 0.647822 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,355 | document.py | CouchPotato_CouchPotatoServer/libs/suds/bindings/document.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for the (WS) SOAP I{document/literal}.
"""
from logging import getLogger
from suds import *
from suds.bindings.binding import Binding
from suds.sax.element import Element
log = getLogger(__name__)
class Document(Binding):
"""
The document/literal style. Literal is the only (@use) supported
since document/encoded is pretty much dead.
Although the soap specification supports multiple documents within the soap
<body/>, it is very uncommon. As such, suds presents an I{RPC} view of
service methods defined with a single document parameter. This is done so
that the user can pass individual parameters instead of one, single document.
To support the complete specification, service methods defined with multiple documents
(multiple message parts), must present a I{document} view for that method.
"""
def bodycontent(self, method, args, kwargs):
#
# The I{wrapped} vs I{bare} style is detected in 2 ways.
# If there is 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
if not len(method.soap.input.body.parts):
return ()
wrapped = method.soap.input.body.wrapped
if wrapped:
pts = self.bodypart_types(method)
root = self.document(pts[0])
else:
root = []
n = 0
for pd in self.param_defs(method):
if n < len(args):
value = args[n]
else:
value = kwargs.get(pd[0])
n += 1
p = self.mkparam(method, pd, value)
if p is None:
continue
if not wrapped:
ns = pd[1].namespace('ns0')
p.setPrefix(ns[0], ns[1])
root.append(p)
return root
def replycontent(self, method, body):
wrapped = method.soap.output.body.wrapped
if wrapped:
return body[0].children
else:
return body.children
def document(self, wrapper):
"""
Get the document root. For I{document/literal}, this is the
        name of the wrapper element qualified by the schema tns.
@param wrapper: The method name.
@type wrapper: L{xsd.sxbase.SchemaObject}
@return: A root element.
@rtype: L{Element}
"""
tag = wrapper[1].name
ns = wrapper[1].namespace('ns0')
d = Element(tag, ns=ns)
return d
def mkparam(self, method, pdef, object):
#
# Expand list parameters into individual parameters
# each with the type information. This is because in document
# arrays are simply unbounded elements.
#
if isinstance(object, (list, tuple)):
tags = []
for item in object:
tags.append(self.mkparam(method, pdef, item))
return tags
else:
return Binding.mkparam(self, method, pdef, object)
def param_defs(self, method):
#
# Get parameter definitions for document literal.
# The I{wrapped} vs I{bare} style is detected in 2 ways.
# If there is 2+ parts in the message then it is I{bare}.
# If there is only (1) part and that part resolves to a builtin then
# it is I{bare}. Otherwise, it is I{wrapped}.
#
pts = self.bodypart_types(method)
wrapped = method.soap.input.body.wrapped
if not wrapped:
return pts
result = []
# wrapped
for p in pts:
resolved = p[1].resolve()
for child, ancestry in resolved:
if child.isattr():
continue
if self.bychoice(ancestry):
log.debug(
'%s\ncontained by <choice/>, excluded as param for %s()',
child,
method.name)
continue
result.append((child.name, child))
return result
def returned_types(self, method):
result = []
wrapped = method.soap.output.body.wrapped
rts = self.bodypart_types(method, input=False)
if wrapped:
for pt in rts:
resolved = pt.resolve(nobuiltin=True)
for child, ancestry in resolved:
result.append(child)
break
else:
result += rts
return result
def bychoice(self, ancestry):
"""
The ancestry contains a <choice/>
@param ancestry: A list of ancestors.
@type ancestry: list
@return: True if contains <choice/>
@rtype: boolean
"""
for x in ancestry:
if x.choice():
return True
return False
| 5,792 | Python | .py | 148 | 29.587838 | 90 | 0.60193 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,356 | multiref.py | CouchPotato_CouchPotatoServer/libs/suds/bindings/multiref.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides classes for handling soap multirefs.
"""
from logging import getLogger
from suds import *
from suds.sax.element import Element
log = getLogger(__name__)
soapenc = (None, 'http://schemas.xmlsoap.org/soap/encoding/')
class MultiRef:
"""
Resolves and replaces multirefs.
@ivar nodes: A list of non-multiref nodes.
@type nodes: list
@ivar catalog: A dictionary of multiref nodes by id.
@type catalog: dict
"""
def __init__(self):
self.nodes = []
self.catalog = {}
def process(self, body):
"""
Process the specified soap envelope body and replace I{multiref} node
references with the contents of the referenced node.
@param body: A soap envelope body node.
@type body: L{Element}
@return: The processed I{body}
@rtype: L{Element}
"""
self.nodes = []
self.catalog = {}
self.build_catalog(body)
self.update(body)
body.children = self.nodes
return body
def update(self, node):
"""
Update the specified I{node} by replacing the I{multiref} references with
the contents of the referenced nodes and remove the I{href} attribute.
@param node: A node to update.
@type node: L{Element}
@return: The updated node
@rtype: L{Element}
"""
self.replace_references(node)
for c in node.children:
self.update(c)
return node
def replace_references(self, node):
"""
        Replace the I{multiref} references with the contents of the
        referenced nodes and remove the I{href} attribute. Warning: the
        I{ref} is not cloned.
@param node: A node to update.
@type node: L{Element}
"""
href = node.getAttribute('href')
if href is None:
return
id = href.getValue()
ref = self.catalog.get(id)
if ref is None:
log.error('soap multiref: %s, not-resolved', id)
return
node.append(ref.children)
node.setText(ref.getText())
for a in ref.attributes:
if a.name != 'id':
node.append(a)
node.remove(href)
def build_catalog(self, body):
"""
Create the I{catalog} of multiref nodes by id and the list of
non-multiref nodes.
@param body: A soap envelope body node.
@type body: L{Element}
"""
for child in body.children:
if self.soaproot(child):
self.nodes.append(child)
id = child.get('id')
if id is None: continue
key = '#%s' % id
self.catalog[key] = child
def soaproot(self, node):
"""
Get whether the specified I{node} is a soap encoded root.
This is determined by examining @soapenc:root='1'.
The node is considered to be a root when the attribute
is not specified.
@param node: A node to evaluate.
@type node: L{Element}
@return: True if a soap encoded root.
@rtype: bool
"""
root = node.getAttribute('root', ns=soapenc)
if root is None:
return True
else:
return ( root.value == '1' )
| 4,181 | Python | .py | 114 | 28.736842 | 81 | 0.62338 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,357 | encoded.py | CouchPotato_CouchPotatoServer/libs/suds/mx/encoded.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides encoded I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.literal import Literal
from suds.mx.typer import Typer
from suds.sudsobject import Factory, Object
from suds.xsd.query import TypeQuery
log = getLogger(__name__)
#
# Add encoded extensions
# aty = The soap (section 5) encoded array type.
#
Content.extensions.append('aty')
class Encoded(Literal):
"""
A SOAP section (5) encoding marshaller.
This marshaller supports rpc/encoded soap styles.
"""
def start(self, content):
#
# For soap encoded arrays, the 'aty' (array type) information
# is extracted and added to the 'content'. Then, the content.value
# is replaced with an object containing an 'item=[]' attribute
# containing values that are 'typed' suds objects.
#
start = Literal.start(self, content)
if start and isinstance(content.value, (list,tuple)):
resolved = content.type.resolve()
for c in resolved:
if hasattr(c[0], 'aty'):
content.aty = (content.tag, c[0].aty)
self.cast(content)
break
return start
def end(self, parent, content):
#
# For soap encoded arrays, the soapenc:arrayType attribute is
# added with proper type and size information.
# Eg: soapenc:arrayType="xs:int[3]"
#
Literal.end(self, parent, content)
if content.aty is None:
return
tag, aty = content.aty
ns0 = ('at0', aty[1])
ns1 = ('at1', 'http://schemas.xmlsoap.org/soap/encoding/')
array = content.value.item
child = parent.getChild(tag)
child.addPrefix(ns0[0], ns0[1])
child.addPrefix(ns1[0], ns1[1])
name = '%s:arrayType' % ns1[0]
value = '%s:%s[%d]' % (ns0[0], aty[0], len(array))
child.set(name, value)
def encode(self, node, content):
if content.type.any():
Typer.auto(node, content.value)
return
if content.real.any():
Typer.auto(node, content.value)
return
ns = None
name = content.real.name
if self.xstq:
ns = content.real.namespace()
Typer.manual(node, name, ns)
def cast(self, content):
"""
Cast the I{untyped} list items found in content I{value}.
Each items contained in the list is checked for XSD type information.
Items (values) that are I{untyped}, are replaced with suds objects and
type I{metadata} is added.
@param content: The content holding the collection.
@type content: L{Content}
@return: self
@rtype: L{Encoded}
"""
aty = content.aty[1]
resolved = content.type.resolve()
array = Factory.object(resolved.name)
array.item = []
query = TypeQuery(aty)
ref = query.execute(self.schema)
if ref is None:
            raise TypeNotFound(query.ref)
for x in content.value:
if isinstance(x, (list, tuple)):
array.item.append(x)
continue
if isinstance(x, Object):
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
if isinstance(x, dict):
x = Factory.object(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
continue
x = Factory.property(ref.name, x)
md = x.__metadata__
md.sxtype = ref
array.item.append(x)
content.value = array
return self
| 4,651 | Python | .py | 123 | 29.243902 | 78 | 0.609257 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,358 | typer.py | CouchPotato_CouchPotatoServer/libs/suds/mx/typer.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides sx typing classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.sax import Namespace as NS
from suds.sax.text import Text
log = getLogger(__name__)
class Typer:
"""
Provides XML node typing as either automatic or manual.
@cvar types: A dict of class to xs type mapping.
@type types: dict
"""
types = {
int : ('int', NS.xsdns),
long : ('long', NS.xsdns),
float : ('float', NS.xsdns),
str : ('string', NS.xsdns),
unicode : ('string', NS.xsdns),
Text : ('string', NS.xsdns),
bool : ('boolean', NS.xsdns),
}
@classmethod
def auto(cls, node, value=None):
"""
Automatically set the node's xsi:type attribute based on either I{value}'s
class or the class of the node's text. When I{value} is an unmapped class,
        the string type (xs:string) is used as the fallback.
@param node: An XML node
@type node: L{sax.element.Element}
@param value: An object that is or would be the node's text.
@type value: I{any}
@return: The specified node.
@rtype: L{sax.element.Element}
"""
if value is None:
value = node.getText()
if isinstance(value, Object):
known = cls.known(value)
if known.name is None:
return node
tm = (known.name, known.namespace())
else:
tm = cls.types.get(value.__class__, cls.types.get(str))
cls.manual(node, *tm)
return node
@classmethod
def manual(cls, node, tval, ns=None):
"""
        Set the node's xsi:type attribute to the specified schema type, then
        add the referenced prefix(es) to the node's prefix mapping.
@param node: An XML node
@type node: L{sax.element.Element}
@param tval: The name of the schema type.
@type tval: str
@param ns: The XML namespace of I{tval}.
@type ns: (prefix, uri)
@return: The specified node.
@rtype: L{sax.element.Element}
"""
xta = ':'.join((NS.xsins[0], 'type'))
node.addPrefix(NS.xsins[0], NS.xsins[1])
if ns is None:
node.set(xta, tval)
else:
ns = cls.genprefix(node, ns)
qname = ':'.join((ns[0], tval))
node.set(xta, qname)
node.addPrefix(ns[0], ns[1])
return node
@classmethod
def genprefix(cls, node, ns):
"""
Generate a prefix.
@param node: An XML node on which the prefix will be used.
@type node: L{sax.element.Element}
        @param ns: A namespace needing a unique prefix.
@type ns: (prefix, uri)
@return: The I{ns} with a new prefix.
"""
for n in range(1, 1024):
p = 'ns%d' % n
u = node.resolvePrefix(p, default=None)
if u is None or u == ns[1]:
return (p, ns[1])
raise Exception('auto prefix, exhausted')
@classmethod
def known(cls, object):
try:
md = object.__metadata__
known = md.sxtype
return known
except:
pass
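#
# Usage sketch (added for illustration; assumes the bundled suds package is
# importable): let the typer stamp an xsi:type attribute onto a node based
# on the python type of a value.
#
if __name__ == '__main__':
    from suds.sax.element import Element
    node = Element('age')
    node.setText('42')
    Typer.auto(node, 42)   # applies the xs:int mapping with a generated prefix
    print(node)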
| 4,116 | Python | .py | 112 | 28.982143 | 83 | 0.60393 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,359 | basic.py | CouchPotato_CouchPotatoServer/libs/suds/mx/basic.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides basic I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.core import Core
log = getLogger(__name__)
class Basic(Core):
"""
A I{basic} (untyped) marshaller.
"""
def process(self, value, tag=None):
"""
Process (marshal) the tag with the specified value using the
optional type information.
@param value: The value (content) of the XML node.
@type value: (L{Object}|any)
@param tag: The (optional) tag name for the value. The default is
value.__class__.__name__
@type tag: str
@return: An xml node.
@rtype: L{Element}
"""
content = Content(tag=tag, value=value)
result = Core.process(self, content)
return result
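#
# Usage sketch (added for illustration; assumes the bundled suds package is
# importable and that Core.process derives the tag from the value's class
# name when no tag is given): marshal a plain suds object without typing.
#
if __name__ == '__main__':
    from suds.sudsobject import Factory
    person = Factory.object('Person')
    person.name = 'jeff'
    person.age = 42
    # a <Person/> element with <name/> and <age/> children (formatting may vary)
    print(Basic().process(person))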
| 1,657 | Python | .py | 42 | 35.071429 | 76 | 0.698007 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,360 | literal.py | CouchPotato_CouchPotatoServer/libs/suds/mx/literal.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides literal I{marshaller} classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.core import Core
from suds.mx.typer import Typer
from suds.resolver import GraphResolver, Frame
from suds.sax.element import Element
from suds.sudsobject import Factory
log = getLogger(__name__)
#
# Add typed extensions
# type = The expected xsd type
# real = The 'true' XSD type
# ancestry = The 'type' ancestry
#
Content.extensions.append('type')
Content.extensions.append('real')
Content.extensions.append('ancestry')
class Typed(Core):
"""
A I{typed} marshaller.
This marshaller is semi-typed as needed to support both
I{document/literal} and I{rpc/literal} soap message styles.
@ivar schema: An xsd schema.
@type schema: L{xsd.schema.Schema}
@ivar resolver: A schema type resolver.
@type resolver: L{GraphResolver}
"""
def __init__(self, schema, xstq=True):
"""
@param schema: A schema object
@type schema: L{xsd.schema.Schema}
@param xstq: The B{x}ml B{s}chema B{t}ype B{q}ualified flag indicates
that the I{xsi:type} attribute values should be qualified by namespace.
@type xstq: bool
"""
Core.__init__(self)
self.schema = schema
self.xstq = xstq
self.resolver = GraphResolver(self.schema)
def reset(self):
self.resolver.reset()
def start(self, content):
#
# Start marshalling the 'content' by ensuring that both the
# 'content' _and_ the resolver are primed with the XSD type
# information. The 'content' value is both translated and
# sorted based on the XSD type. Only values that are objects
# have their attributes sorted.
#
log.debug('starting content:\n%s', content)
if content.type is None:
name = content.tag
if name.startswith('_'):
name = '@'+name[1:]
content.type = self.resolver.find(name, content.value)
if content.type is None:
raise TypeNotFound(content.tag)
else:
known = None
if isinstance(content.value, Object):
known = self.resolver.known(content.value)
if known is None:
                log.debug('object %s has no type information', content.value)
known = content.type
frame = Frame(content.type, resolved=known)
self.resolver.push(frame)
frame = self.resolver.top()
content.real = frame.resolved
content.ancestry = frame.ancestry
self.translate(content)
self.sort(content)
if self.skip(content):
log.debug('skipping (optional) content:\n%s', content)
self.resolver.pop()
return False
else:
return True
def suspend(self, content):
#
# Suspend to process a list content. Primarily, this
# involves popping the 'list' content off the resolver's
# stack so the list items can be marshalled.
#
self.resolver.pop()
def resume(self, content):
#
# Resume processing a list content. To do this, we
# really need to simply push the 'list' content
# back onto the resolver stack.
#
self.resolver.push(Frame(content.type))
def end(self, parent, content):
#
# End processing the content. Make sure the content
# ending matches the top of the resolver stack since for
# list processing we play games with the resolver stack.
#
log.debug('ending content:\n%s', content)
current = self.resolver.top().type
if current == content.type:
self.resolver.pop()
else:
raise Exception, \
'content (end) mismatch: top=(%s) cont=(%s)' % \
(current, content)
def node(self, content):
#
# Create an XML node and namespace qualify as defined
# by the schema (elementFormDefault).
#
ns = content.type.namespace()
if content.type.form_qualified:
node = Element(content.tag, ns=ns)
node.addPrefix(ns[0], ns[1])
else:
node = Element(content.tag)
self.encode(node, content)
log.debug('created - node:\n%s', node)
return node
def setnil(self, node, content):
#
# Set the 'node' nil only if the XSD type
# specifies that it is permitted.
#
if content.type.nillable:
node.setnil()
def setdefault(self, node, content):
#
# Set the node to the default value specified
# by the XSD type.
#
default = content.type.default
if default is None:
pass
else:
node.setText(default)
return default
def optional(self, content):
if content.type.optional():
return True
for a in content.ancestry:
if a.optional():
return True
return False
def encode(self, node, content):
# Add (soap) encoding information only if the resolved
# type is derived by extension. Further, the xsi:type values
# is qualified by namespace only if the content (tag) and
# referenced type are in different namespaces.
if content.type.any():
return
if not content.real.extension():
return
if content.type.resolve() == content.real:
return
ns = None
name = content.real.name
if self.xstq:
ns = content.real.namespace('ns1')
Typer.manual(node, name, ns)
def skip(self, content):
"""
Get whether to skip this I{content}.
Should be skipped when the content is optional
and either the value=None or the value is an empty list.
@param content: The content to skip.
@type content: L{Object}
@return: True if content is to be skipped.
@rtype: bool
"""
if self.optional(content):
v = content.value
if v is None:
return True
if isinstance(v, (list,tuple)) and len(v) == 0:
return True
return False
def translate(self, content):
"""
Translate using the XSD type information.
Python I{dict} is translated to a suds object. Most
        importantly, primitive values are translated from python
types to XML types using the XSD type.
@param content: The content to translate.
@type content: L{Object}
@return: self
@rtype: L{Typed}
"""
v = content.value
if v is None:
return
if isinstance(v, dict):
cls = content.real.name
content.value = Factory.object(cls, v)
md = content.value.__metadata__
md.sxtype = content.type
return
v = content.real.translate(v, False)
content.value = v
return self
def sort(self, content):
"""
Sort suds object attributes based on ordering defined
in the XSD type information.
@param content: The content to sort.
@type content: L{Object}
@return: self
@rtype: L{Typed}
"""
v = content.value
if isinstance(v, Object):
md = v.__metadata__
md.ordering = self.ordering(content.real)
return self
def ordering(self, type):
"""
Get the attribute ordering defined in the specified
XSD type information.
@param type: An XSD type object.
@type type: SchemaObject
@return: An ordered list of attribute names.
@rtype: list
"""
result = []
for child, ancestry in type.resolve():
name = child.name
if child.name is None:
continue
if child.isattr():
name = '_%s' % child.name
result.append(name)
return result
class Literal(Typed):
"""
A I{literal} marshaller.
This marshaller is semi-typed as needed to support both
I{document/literal} and I{rpc/literal} soap message styles.
"""
pass
| 9,517 | Python | .py | 265 | 26.996226 | 83 | 0.606256 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,361 | __init__.py | CouchPotato_CouchPotatoServer/libs/suds/mx/__init__.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides modules containing classes to support
marshalling (XML).
"""
from suds.sudsobject import Object
class Content(Object):
"""
Marshaller Content.
@ivar tag: The content tag.
@type tag: str
@ivar value: The content's value.
@type value: I{any}
"""
extensions = []
def __init__(self, tag=None, value=None, **kwargs):
"""
@param tag: The content tag.
@type tag: str
@param value: The content's value.
@type value: I{any}
"""
Object.__init__(self)
self.tag = tag
self.value = value
for k,v in kwargs.items():
setattr(self, k, v)
def __getattr__(self, name):
if name not in self.__dict__:
if name in self.extensions:
v = None
setattr(self, name, v)
else:
raise AttributeError, \
'Content has no attribute %s' % name
else:
v = self.__dict__[name]
return v
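#
# Usage sketch (added for illustration): registered extension attributes
# read as None instead of raising AttributeError, which the marshallers
# rely on. The 'aty' registration mirrors what the encoded marshaller does.
#
if __name__ == '__main__':
    Content.extensions.append('aty')
    c = Content(tag='item', value=1)
    print(c.tag)   # item
    print(c.aty)   # None (lazily created extension attribute)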
| 1,878 | Python | .py | 52 | 29.596154 | 76 | 0.641111 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,362 | appender.py | CouchPotato_CouchPotatoServer/libs/suds/mx/appender.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides appender classes for I{marshalling}.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.sudsobject import footprint
from suds.sudsobject import Object, Property
from suds.sax.element import Element
from suds.sax.text import Text
from copy import deepcopy
log = getLogger(__name__)
class Matcher:
"""
Appender matcher.
@ivar cls: A class object.
@type cls: I{classobj}
"""
def __init__(self, cls):
"""
@param cls: A class object.
@type cls: I{classobj}
"""
self.cls = cls
def __eq__(self, x):
if self.cls is None:
return ( x is None )
else:
return isinstance(x, self.cls)
class ContentAppender:
"""
Appender used to add content to marshalled objects.
@ivar default: The default appender.
@type default: L{Appender}
@ivar appenders: A I{table} of appenders mapped by class.
@type appenders: I{table}
"""
def __init__(self, marshaller):
"""
@param marshaller: A marshaller.
@type marshaller: L{suds.mx.core.Core}
"""
self.default = PrimativeAppender(marshaller)
self.appenders = (
(Matcher(None),
NoneAppender(marshaller)),
(Matcher(null),
NoneAppender(marshaller)),
(Matcher(Property),
PropertyAppender(marshaller)),
(Matcher(Object),
ObjectAppender(marshaller)),
(Matcher(Element),
ElementAppender(marshaller)),
(Matcher(Text),
TextAppender(marshaller)),
(Matcher(list),
ListAppender(marshaller)),
(Matcher(tuple),
ListAppender(marshaller)),
(Matcher(dict),
DictAppender(marshaller)),
)
def append(self, parent, content):
"""
Select an appender and append the content to parent.
@param parent: A parent node.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Content}
"""
appender = self.default
for a in self.appenders:
if a[0] == content.value:
appender = a[1]
break
appender.append(parent, content)
class Appender:
"""
An appender used by the marshaller to append content.
@ivar marshaller: A marshaller.
@type marshaller: L{suds.mx.core.Core}
"""
def __init__(self, marshaller):
"""
@param marshaller: A marshaller.
@type marshaller: L{suds.mx.core.Core}
"""
self.marshaller = marshaller
def node(self, content):
"""
Create and return an XML node that is qualified
using the I{type}. Also, make sure all referenced namespace
prefixes are declared.
@param content: The content for which a node is being created.
@type content: L{Object}
@return: A new node.
@rtype: L{Element}
"""
return self.marshaller.node(content)
def setnil(self, node, content):
"""
Set the value of the I{node} to nil.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content for which processing has ended.
@type content: L{Object}
"""
self.marshaller.setnil(node, content)
def setdefault(self, node, content):
"""
Set the value of the I{node} to a default value.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content for which processing has ended.
@type content: L{Object}
@return: The default.
"""
return self.marshaller.setdefault(node, content)
def optional(self, content):
"""
Get whether the specified content is optional.
@param content: The content which to check.
@type content: L{Content}
"""
return self.marshaller.optional(content)
def suspend(self, content):
"""
Notify I{marshaller} that appending this content has suspended.
@param content: The content for which processing has been suspended.
@type content: L{Object}
"""
self.marshaller.suspend(content)
def resume(self, content):
"""
Notify I{marshaller} that appending this content has resumed.
@param content: The content for which processing has been resumed.
@type content: L{Object}
"""
self.marshaller.resume(content)
def append(self, parent, content):
"""
Append the specified L{content} to the I{parent}.
@param content: The content to append.
@type content: L{Object}
"""
self.marshaller.append(parent, content)
class PrimativeAppender(Appender):
"""
An appender for python I{primative} types.
"""
def append(self, parent, content):
if content.tag.startswith('_'):
attr = content.tag[1:]
value = tostr(content.value)
if value:
parent.set(attr, value)
else:
child = self.node(content)
child.setText(tostr(content.value))
parent.append(child)
class NoneAppender(Appender):
"""
An appender for I{None} values.
"""
def append(self, parent, content):
child = self.node(content)
default = self.setdefault(child, content)
if default is None:
self.setnil(child, content)
parent.append(child)
class PropertyAppender(Appender):
"""
A L{Property} appender.
"""
def append(self, parent, content):
p = content.value
child = self.node(content)
child.setText(p.get())
parent.append(child)
for item in p.items():
cont = Content(tag=item[0], value=item[1])
Appender.append(self, child, cont)
class ObjectAppender(Appender):
"""
An L{Object} appender.
"""
def append(self, parent, content):
object = content.value
if self.optional(content) and footprint(object) == 0:
return
child = self.node(content)
parent.append(child)
for item in object:
cont = Content(tag=item[0], value=item[1])
Appender.append(self, child, cont)
class DictAppender(Appender):
"""
A python I{dict} appender.
"""
def append(self, parent, content):
d = content.value
if self.optional(content) and len(d) == 0:
return
child = self.node(content)
parent.append(child)
for item in d.items():
cont = Content(tag=item[0], value=item[1])
Appender.append(self, child, cont)
class ElementWrapper(Element):
"""
Element wrapper.
"""
def __init__(self, content):
Element.__init__(self, content.name, content.parent)
self.__content = content
def str(self, indent=0):
return self.__content.str(indent)
class ElementAppender(Appender):
"""
An appender for I{Element} types.
"""
def append(self, parent, content):
if content.tag.startswith('_'):
raise Exception('raw XML not valid as attribute value')
child = ElementWrapper(content.value)
parent.append(child)
class ListAppender(Appender):
"""
A list/tuple appender.
"""
def append(self, parent, content):
collection = content.value
if len(collection):
self.suspend(content)
for item in collection:
cont = Content(tag=content.tag, value=item)
Appender.append(self, parent, cont)
self.resume(content)
class TextAppender(Appender):
"""
An appender for I{Text} values.
"""
def append(self, parent, content):
if content.tag.startswith('_'):
attr = content.tag[1:]
value = tostr(content.value)
if value:
parent.set(attr, value)
else:
child = self.node(content)
child.setText(content.value)
parent.append(child)
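The appender table above is effectively a small type-dispatch mechanism: Matcher.__eq__ turns the comparison in ContentAppender.append into an isinstance() test against the value being marshalled. A minimal standalone sketch of the same idea (the names below are placeholders, not part of the module):

pairs = (
    (Matcher(dict), 'DictAppender'),
    (Matcher(list), 'ListAppender'),
)
value = {'a': 1}
selected = 'default'
for matcher, name in pairs:
    if matcher == value:     # invokes Matcher.__eq__ -> isinstance(value, cls)
        selected = name
        break
assert selected == 'DictAppender'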
| size: 9,252 | language: Python | extension: .py | total_lines: 268 | avg_line_length: 25.906716 | max_line_length: 77 | alphanum_fraction: 0.610017 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,363 | file_name: core.py | file_path: CouchPotato_CouchPotatoServer/libs/suds/mx/core.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Provides I{marshaller} core classes.
"""
from logging import getLogger
from suds import *
from suds.mx import *
from suds.mx.appender import ContentAppender
from suds.sax.element import Element
from suds.sax.document import Document
from suds.sudsobject import Property
log = getLogger(__name__)
class Core:
"""
An I{abstract} marshaller. This class implements the core
functionality of the marshaller.
@ivar appender: A content appender.
@type appender: L{ContentAppender}
"""
def __init__(self):
"""
"""
self.appender = ContentAppender(self)
def process(self, content):
"""
Process (marshal) the tag with the specified value using the
optional type information.
@param content: The content to process.
@type content: L{Object}
"""
log.debug('processing:\n%s', content)
self.reset()
if content.tag is None:
content.tag = content.value.__class__.__name__
document = Document()
if isinstance(content.value, Property):
root = self.node(content)
self.append(document, content)
else:
self.append(document, content)
return document.root()
def append(self, parent, content):
"""
Append the specified L{content} to the I{parent}.
@param parent: The parent node to append to.
@type parent: L{Element}
@param content: The content to append.
@type content: L{Object}
"""
log.debug('appending parent:\n%s\ncontent:\n%s', parent, content)
if self.start(content):
self.appender.append(parent, content)
self.end(parent, content)
def reset(self):
"""
Reset the marshaller.
"""
pass
def node(self, content):
"""
Create and return an XML node.
@param content: The content for which a node is being created.
@type content: L{Object}
@return: An element.
@rtype: L{Element}
"""
return Element(content.tag)
def start(self, content):
"""
Appending this content has started.
@param content: The content for which processing has started.
@type content: L{Content}
@return: True to continue appending
@rtype: boolean
"""
return True
def suspend(self, content):
"""
Appending this content has suspended.
@param content: The content for which processing has been suspended.
@type content: L{Content}
"""
pass
def resume(self, content):
"""
Appending this content has resumed.
@param content: The content for which processing has been resumed.
@type content: L{Content}
"""
pass
def end(self, parent, content):
"""
Appending this content has ended.
@param parent: The parent node ending.
@type parent: L{Element}
@param content: The content for which processing has ended.
@type content: L{Content}
"""
pass
def setnil(self, node, content):
"""
Set the value of the I{node} to nil.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set nil.
@type content: L{Content}
"""
pass
def setdefault(self, node, content):
"""
Set the value of the I{node} to a default value.
@param node: A I{nil} node.
@type node: L{Element}
@param content: The content to set the default value.
@type content: L{Content}
@return: The default.
"""
pass
def optional(self, content):
"""
Get whether the specified content is optional.
@param content: The content which to check.
@type content: L{Content}
"""
return False
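A rough usage sketch of the marshalling entry point, assuming the suds modules reproduced in this dump import cleanly. The person object and its fields are invented, and the commented output is only indicative:

from suds.mx import Content
from suds.mx.core import Core
from suds.sudsobject import Object

person = Object()
person.name = 'Jeff'
person.age = 30

marshaller = Core()
root = marshaller.process(Content(tag='person', value=person))
# root is a sax Element, roughly:
#   <person><name>Jeff</name><age>30</age></person>
print root.str()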
| size: 4,839 | language: Python | extension: .py | total_lines: 139 | avg_line_length: 27.395683 | max_line_length: 77 | alphanum_fraction: 0.631737 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,364 | file_name: https.py | file_path: CouchPotato_CouchPotatoServer/libs/suds/transport/https.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for basic HTTP (authenticated) transport implementations.
"""
import urllib2 as u2
from suds.transport import *
from suds.transport.http import HttpTransport
from logging import getLogger
log = getLogger(__name__)
class HttpAuthenticated(HttpTransport):
"""
Provides basic http authentication following the RFC-2617 specification:
credentials are only sent after the server requests them with an
HTTP/1.0 401 (Authorization Required) challenge.
@ivar pm: The password manager.
@ivar handler: The authentication handler.
"""
def __init__(self, **kwargs):
"""
@param kwargs: Keyword arguments.
- B{proxy} - An http proxy to be specified on requests.
The proxy is defined as {protocol:proxy,}
- type: I{dict}
- default: {}
- B{timeout} - Set the url open timeout (seconds).
- type: I{float}
- default: 90
- B{username} - The username used for http authentication.
- type: I{str}
- default: None
- B{password} - The password used for http authentication.
- type: I{str}
- default: None
"""
HttpTransport.__init__(self, **kwargs)
self.pm = u2.HTTPPasswordMgrWithDefaultRealm()
def open(self, request):
self.addcredentials(request)
return HttpTransport.open(self, request)
def send(self, request):
self.addcredentials(request)
return HttpTransport.send(self, request)
def addcredentials(self, request):
credentials = self.credentials()
if not (None in credentials):
u = credentials[0]
p = credentials[1]
self.pm.add_password(None, request.url, u, p)
def credentials(self):
return (self.options.username, self.options.password)
def u2handlers(self):
handlers = HttpTransport.u2handlers(self)
handlers.append(u2.HTTPBasicAuthHandler(self.pm))
return handlers
class WindowsHttpAuthenticated(HttpAuthenticated):
"""
Provides Windows (NTLM) http authentication.
@ivar pm: The password manager.
@ivar handler: The authentication handler.
@author: Christopher Bess
"""
def u2handlers(self):
# try to import ntlm support
try:
from ntlm import HTTPNtlmAuthHandler
except ImportError:
raise Exception("Cannot import python-ntlm module")
handlers = HttpTransport.u2handlers(self)
handlers.append(HTTPNtlmAuthHandler.HTTPNtlmAuthHandler(self.pm))
return handlers
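A usage sketch of the authenticated transport, assuming the wider library's suds.client.Client entry point (the URL and credentials below are placeholders). WindowsHttpAuthenticated is used the same way but needs the python-ntlm package, as the import above shows:

from suds.client import Client
from suds.transport.https import HttpAuthenticated

transport = HttpAuthenticated(username='user', password='secret')
client = Client('http://example.com/service?wsdl', transport=transport)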
| size: 3,634 | language: Python | extension: .py | total_lines: 84 | avg_line_length: 35.047619 | max_line_length: 79 | alphanum_fraction: 0.668099 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,365 | file_name: options.py | file_path: CouchPotato_CouchPotatoServer/libs/suds/transport/options.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for transport options.
"""
from suds.transport import *
from suds.properties import *
class Options(Skin):
"""
Options:
- B{proxy} - An http proxy to be specified on requests.
The proxy is defined as {protocol:proxy,}
- type: I{dict}
- default: {}
- B{timeout} - Set the url open timeout (seconds).
- type: I{float}
- default: 90
- B{headers} - Extra HTTP headers.
- type: I{dict}
- I{str} B{http} - The I{http} protocol proxy URL.
- I{str} B{https} - The I{https} protocol proxy URL.
- default: {}
- B{username} - The username used for http authentication.
- type: I{str}
- default: None
- B{password} - The password used for http authentication.
- type: I{str}
- default: None
"""
def __init__(self, **kwargs):
domain = __name__
definitions = [
Definition('proxy', dict, {}),
Definition('timeout', (int,float), 90),
Definition('headers', dict, {}),
Definition('username', basestring, None),
Definition('password', basestring, None),
]
Skin.__init__(self, domain, definitions, kwargs)
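A small sketch of constructing the options skin and reading values back, assuming Skin exposes the defined properties as attributes the way the transports elsewhere in this dump use them (self.options.username, etc.); the values are arbitrary:

opts = Options(timeout=30, username='jeff')
print opts.timeout    # 30
print opts.username   # 'jeff'
print opts.proxy      # {} -- the default declared above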
| size: 2,211 | language: Python | extension: .py | total_lines: 52 | avg_line_length: 34.346154 | max_line_length: 76 | alphanum_fraction: 0.611989 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,366 | file_name: __init__.py | file_path: CouchPotato_CouchPotatoServer/libs/suds/transport/__init__.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains transport interface (classes).
"""
class TransportError(Exception):
def __init__(self, reason, httpcode, fp=None):
Exception.__init__(self, reason)
self.httpcode = httpcode
self.fp = fp
class Request:
"""
A transport request
@ivar url: The url for the request.
@type url: str
@ivar message: The message to be sent in a POST request.
@type message: str
@ivar headers: The http headers to be used for the request.
@type headers: dict
"""
def __init__(self, url, message=None):
"""
@param url: The url for the request.
@type url: str
@param message: The (optional) message to be send in the request.
@type message: str
"""
self.url = url
self.headers = {}
self.message = message
def __str__(self):
s = []
s.append('URL:%s' % self.url)
s.append('HEADERS: %s' % self.headers)
s.append('MESSAGE:')
s.append(self.message)
return '\n'.join(s)
class Reply:
"""
A transport reply
@ivar code: The http code returned.
@type code: int
@ivar message: The message body received in the reply.
@type message: str
@ivar headers: The http headers to be used for the request.
@type headers: dict
"""
def __init__(self, code, headers, message):
"""
@param code: The http code returned.
@type code: int
@param headers: The http returned headers.
@type headers: dict
@param message: The (optional) reply message received.
@type message: str
"""
self.code = code
self.headers = headers
self.message = message
def __str__(self):
s = []
s.append('CODE: %s' % self.code)
s.append('HEADERS: %s' % self.headers)
s.append('MESSAGE:')
s.append(self.message)
return '\n'.join(s)
class Transport:
"""
The transport I{interface}.
"""
def __init__(self):
"""
Constructor.
"""
from suds.transport.options import Options
self.options = Options()
del Options
def open(self, request):
"""
Open the url in the specified request.
@param request: A transport request.
@type request: L{Request}
@return: An input stream.
@rtype: stream
@raise TransportError: On all transport errors.
"""
raise Exception('not-implemented')
def send(self, request):
"""
Send soap message. Implementations are expected to handle:
- proxies
- I{http} headers
- cookies
- sending message
- brokering exceptions into L{TransportError}
@param request: A transport request.
@type request: L{Request}
@return: The reply
@rtype: L{Reply}
@raise TransportError: On all transport errors.
"""
raise Exception('not-implemented')
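A minimal sketch of implementing the interface above with an in-memory echo transport (purely illustrative; the class name is made up, and it assumes the suds package in this dump imports cleanly):

import StringIO

class EchoTransport(Transport):
    def open(self, request):
        # pretend every URL resolves to an empty stream
        return StringIO.StringIO('')
    def send(self, request):
        # echo the outgoing message back as the reply body
        return Reply(200, {'content-type': 'text/xml'}, request.message)

t = EchoTransport()
reply = t.send(Request('http://example.com', '<hello/>'))
print reply.code       # 200
print reply.message    # '<hello/>'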
| size: 3,895 | language: Python | extension: .py | total_lines: 115 | avg_line_length: 26.852174 | max_line_length: 76 | alphanum_fraction: 0.622692 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,367 | file_name: http.py | file_path: CouchPotato_CouchPotatoServer/libs/suds/transport/http.py |
# This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Contains classes for basic HTTP transport implementations.
"""
import urllib2 as u2
import base64
import socket
from suds.transport import *
from suds.properties import Unskin
from urlparse import urlparse
from cookielib import CookieJar
from logging import getLogger
log = getLogger(__name__)
class HttpTransport(Transport):
"""
HTTP transport using urllib2. Provides a basic http transport that
supports cookies and proxies but no authentication.
"""
def __init__(self, **kwargs):
"""
@param kwargs: Keyword arguments.
- B{proxy} - An http proxy to be specified on requests.
The proxy is defined as {protocol:proxy,}
- type: I{dict}
- default: {}
- B{timeout} - Set the url open timeout (seconds).
- type: I{float}
- default: 90
"""
Transport.__init__(self)
Unskin(self.options).update(kwargs)
self.cookiejar = CookieJar()
self.proxy = {}
self.urlopener = None
def open(self, request):
try:
url = request.url
log.debug('opening (%s)', url)
u2request = u2.Request(url)
self.proxy = self.options.proxy
return self.u2open(u2request)
except u2.HTTPError, e:
raise TransportError(str(e), e.code, e.fp)
def send(self, request):
result = None
url = request.url
msg = request.message
headers = request.headers
try:
u2request = u2.Request(url, msg, headers)
self.addcookies(u2request)
self.proxy = self.options.proxy
request.headers.update(u2request.headers)
log.debug('sending:\n%s', request)
fp = self.u2open(u2request)
self.getcookies(fp, u2request)
result = Reply(200, fp.headers.dict, fp.read())
log.debug('received:\n%s', result)
except u2.HTTPError, e:
if e.code in (202,204):
result = None
else:
raise TransportError(e.msg, e.code, e.fp)
return result
def addcookies(self, u2request):
"""
Add cookies in the cookiejar to the request.
@param u2request: A urllib2 request.
@type u2request: urllib2.Request.
"""
self.cookiejar.add_cookie_header(u2request)
def getcookies(self, fp, u2request):
"""
Extract cookies from the response into the cookiejar.
@param u2request: A urllib2 request.
@type u2request: urllib2.Request.
"""
self.cookiejar.extract_cookies(fp, u2request)
def u2open(self, u2request):
"""
Open a connection.
@param u2request: A urllib2 request.
@type u2request: urllib2.Request.
@return: The opened file-like urllib2 object.
@rtype: fp
"""
tm = self.options.timeout
url = self.u2opener()
if self.u2ver() < 2.6:
socket.setdefaulttimeout(tm)
return url.open(u2request)
else:
return url.open(u2request, timeout=tm)
def u2opener(self):
"""
Create a urllib opener.
@return: An opener.
@rtype: I{OpenerDirector}
"""
if self.urlopener is None:
return u2.build_opener(*self.u2handlers())
else:
return self.urlopener
def u2handlers(self):
"""
Get a collection of urllib handlers.
@return: A list of handlers to be installed in the opener.
@rtype: [Handler,...]
"""
handlers = []
handlers.append(u2.ProxyHandler(self.proxy))
return handlers
def u2ver(self):
"""
Get the major/minor version of the urllib2 lib.
@return: The urllib2 version.
@rtype: float
"""
try:
part = u2.__version__.split('.', 1)
n = float('.'.join(part))
return n
except Exception, e:
log.exception(e)
return 0
def __deepcopy__(self, memo={}):
clone = self.__class__()
p = Unskin(self.options)
cp = Unskin(clone.options)
cp.update(p)
return clone
class HttpAuthenticated(HttpTransport):
"""
Provides basic http authentication for servers that don't follow
the specified challenge / response model. This implementation
appends the I{Authorization} http header with base64 encoded
credentials on every http request.
"""
def open(self, request):
self.addcredentials(request)
return HttpTransport.open(self, request)
def send(self, request):
self.addcredentials(request)
return HttpTransport.send(self, request)
def addcredentials(self, request):
credentials = self.credentials()
if not (None in credentials):
encoded = base64.encodestring(':'.join(credentials))
basic = 'Basic %s' % encoded[:-1]
request.headers['Authorization'] = basic
def credentials(self):
return (self.options.username, self.options.password)
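A usage sketch for the plain transport: build a Request, set headers, and send it through HttpTransport with proxy and timeout options (placeholder values; actually running this needs a reachable endpoint):

from suds.transport import Request
from suds.transport.http import HttpTransport

t = HttpTransport(proxy={'http': 'http://localhost:3128'}, timeout=30)
request = Request('http://example.com/soap', '<Envelope/>')
request.headers['Content-Type'] = 'text/xml'
reply = t.send(request)
if reply is not None:          # 202/204 responses come back as None
    print reply.code, len(reply.message)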
| size: 6,132 | language: Python | extension: .py | total_lines: 166 | avg_line_length: 27.993976 | max_line_length: 76 | alphanum_fraction: 0.617029 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,368 | file_name: __init__.py | file_path: CouchPotato_CouchPotatoServer/libs/logr/__init__.py |
# logr - Simple python logging wrapper
# Packed by Dean Gardiner <gardiner91@gmail.com>
#
# File part of:
# rdio-sock - Rdio WebSocket Library
# Copyright (C) 2013 fzza- <fzzzzzzzza@gmail.com>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import inspect
import logging
import os
import sys
IGNORE = ()
PY3 = sys.version_info[0] == 3
class Logr(object):
loggers = {}
handler = None
trace_origin = False
name = "Logr"
@staticmethod
def configure(level=logging.WARNING, handler=None, formatter=None, trace_origin=False, name="Logr"):
"""Configure Logr
@param handler: Logger message handler
@type handler: logging.Handler or None
@param formatter: Logger message Formatter
@type formatter: logging.Formatter or None
"""
if formatter is None:
formatter = LogrFormatter()
if handler is None:
handler = logging.StreamHandler()
handler.setFormatter(formatter)
handler.setLevel(level)
Logr.handler = handler
Logr.trace_origin = trace_origin
Logr.name = name
@staticmethod
def configure_check():
if Logr.handler is None:
Logr.configure()
@staticmethod
def _get_name_from_path(filename):
try:
return os.path.splitext(os.path.basename(filename))[0]
except TypeError:
return "<unknown>"
@staticmethod
def get_frame_class(frame):
if len(frame.f_code.co_varnames) <= 0:
return None
farg = frame.f_code.co_varnames[0]
if farg not in frame.f_locals:
return None
if farg == 'self':
return frame.f_locals[farg].__class__
if farg == 'cls':
return frame.f_locals[farg]
return None
@staticmethod
def get_logger_name():
if not Logr.trace_origin:
return Logr.name
stack = inspect.stack()
for x in xrange_six(len(stack)):
frame = stack[x][0]
name = None
# Try find name of function defined inside a class
frame_class = Logr.get_frame_class(frame)
if frame_class:
class_name = frame_class.__name__
module_name = frame_class.__module__
if module_name != '__main__':
name = module_name + '.' + class_name
else:
name = class_name
# Try find name of function defined outside of a class
if name is None:
if frame.f_code.co_name in frame.f_globals:
name = frame.f_globals.get('__name__')
if name == '__main__':
name = Logr._get_name_from_path(frame.f_globals.get('__file__'))
name = name
elif frame.f_code.co_name == '<module>':
name = Logr._get_name_from_path(frame.f_globals.get('__file__'))
if name is not None and name not in IGNORE:
return name
return ""
@staticmethod
def get_logger():
"""Get or create logger (if it does not exist)
@rtype: RootLogger
"""
name = Logr.get_logger_name()
if name not in Logr.loggers:
Logr.configure_check()
Logr.loggers[name] = logging.Logger(name)
Logr.loggers[name].addHandler(Logr.handler)
return Logr.loggers[name]
@staticmethod
def debug(msg, *args, **kwargs):
Logr.get_logger().debug(msg, *args, **kwargs)
@staticmethod
def info(msg, *args, **kwargs):
Logr.get_logger().info(msg, *args, **kwargs)
@staticmethod
def warning(msg, *args, **kwargs):
Logr.get_logger().warning(msg, *args, **kwargs)
warn = warning
@staticmethod
def error(msg, *args, **kwargs):
Logr.get_logger().error(msg, *args, **kwargs)
@staticmethod
def exception(msg, *args, **kwargs):
Logr.get_logger().exception(msg, *args, **kwargs)
@staticmethod
def critical(msg, *args, **kwargs):
Logr.get_logger().critical(msg, *args, **kwargs)
fatal = critical
@staticmethod
def log(level, msg, *args, **kwargs):
Logr.get_logger().log(level, msg, *args, **kwargs)
class LogrFormatter(logging.Formatter):
LENGTH_NAME = 32
LENGTH_LEVEL_NAME = 5
def __init__(self, fmt=None, datefmt=None):
if sys.version_info[:2] > (2,6):
super(LogrFormatter, self).__init__(fmt, datefmt)
else:
logging.Formatter.__init__(self, fmt, datefmt)
def usesTime(self):
return True
def format(self, record):
record.message = record.getMessage()
if self.usesTime():
record.asctime = self.formatTime(record, self.datefmt)
s = "%(asctime)s %(name)s %(levelname)s %(message)s" % {
'asctime': record.asctime,
'name': record.name[-self.LENGTH_NAME:].rjust(self.LENGTH_NAME, ' '),
'levelname': record.levelname[:self.LENGTH_LEVEL_NAME].ljust(self.LENGTH_LEVEL_NAME, ' '),
'message': record.message
}
if record.exc_info:
if not record.exc_text:
record.exc_text = self.formatException(record.exc_info)
if record.exc_text:
if s[-1:] != "\n":
s += "\n"
try:
s += record.exc_text
except UnicodeError:
s = s + record.exc_text.decode(sys.getfilesystemencoding(),
'replace')
return s
def xrange_six(start, stop=None, step=None):
if stop is not None and step is not None:
if PY3:
return range(start, stop, step)
else:
return xrange(start, stop, step)
else:
if PY3:
return range(start)
else:
return xrange(start)
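A usage sketch for the wrapper above: configure once, then log through the static helpers; trace_origin=True makes logger names follow the calling module or class (the name below is arbitrary):

import logging

Logr.configure(level=logging.DEBUG, trace_origin=True, name="CouchPotato")
Logr.info("starting up")
Logr.debug("value = %s", 42)
Logr.warn("this is an alias for warning()")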
| size: 6,565 | language: Python | extension: .py | total_lines: 172 | avg_line_length: 28.843023 | max_line_length: 104 | alphanum_fraction: 0.589117 | repo_name: CouchPotato/CouchPotatoServer | repo_stars: 3,869 | repo_forks: 1,214 | repo_open_issues: 1,266 | repo_license: GPL-3.0 | repo_extraction_date: 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| id: 7,369 | file_name: __init__.py | file_path: CouchPotato_CouchPotatoServer/libs/pytwitter/__init__.py |
#!/usr/bin/env python
#
# vim: sw=2 ts=2 sts=2
#
# Copyright 2007 The Python-Twitter Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''A library that provides a Python interface to the Twitter API'''
__author__ = 'python-twitter@googlegroups.com'
__version__ = '1.0.1'
import calendar
import datetime
import httplib
import os
import rfc822
import sys
import tempfile
import textwrap
import time
import urllib
import urllib2
import urlparse
import gzip
import StringIO
try:
# Python >= 2.6
import json as simplejson
except ImportError:
try:
# Python < 2.6
import simplejson
except ImportError:
try:
# Google App Engine
from django.utils import simplejson
except ImportError:
raise ImportError, "Unable to load a json library"
# parse_qsl moved to urlparse module in v2.6
try:
from urlparse import parse_qsl, parse_qs
except ImportError:
from cgi import parse_qsl, parse_qs
try:
from hashlib import md5
except ImportError:
from md5 import md5
import oauth2 as oauth
CHARACTER_LIMIT = 140
# A singleton representing a lazily instantiated FileCache.
DEFAULT_CACHE = object()
REQUEST_TOKEN_URL = 'https://api.twitter.com/oauth/request_token'
ACCESS_TOKEN_URL = 'https://api.twitter.com/oauth/access_token'
AUTHORIZATION_URL = 'https://api.twitter.com/oauth/authorize'
SIGNIN_URL = 'https://api.twitter.com/oauth/authenticate'
class TwitterError(Exception):
'''Base class for Twitter errors'''
@property
def message(self):
'''Returns the first argument used to construct this error.'''
return self.args[0]
class Status(object):
'''A class representing the Status structure used by the twitter API.
The Status structure exposes the following properties:
status.created_at
status.created_at_in_seconds # read only
status.favorited
status.favorite_count
status.in_reply_to_screen_name
status.in_reply_to_user_id
status.in_reply_to_status_id
status.truncated
status.source
status.id
status.text
status.location
status.relative_created_at # read only
status.user
status.urls
status.user_mentions
status.hashtags
status.geo
status.place
status.coordinates
status.contributors
'''
def __init__(self,
created_at = None,
favorited = None,
favorite_count = None,
id = None,
text = None,
location = None,
user = None,
in_reply_to_screen_name = None,
in_reply_to_user_id = None,
in_reply_to_status_id = None,
truncated = None,
source = None,
now = None,
urls = None,
user_mentions = None,
hashtags = None,
media = None,
geo = None,
place = None,
coordinates = None,
contributors = None,
retweeted = None,
retweeted_status = None,
current_user_retweet = None,
retweet_count = None,
possibly_sensitive = None,
scopes = None,
withheld_copyright = None,
withheld_in_countries = None,
withheld_scope = None):
'''An object to hold a Twitter status message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
created_at:
The time this status message was posted. [Optional]
favorited:
Whether this is a favorite of the authenticated user. [Optional]
favorite_count:
Number of times this status message has been favorited. [Optional]
id:
The unique id of this status message. [Optional]
text:
The text of this status message. [Optional]
location:
the geolocation string associated with this message. [Optional]
relative_created_at:
A human readable string representing the posting time. [Optional]
user:
A twitter.User instance representing the person posting the
message. [Optional]
now:
The current time, if the client chooses to set it.
Defaults to the wall clock time. [Optional]
urls:
user_mentions:
hashtags:
geo:
place:
coordinates:
contributors:
retweeted:
retweeted_status:
current_user_retweet:
retweet_count:
possibly_sensitive:
scopes:
withheld_copyright:
withheld_in_countries:
withheld_scope:
'''
self.created_at = created_at
self.favorited = favorited
self.favorite_count = favorite_count
self.id = id
self.text = text
self.location = location
self.user = user
self.now = now
self.in_reply_to_screen_name = in_reply_to_screen_name
self.in_reply_to_user_id = in_reply_to_user_id
self.in_reply_to_status_id = in_reply_to_status_id
self.truncated = truncated
self.retweeted = retweeted
self.source = source
self.urls = urls
self.user_mentions = user_mentions
self.hashtags = hashtags
self.media = media
self.geo = geo
self.place = place
self.coordinates = coordinates
self.contributors = contributors
self.retweeted_status = retweeted_status
self.current_user_retweet = current_user_retweet
self.retweet_count = retweet_count
self.possibly_sensitive = possibly_sensitive
self.scopes = scopes
self.withheld_copyright = withheld_copyright
self.withheld_in_countries = withheld_in_countries
self.withheld_scope = withheld_scope
def GetCreatedAt(self):
'''Get the time this status message was posted.
Returns:
The time this status message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this status message was posted.
Args:
created_at:
The time this status message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc = 'The time this status message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this status message was posted, in seconds since the epoch.
Returns:
The time this status message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc = "The time this status message was "
"posted, in seconds since the epoch")
def GetFavorited(self):
'''Get the favorited setting of this status message.
Returns:
True if this status message is favorited; False otherwise
'''
return self._favorited
def SetFavorited(self, favorited):
'''Set the favorited state of this status message.
Args:
favorited:
boolean True/False favorited state of this status message
'''
self._favorited = favorited
favorited = property(GetFavorited, SetFavorited,
doc = 'The favorited state of this status message.')
def GetFavoriteCount(self):
'''Get the favorite count of this status message.
Returns:
number of times this status message has been favorited
'''
return self._favorite_count
def SetFavoriteCount(self, favorite_count):
'''Set the favorited state of this status message.
Args:
favorite_count:
int number of favorites for this status message
'''
self._favorite_count = favorite_count
favorite_count = property(GetFavoriteCount, SetFavoriteCount,
doc = 'The number of favorites for this status message.')
def GetId(self):
'''Get the unique id of this status message.
Returns:
The unique id of this status message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this status message.
Args:
id:
The unique id of this status message
'''
self._id = id
id = property(GetId, SetId,
doc = 'The unique id of this status message.')
def GetInReplyToScreenName(self):
return self._in_reply_to_screen_name
def SetInReplyToScreenName(self, in_reply_to_screen_name):
self._in_reply_to_screen_name = in_reply_to_screen_name
in_reply_to_screen_name = property(GetInReplyToScreenName, SetInReplyToScreenName,
doc = '')
def GetInReplyToUserId(self):
return self._in_reply_to_user_id
def SetInReplyToUserId(self, in_reply_to_user_id):
self._in_reply_to_user_id = in_reply_to_user_id
in_reply_to_user_id = property(GetInReplyToUserId, SetInReplyToUserId,
doc = '')
def GetInReplyToStatusId(self):
return self._in_reply_to_status_id
def SetInReplyToStatusId(self, in_reply_to_status_id):
self._in_reply_to_status_id = in_reply_to_status_id
in_reply_to_status_id = property(GetInReplyToStatusId, SetInReplyToStatusId,
doc = '')
def GetTruncated(self):
return self._truncated
def SetTruncated(self, truncated):
self._truncated = truncated
truncated = property(GetTruncated, SetTruncated,
doc = '')
def GetRetweeted(self):
return self._retweeted
def SetRetweeted(self, retweeted):
self._retweeted = retweeted
retweeted = property(GetRetweeted, SetRetweeted,
doc = '')
def GetSource(self):
return self._source
def SetSource(self, source):
self._source = source
source = property(GetSource, SetSource,
doc = '')
def GetText(self):
'''Get the text of this status message.
Returns:
The text of this status message.
'''
return self._text
def SetText(self, text):
'''Set the text of this status message.
Args:
text:
The text of this status message
'''
self._text = text
text = property(GetText, SetText,
doc = 'The text of this status message')
def GetLocation(self):
'''Get the geolocation associated with this status message
Returns:
The geolocation string of this status message.
'''
return self._location
def SetLocation(self, location):
'''Set the geolocation associated with this status message
Args:
location:
The geolocation string of this status message
'''
self._location = location
location = property(GetLocation, SetLocation,
doc = 'The geolocation string of this status message')
def GetRelativeCreatedAt(self):
'''Get a human readable string representing the posting time
Returns:
A human readable string representing the posting time
'''
fudge = 1.25
delta = long(self.now) - long(self.created_at_in_seconds)
if delta < (1 * fudge):
return 'about a second ago'
elif delta < (60 * (1 / fudge)):
return 'about %d seconds ago' % (delta)
elif delta < (60 * fudge):
return 'about a minute ago'
elif delta < (60 * 60 * (1 / fudge)):
return 'about %d minutes ago' % (delta / 60)
elif delta < (60 * 60 * fudge) or delta / (60 * 60) == 1:
return 'about an hour ago'
elif delta < (60 * 60 * 24 * (1 / fudge)):
return 'about %d hours ago' % (delta / (60 * 60))
elif delta < (60 * 60 * 24 * fudge) or delta / (60 * 60 * 24) == 1:
return 'about a day ago'
else:
return 'about %d days ago' % (delta / (60 * 60 * 24))
relative_created_at = property(GetRelativeCreatedAt,
doc = 'Get a human readable string representing '
'the posting time')
def GetUser(self):
'''Get a twitter.User representing the entity posting this status message.
Returns:
A twitter.User representing the entity posting this status message
'''
return self._user
def SetUser(self, user):
'''Set a twitter.User representing the entity posting this status message.
Args:
user:
A twitter.User representing the entity posting this status message
'''
self._user = user
user = property(GetUser, SetUser,
doc = 'A twitter.User representing the entity posting this '
'status message')
def GetNow(self):
'''Get the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Returns:
Whatever the status instance believes the current time to be,
in seconds since the epoch.
'''
if self._now is None:
self._now = time.time()
return self._now
def SetNow(self, now):
'''Set the wallclock time for this status message.
Used to calculate relative_created_at. Defaults to the time
the object was instantiated.
Args:
now:
The wallclock time for this instance.
'''
self._now = now
now = property(GetNow, SetNow,
doc = 'The wallclock time for this status instance.')
def GetGeo(self):
return self._geo
def SetGeo(self, geo):
self._geo = geo
geo = property(GetGeo, SetGeo,
doc = '')
def GetPlace(self):
return self._place
def SetPlace(self, place):
self._place = place
place = property(GetPlace, SetPlace,
doc = '')
def GetCoordinates(self):
return self._coordinates
def SetCoordinates(self, coordinates):
self._coordinates = coordinates
coordinates = property(GetCoordinates, SetCoordinates,
doc = '')
def GetContributors(self):
return self._contributors
def SetContributors(self, contributors):
self._contributors = contributors
contributors = property(GetContributors, SetContributors,
doc = '')
def GetRetweeted_status(self):
return self._retweeted_status
def SetRetweeted_status(self, retweeted_status):
self._retweeted_status = retweeted_status
retweeted_status = property(GetRetweeted_status, SetRetweeted_status,
doc = '')
def GetRetweetCount(self):
return self._retweet_count
def SetRetweetCount(self, retweet_count):
self._retweet_count = retweet_count
retweet_count = property(GetRetweetCount, SetRetweetCount,
doc = '')
def GetCurrent_user_retweet(self):
return self._current_user_retweet
def SetCurrent_user_retweet(self, current_user_retweet):
self._current_user_retweet = current_user_retweet
current_user_retweet = property(GetCurrent_user_retweet, SetCurrent_user_retweet,
doc = '')
def GetPossibly_sensitive(self):
return self._possibly_sensitive
def SetPossibly_sensitive(self, possibly_sensitive):
self._possibly_sensitive = possibly_sensitive
possibly_sensitive = property(GetPossibly_sensitive, SetPossibly_sensitive,
doc = '')
def GetScopes(self):
return self._scopes
def SetScopes(self, scopes):
self._scopes = scopes
scopes = property(GetScopes, SetScopes, doc = '')
def GetWithheld_copyright(self):
return self._withheld_copyright
def SetWithheld_copyright(self, withheld_copyright):
self._withheld_copyright = withheld_copyright
withheld_copyright = property(GetWithheld_copyright, SetWithheld_copyright,
doc = '')
def GetWithheld_in_countries(self):
return self._withheld_in_countries
def SetWithheld_in_countries(self, withheld_in_countries):
self._withheld_in_countries = withheld_in_countries
withheld_in_countries = property(GetWithheld_in_countries, SetWithheld_in_countries,
doc = '')
def GetWithheld_scope(self):
return self._withheld_scope
def SetWithheld_scope(self, withheld_scope):
self._withheld_scope = withheld_scope
withheld_scope = property(GetWithheld_scope, SetWithheld_scope,
doc = '')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.created_at == other.created_at and \
self.id == other.id and \
self.text == other.text and \
self.location == other.location and \
self.user == other.user and \
self.in_reply_to_screen_name == other.in_reply_to_screen_name and \
self.in_reply_to_user_id == other.in_reply_to_user_id and \
self.in_reply_to_status_id == other.in_reply_to_status_id and \
self.truncated == other.truncated and \
self.retweeted == other.retweeted and \
self.favorited == other.favorited and \
self.favorite_count == other.favorite_count and \
self.source == other.source and \
self.geo == other.geo and \
self.place == other.place and \
self.coordinates == other.coordinates and \
self.contributors == other.contributors and \
self.retweeted_status == other.retweeted_status and \
self.retweet_count == other.retweet_count and \
self.current_user_retweet == other.current_user_retweet and \
self.possibly_sensitive == other.possibly_sensitive and \
self.scopes == other.scopes and \
self.withheld_copyright == other.withheld_copyright and \
self.withheld_in_countries == other.withheld_in_countries and \
self.withheld_scope == other.withheld_scope
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.Status instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.Status instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.Status instance.
Returns:
A JSON string representation of this twitter.Status instance
'''
return simplejson.dumps(self.AsDict(), sort_keys = True)
def AsDict(self):
'''A dict representation of this twitter.Status instance.
The return value uses the same key names as the JSON representation.
Return:
A dict representing this twitter.Status instance
'''
data = {}
if self.created_at:
data['created_at'] = self.created_at
if self.favorited:
data['favorited'] = self.favorited
if self.favorite_count:
data['favorite_count'] = self.favorite_count
if self.id:
data['id'] = self.id
if self.text:
data['text'] = self.text
if self.location:
data['location'] = self.location
if self.user:
data['user'] = self.user.AsDict()
if self.in_reply_to_screen_name:
data['in_reply_to_screen_name'] = self.in_reply_to_screen_name
if self.in_reply_to_user_id:
data['in_reply_to_user_id'] = self.in_reply_to_user_id
if self.in_reply_to_status_id:
data['in_reply_to_status_id'] = self.in_reply_to_status_id
if self.truncated is not None:
data['truncated'] = self.truncated
if self.retweeted is not None:
data['retweeted'] = self.retweeted
if self.favorited is not None:
data['favorited'] = self.favorited
if self.source:
data['source'] = self.source
if self.geo:
data['geo'] = self.geo
if self.place:
data['place'] = self.place
if self.coordinates:
data['coordinates'] = self.coordinates
if self.contributors:
data['contributors'] = self.contributors
if self.hashtags:
data['hashtags'] = [h.text for h in self.hashtags]
if self.retweeted_status:
data['retweeted_status'] = self.retweeted_status.AsDict()
if self.retweet_count:
data['retweet_count'] = self.retweet_count
if self.urls:
data['urls'] = dict([(url.url, url.expanded_url) for url in self.urls])
if self.user_mentions:
data['user_mentions'] = [um.AsDict() for um in self.user_mentions]
if self.current_user_retweet:
data['current_user_retweet'] = self.current_user_retweet
if self.possibly_sensitive:
data['possibly_sensitive'] = self.possibly_sensitive
if self.scopes:
data['scopes'] = self.scopes
if self.withheld_copyright:
data['withheld_copyright'] = self.withheld_copyright
if self.withheld_in_countries:
data['withheld_in_countries'] = self.withheld_in_countries
if self.withheld_scope:
data['withheld_scope'] = self.withheld_scope
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data: A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Status instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
if 'retweeted_status' in data:
retweeted_status = Status.NewFromJsonDict(data['retweeted_status'])
else:
retweeted_status = None
if 'current_user_retweet' in data:
current_user_retweet = data['current_user_retweet']['id']
else:
current_user_retweet = None
urls = None
user_mentions = None
hashtags = None
media = None
if 'entities' in data:
if 'urls' in data['entities']:
urls = [Url.NewFromJsonDict(u) for u in data['entities']['urls']]
if 'user_mentions' in data['entities']:
user_mentions = [User.NewFromJsonDict(u) for u in data['entities']['user_mentions']]
if 'hashtags' in data['entities']:
hashtags = [Hashtag.NewFromJsonDict(h) for h in data['entities']['hashtags']]
if 'media' in data['entities']:
media = data['entities']['media']
else:
media = []
return Status(created_at = data.get('created_at', None),
favorited = data.get('favorited', None),
favorite_count = data.get('favorite_count', None),
id = data.get('id', None),
text = data.get('text', None),
location = data.get('location', None),
in_reply_to_screen_name = data.get('in_reply_to_screen_name', None),
in_reply_to_user_id = data.get('in_reply_to_user_id', None),
in_reply_to_status_id = data.get('in_reply_to_status_id', None),
truncated = data.get('truncated', None),
retweeted = data.get('retweeted', None),
source = data.get('source', None),
user = user,
urls = urls,
user_mentions = user_mentions,
hashtags = hashtags,
media = media,
geo = data.get('geo', None),
place = data.get('place', None),
coordinates = data.get('coordinates', None),
contributors = data.get('contributors', None),
retweeted_status = retweeted_status,
current_user_retweet = current_user_retweet,
retweet_count = data.get('retweet_count', None),
possibly_sensitive = data.get('possibly_sensitive', None),
scopes = data.get('scopes', None),
withheld_copyright = data.get('withheld_copyright', None),
withheld_in_countries = data.get('withheld_in_countries', None),
withheld_scope = data.get('withheld_scope', None))
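An illustrative sketch of building a Status from already-decoded JSON, the way the Api class does internally (the payload is a made-up minimal example):

raw = {
    'id': 1,
    'text': 'hello world',
    'created_at': 'Sat Jan 27 04:17:38 +0000 2007',
}
status = Status.NewFromJsonDict(raw)
print status.text             # 'hello world'
print status.AsJsonString()   # the same fields, JSON-encoded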
class User(object):
'''A class representing the User structure used by the twitter API.
The User structure exposes the following properties:
user.id
user.name
user.screen_name
user.location
user.description
user.profile_image_url
user.profile_background_tile
user.profile_background_image_url
user.profile_sidebar_fill_color
user.profile_background_color
user.profile_link_color
user.profile_text_color
user.protected
user.utc_offset
user.time_zone
user.url
user.status
user.statuses_count
user.followers_count
user.friends_count
user.favourites_count
user.geo_enabled
user.verified
user.lang
user.notifications
user.contributors_enabled
user.created_at
user.listed_count
'''
def __init__(self,
id = None,
name = None,
screen_name = None,
location = None,
description = None,
profile_image_url = None,
profile_background_tile = None,
profile_background_image_url = None,
profile_sidebar_fill_color = None,
profile_background_color = None,
profile_link_color = None,
profile_text_color = None,
protected = None,
utc_offset = None,
time_zone = None,
followers_count = None,
friends_count = None,
statuses_count = None,
favourites_count = None,
url = None,
status = None,
geo_enabled = None,
verified = None,
lang = None,
notifications = None,
contributors_enabled = None,
created_at = None,
listed_count = None):
self.id = id
self.name = name
self.screen_name = screen_name
self.location = location
self.description = description
self.profile_image_url = profile_image_url
self.profile_background_tile = profile_background_tile
self.profile_background_image_url = profile_background_image_url
self.profile_sidebar_fill_color = profile_sidebar_fill_color
self.profile_background_color = profile_background_color
self.profile_link_color = profile_link_color
self.profile_text_color = profile_text_color
self.protected = protected
self.utc_offset = utc_offset
self.time_zone = time_zone
self.followers_count = followers_count
self.friends_count = friends_count
self.statuses_count = statuses_count
self.favourites_count = favourites_count
self.url = url
self.status = status
self.geo_enabled = geo_enabled
self.verified = verified
self.lang = lang
self.notifications = notifications
self.contributors_enabled = contributors_enabled
self.created_at = created_at
self.listed_count = listed_count
def GetId(self):
'''Get the unique id of this user.
Returns:
The unique id of this user
'''
return self._id
def SetId(self, id):
'''Set the unique id of this user.
Args:
id: The unique id of this user.
'''
self._id = id
id = property(GetId, SetId,
doc = 'The unique id of this user.')
def GetName(self):
'''Get the real name of this user.
Returns:
The real name of this user
'''
return self._name
def SetName(self, name):
'''Set the real name of this user.
Args:
name: The real name of this user
'''
self._name = name
name = property(GetName, SetName,
doc = 'The real name of this user.')
def GetScreenName(self):
'''Get the short twitter name of this user.
Returns:
The short twitter name of this user
'''
return self._screen_name
def SetScreenName(self, screen_name):
'''Set the short twitter name of this user.
Args:
screen_name: the short twitter name of this user
'''
self._screen_name = screen_name
screen_name = property(GetScreenName, SetScreenName,
doc = 'The short twitter name of this user.')
def GetLocation(self):
'''Get the geographic location of this user.
Returns:
The geographic location of this user
'''
return self._location
def SetLocation(self, location):
'''Set the geographic location of this user.
Args:
location: The geographic location of this user
'''
self._location = location
location = property(GetLocation, SetLocation,
doc = 'The geographic location of this user.')
def GetDescription(self):
'''Get the short text description of this user.
Returns:
The short text description of this user
'''
return self._description
def SetDescription(self, description):
'''Set the short text description of this user.
Args:
description: The short text description of this user
'''
self._description = description
description = property(GetDescription, SetDescription,
doc = 'The short text description of this user.')
def GetUrl(self):
'''Get the homepage url of this user.
Returns:
The homepage url of this user
'''
return self._url
def SetUrl(self, url):
'''Set the homepage url of this user.
Args:
url: The homepage url of this user
'''
self._url = url
url = property(GetUrl, SetUrl,
doc = 'The homepage url of this user.')
def GetProfileImageUrl(self):
'''Get the url of the thumbnail of this user.
Returns:
The url of the thumbnail of this user
'''
return self._profile_image_url
def SetProfileImageUrl(self, profile_image_url):
'''Set the url of the thumbnail of this user.
Args:
profile_image_url: The url of the thumbnail of this user
'''
self._profile_image_url = profile_image_url
profile_image_url = property(GetProfileImageUrl, SetProfileImageUrl,
doc = 'The url of the thumbnail of this user.')
def GetProfileBackgroundTile(self):
'''Boolean for whether to tile the profile background image.
Returns:
True if the background is to be tiled, False if not, None if unset.
'''
return self._profile_background_tile
def SetProfileBackgroundTile(self, profile_background_tile):
'''Set the boolean flag for whether to tile the profile background image.
Args:
profile_background_tile: Boolean flag for whether to tile or not.
'''
self._profile_background_tile = profile_background_tile
profile_background_tile = property(GetProfileBackgroundTile, SetProfileBackgroundTile,
doc = 'Boolean for whether to tile the background image.')
def GetProfileBackgroundImageUrl(self):
return self._profile_background_image_url
def SetProfileBackgroundImageUrl(self, profile_background_image_url):
self._profile_background_image_url = profile_background_image_url
profile_background_image_url = property(GetProfileBackgroundImageUrl, SetProfileBackgroundImageUrl,
doc = 'The url of the profile background of this user.')
def GetProfileSidebarFillColor(self):
return self._profile_sidebar_fill_color
def SetProfileSidebarFillColor(self, profile_sidebar_fill_color):
self._profile_sidebar_fill_color = profile_sidebar_fill_color
profile_sidebar_fill_color = property(GetProfileSidebarFillColor, SetProfileSidebarFillColor)
def GetProfileBackgroundColor(self):
return self._profile_background_color
def SetProfileBackgroundColor(self, profile_background_color):
self._profile_background_color = profile_background_color
profile_background_color = property(GetProfileBackgroundColor, SetProfileBackgroundColor)
def GetProfileLinkColor(self):
return self._profile_link_color
def SetProfileLinkColor(self, profile_link_color):
self._profile_link_color = profile_link_color
profile_link_color = property(GetProfileLinkColor, SetProfileLinkColor)
def GetProfileTextColor(self):
return self._profile_text_color
def SetProfileTextColor(self, profile_text_color):
self._profile_text_color = profile_text_color
profile_text_color = property(GetProfileTextColor, SetProfileTextColor)
def GetProtected(self):
return self._protected
def SetProtected(self, protected):
self._protected = protected
protected = property(GetProtected, SetProtected)
def GetUtcOffset(self):
return self._utc_offset
def SetUtcOffset(self, utc_offset):
self._utc_offset = utc_offset
utc_offset = property(GetUtcOffset, SetUtcOffset)
def GetTimeZone(self):
'''Returns the current time zone string for the user.
Returns:
The descriptive time zone string for the user.
'''
return self._time_zone
def SetTimeZone(self, time_zone):
'''Sets the user's time zone string.
Args:
time_zone:
The descriptive time zone to assign for the user.
'''
self._time_zone = time_zone
time_zone = property(GetTimeZone, SetTimeZone)
def GetStatus(self):
'''Get the latest twitter.Status of this user.
Returns:
The latest twitter.Status of this user
'''
return self._status
def SetStatus(self, status):
'''Set the latest twitter.Status of this user.
Args:
status:
The latest twitter.Status of this user
'''
self._status = status
status = property(GetStatus, SetStatus,
doc = 'The latest twitter.Status of this user.')
def GetFriendsCount(self):
'''Get the friend count for this user.
Returns:
The number of users this user has befriended.
'''
return self._friends_count
def SetFriendsCount(self, count):
'''Set the friend count for this user.
Args:
count:
The number of users this user has befriended.
'''
self._friends_count = count
friends_count = property(GetFriendsCount, SetFriendsCount,
doc = 'The number of friends for this user.')
def GetListedCount(self):
'''Get the listed count for this user.
Returns:
The number of lists this user belongs to.
'''
return self._listed_count
def SetListedCount(self, count):
'''Set the listed count for this user.
Args:
count:
The number of lists this user belongs to.
'''
self._listed_count = count
listed_count = property(GetListedCount, SetListedCount,
doc = 'The number of lists this user belongs to.')
def GetFollowersCount(self):
'''Get the follower count for this user.
Returns:
The number of users following this user.
'''
return self._followers_count
def SetFollowersCount(self, count):
'''Set the follower count for this user.
Args:
count:
The number of users following this user.
'''
self._followers_count = count
followers_count = property(GetFollowersCount, SetFollowersCount,
doc = 'The number of users following this user.')
def GetStatusesCount(self):
'''Get the number of status updates for this user.
Returns:
The number of status updates for this user.
'''
return self._statuses_count
def SetStatusesCount(self, count):
'''Set the status update count for this user.
Args:
count:
The number of updates for this user.
'''
self._statuses_count = count
statuses_count = property(GetStatusesCount, SetStatusesCount,
doc = 'The number of updates for this user.')
def GetFavouritesCount(self):
'''Get the number of favourites for this user.
Returns:
The number of favourites for this user.
'''
return self._favourites_count
def SetFavouritesCount(self, count):
'''Set the favourite count for this user.
Args:
count:
The number of favourites for this user.
'''
self._favourites_count = count
favourites_count = property(GetFavouritesCount, SetFavouritesCount,
doc = 'The number of favourites for this user.')
def GetGeoEnabled(self):
'''Get the setting of geo_enabled for this user.
Returns:
True/False if Geo tagging is enabled
'''
return self._geo_enabled
def SetGeoEnabled(self, geo_enabled):
'''Set the latest twitter.geo_enabled of this user.
Args:
geo_enabled:
True/False if Geo tagging is to be enabled
'''
self._geo_enabled = geo_enabled
geo_enabled = property(GetGeoEnabled, SetGeoEnabled,
doc = 'The value of twitter.geo_enabled for this user.')
def GetVerified(self):
'''Get the setting of verified for this user.
Returns:
True/False if user is a verified account
'''
return self._verified
def SetVerified(self, verified):
'''Set twitter.verified for this user.
Args:
verified:
True/False if user is a verified account
'''
self._verified = verified
verified = property(GetVerified, SetVerified,
doc = 'The value of twitter.verified for this user.')
def GetLang(self):
'''Get the setting of lang for this user.
Returns:
language code of the user
'''
return self._lang
def SetLang(self, lang):
'''Set twitter.lang for this user.
Args:
lang:
language code for the user
'''
self._lang = lang
lang = property(GetLang, SetLang,
doc = 'The value of twitter.lang for this user.')
def GetNotifications(self):
'''Get the setting of notifications for this user.
Returns:
True/False for the notifications setting of the user
'''
return self._notifications
def SetNotifications(self, notifications):
'''Set twitter.notifications for this user.
Args:
notifications:
True/False notifications setting for the user
'''
self._notifications = notifications
notifications = property(GetNotifications, SetNotifications,
doc = 'The value of twitter.notifications for this user.')
def GetContributorsEnabled(self):
'''Get the setting of contributors_enabled for this user.
Returns:
True/False contributors_enabled of the user
'''
return self._contributors_enabled
def SetContributorsEnabled(self, contributors_enabled):
'''Set twitter.contributors_enabled for this user.
Args:
contributors_enabled:
True/False contributors_enabled setting for the user
'''
self._contributors_enabled = contributors_enabled
contributors_enabled = property(GetContributorsEnabled, SetContributorsEnabled,
doc = 'The value of twitter.contributors_enabled for this user.')
def GetCreatedAt(self):
'''Get the setting of created_at for this user.
Returns:
created_at value of the user
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set twitter.created_at for this user.
Args:
created_at:
created_at value for the user
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc = 'The value of twitter.created_at for this user.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.screen_name == other.screen_name and \
self.location == other.location and \
self.description == other.description and \
self.profile_image_url == other.profile_image_url and \
self.profile_background_tile == other.profile_background_tile and \
self.profile_background_image_url == other.profile_background_image_url and \
self.profile_sidebar_fill_color == other.profile_sidebar_fill_color and \
self.profile_background_color == other.profile_background_color and \
self.profile_link_color == other.profile_link_color and \
self.profile_text_color == other.profile_text_color and \
self.protected == other.protected and \
self.utc_offset == other.utc_offset and \
self.time_zone == other.time_zone and \
self.url == other.url and \
self.statuses_count == other.statuses_count and \
self.followers_count == other.followers_count and \
self.favourites_count == other.favourites_count and \
self.friends_count == other.friends_count and \
self.status == other.status and \
self.geo_enabled == other.geo_enabled and \
self.verified == other.verified and \
self.lang == other.lang and \
self.notifications == other.notifications and \
self.contributors_enabled == other.contributors_enabled and \
self.created_at == other.created_at and \
self.listed_count == other.listed_count
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.User instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.User instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.User instance.
Returns:
A JSON string representation of this twitter.User instance
'''
return simplejson.dumps(self.AsDict(), sort_keys = True)
def AsDict(self):
'''A dict representation of this twitter.User instance.
The return value uses the same key names as the JSON representation.
Returns:
A dict representing this twitter.User instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.screen_name:
data['screen_name'] = self.screen_name
if self.location:
data['location'] = self.location
if self.description:
data['description'] = self.description
if self.profile_image_url:
data['profile_image_url'] = self.profile_image_url
if self.profile_background_tile is not None:
data['profile_background_tile'] = self.profile_background_tile
if self.profile_background_image_url:
data['profile_background_image_url'] = self.profile_background_image_url
if self.profile_background_color:
data['profile_background_color'] = self.profile_background_color
if self.profile_link_color:
data['profile_link_color'] = self.profile_link_color
if self.profile_text_color:
data['profile_text_color'] = self.profile_text_color
if self.protected is not None:
data['protected'] = self.protected
if self.utc_offset:
data['utc_offset'] = self.utc_offset
if self.time_zone:
data['time_zone'] = self.time_zone
if self.url:
data['url'] = self.url
if self.status:
data['status'] = self.status.AsDict()
if self.friends_count:
data['friends_count'] = self.friends_count
if self.followers_count:
data['followers_count'] = self.followers_count
if self.statuses_count:
data['statuses_count'] = self.statuses_count
if self.favourites_count:
data['favourites_count'] = self.favourites_count
if self.geo_enabled:
data['geo_enabled'] = self.geo_enabled
if self.verified:
data['verified'] = self.verified
if self.lang:
data['lang'] = self.lang
if self.notifications:
data['notifications'] = self.notifications
if self.contributors_enabled:
data['contributors_enabled'] = self.contributors_enabled
if self.created_at:
data['created_at'] = self.created_at
if self.listed_count:
data['listed_count'] = self.listed_count
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.User instance
'''
if 'status' in data:
status = Status.NewFromJsonDict(data['status'])
else:
status = None
return User(id = data.get('id', None),
name = data.get('name', None),
screen_name = data.get('screen_name', None),
location = data.get('location', None),
description = data.get('description', None),
statuses_count = data.get('statuses_count', None),
followers_count = data.get('followers_count', None),
favourites_count = data.get('favourites_count', None),
friends_count = data.get('friends_count', None),
profile_image_url = data.get('profile_image_url_https', data.get('profile_image_url', None)),
profile_background_tile = data.get('profile_background_tile', None),
profile_background_image_url = data.get('profile_background_image_url', None),
profile_sidebar_fill_color = data.get('profile_sidebar_fill_color', None),
profile_background_color = data.get('profile_background_color', None),
profile_link_color = data.get('profile_link_color', None),
profile_text_color = data.get('profile_text_color', None),
protected = data.get('protected', None),
utc_offset = data.get('utc_offset', None),
time_zone = data.get('time_zone', None),
url = data.get('url', None),
status = status,
geo_enabled = data.get('geo_enabled', None),
verified = data.get('verified', None),
lang = data.get('lang', None),
notifications = data.get('notifications', None),
contributors_enabled = data.get('contributors_enabled', None),
created_at = data.get('created_at', None),
listed_count = data.get('listed_count', None))
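# Round-trip sketch (hedged): the dict below is a hypothetical, heavily
# trimmed API payload; real user objects carry many more fields.
#
# >>> payload = {'id': 718443, 'name': 'Kesuke Miyagi', 'screen_name': 'kesuke'}
# >>> user = User.NewFromJsonDict(payload)
# >>> user.AsDict() == payload
# True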
class List(object):
'''A class representing the List structure used by the twitter API.
The List structure exposes the following properties:
list.id
list.name
list.slug
list.description
list.full_name
list.mode
list.uri
list.member_count
list.subscriber_count
list.following
'''
def __init__(self,
id = None,
name = None,
slug = None,
description = None,
full_name = None,
mode = None,
uri = None,
member_count = None,
subscriber_count = None,
following = None,
user = None):
self.id = id
self.name = name
self.slug = slug
self.description = description
self.full_name = full_name
self.mode = mode
self.uri = uri
self.member_count = member_count
self.subscriber_count = subscriber_count
self.following = following
self.user = user
def GetId(self):
'''Get the unique id of this list.
Returns:
The unique id of this list
'''
return self._id
def SetId(self, id):
'''Set the unique id of this list.
Args:
id:
The unique id of this list.
'''
self._id = id
id = property(GetId, SetId,
doc = 'The unique id of this list.')
def GetName(self):
'''Get the real name of this list.
Returns:
The real name of this list
'''
return self._name
def SetName(self, name):
'''Set the real name of this list.
Args:
name:
The real name of this list
'''
self._name = name
name = property(GetName, SetName,
doc = 'The real name of this list.')
def GetSlug(self):
'''Get the slug of this list.
Returns:
The slug of this list
'''
return self._slug
def SetSlug(self, slug):
'''Set the slug of this list.
Args:
slug:
The slug of this list.
'''
self._slug = slug
slug = property(GetSlug, SetSlug,
doc = 'The slug of this list.')
def GetDescription(self):
'''Get the description of this list.
Returns:
The description of this list
'''
return self._description
def SetDescription(self, description):
'''Set the description of this list.
Args:
description:
The description of this list.
'''
self._description = description
description = property(GetDescription, SetDescription,
doc = 'The description of this list.')
def GetFull_name(self):
'''Get the full_name of this list.
Returns:
The full_name of this list
'''
return self._full_name
def SetFull_name(self, full_name):
'''Set the full_name of this list.
Args:
full_name:
The full_name of this list.
'''
self._full_name = full_name
full_name = property(GetFull_name, SetFull_name,
doc = 'The full_name of this list.')
def GetMode(self):
'''Get the mode of this list.
Returns:
The mode of this list
'''
return self._mode
def SetMode(self, mode):
'''Set the mode of this list.
Args:
mode:
The mode of this list.
'''
self._mode = mode
mode = property(GetMode, SetMode,
doc = 'The mode of this list.')
def GetUri(self):
'''Get the uri of this list.
Returns:
The uri of this list
'''
return self._uri
def SetUri(self, uri):
'''Set the uri of this list.
Args:
uri:
The uri of this list.
'''
self._uri = uri
uri = property(GetUri, SetUri,
doc = 'The uri of this list.')
def GetMember_count(self):
'''Get the member_count of this list.
Returns:
The member_count of this list
'''
return self._member_count
def SetMember_count(self, member_count):
'''Set the member_count of this list.
Args:
member_count:
The member_count of this list.
'''
self._member_count = member_count
member_count = property(GetMember_count, SetMember_count,
doc = 'The member_count of this list.')
def GetSubscriber_count(self):
'''Get the subscriber_count of this list.
Returns:
The subscriber_count of this list
'''
return self._subscriber_count
def SetSubscriber_count(self, subscriber_count):
'''Set the subscriber_count of this list.
Args:
subscriber_count:
The subscriber_count of this list.
'''
self._subscriber_count = subscriber_count
subscriber_count = property(GetSubscriber_count, SetSubscriber_count,
doc = 'The subscriber_count of this list.')
def GetFollowing(self):
'''Get the following status of this list.
Returns:
The following status of this list
'''
return self._following
def SetFollowing(self, following):
'''Set the following status of this list.
Args:
following:
The following of this list.
'''
self._following = following
following = property(GetFollowing, SetFollowing,
doc = 'The following status of this list.')
def GetUser(self):
'''Get the user of this list.
Returns:
The owner of this list
'''
return self._user
def SetUser(self, user):
'''Set the user of this list.
Args:
user:
The owner of this list.
'''
self._user = user
user = property(GetUser, SetUser,
doc = 'The owner of this list.')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.name == other.name and \
self.slug == other.slug and \
self.description == other.description and \
self.full_name == other.full_name and \
self.mode == other.mode and \
self.uri == other.uri and \
self.member_count == other.member_count and \
self.subscriber_count == other.subscriber_count and \
self.following == other.following and \
self.user == other.user
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.List instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.List instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.List instance.
Returns:
A JSON string representation of this twitter.List instance
'''
return simplejson.dumps(self.AsDict(), sort_keys = True)
def AsDict(self):
'''A dict representation of this twitter.List instance.
The return value uses the same key names as the JSON representation.
Returns:
A dict representing this twitter.List instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.name:
data['name'] = self.name
if self.slug:
data['slug'] = self.slug
if self.description:
data['description'] = self.description
if self.full_name:
data['full_name'] = self.full_name
if self.mode:
data['mode'] = self.mode
if self.uri:
data['uri'] = self.uri
if self.member_count is not None:
data['member_count'] = self.member_count
if self.subscriber_count is not None:
data['subscriber_count'] = self.subscriber_count
if self.following is not None:
data['following'] = self.following
if self.user is not None:
data['user'] = self.user.AsDict()
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.List instance
'''
if 'user' in data:
user = User.NewFromJsonDict(data['user'])
else:
user = None
return List(id = data.get('id', None),
name = data.get('name', None),
slug = data.get('slug', None),
description = data.get('description', None),
full_name = data.get('full_name', None),
mode = data.get('mode', None),
uri = data.get('uri', None),
member_count = data.get('member_count', None),
subscriber_count = data.get('subscriber_count', None),
following = data.get('following', None),
user = user)
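# Usage sketch (hedged): the payload below is illustrative only. Note that a
# nested 'user' dict, when present, is expanded into a twitter.User instance.
#
# >>> data = {'id': 574, 'name': 'dev', 'slug': 'dev', 'mode': 'public'}
# >>> l = List.NewFromJsonDict(data)
# >>> print l.slug
# dev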
class DirectMessage(object):
'''A class representing the DirectMessage structure used by the twitter API.
The DirectMessage structure exposes the following properties:
direct_message.id
direct_message.created_at
direct_message.created_at_in_seconds # read only
direct_message.sender_id
direct_message.sender_screen_name
direct_message.recipient_id
direct_message.recipient_screen_name
direct_message.text
'''
def __init__(self,
id = None,
created_at = None,
sender_id = None,
sender_screen_name = None,
recipient_id = None,
recipient_screen_name = None,
text = None):
'''An object to hold a Twitter direct message.
This class is normally instantiated by the twitter.Api class and
returned in a sequence.
Note: Dates are posted in the form "Sat Jan 27 04:17:38 +0000 2007"
Args:
id:
The unique id of this direct message. [Optional]
created_at:
The time this direct message was posted. [Optional]
sender_id:
The id of the twitter user that sent this message. [Optional]
sender_screen_name:
The name of the twitter user that sent this message. [Optional]
recipient_id:
The id of the twitter user that received this message. [Optional]
recipient_screen_name:
The name of the twitter user that received this message. [Optional]
text:
The text of this direct message. [Optional]
'''
self.id = id
self.created_at = created_at
self.sender_id = sender_id
self.sender_screen_name = sender_screen_name
self.recipient_id = recipient_id
self.recipient_screen_name = recipient_screen_name
self.text = text
def GetId(self):
'''Get the unique id of this direct message.
Returns:
The unique id of this direct message
'''
return self._id
def SetId(self, id):
'''Set the unique id of this direct message.
Args:
id:
The unique id of this direct message
'''
self._id = id
id = property(GetId, SetId,
doc = 'The unique id of this direct message.')
def GetCreatedAt(self):
'''Get the time this direct message was posted.
Returns:
The time this direct message was posted
'''
return self._created_at
def SetCreatedAt(self, created_at):
'''Set the time this direct message was posted.
Args:
created_at:
The time this direct message was created
'''
self._created_at = created_at
created_at = property(GetCreatedAt, SetCreatedAt,
doc = 'The time this direct message was posted.')
def GetCreatedAtInSeconds(self):
'''Get the time this direct message was posted, in seconds since the epoch.
Returns:
The time this direct message was posted, in seconds since the epoch.
'''
return calendar.timegm(rfc822.parsedate(self.created_at))
created_at_in_seconds = property(GetCreatedAtInSeconds,
doc = "The time this direct message was "
"posted, in seconds since the epoch")
def GetSenderId(self):
'''Get the unique sender id of this direct message.
Returns:
The unique sender id of this direct message
'''
return self._sender_id
def SetSenderId(self, sender_id):
'''Set the unique sender id of this direct message.
Args:
sender_id:
The unique sender id of this direct message
'''
self._sender_id = sender_id
sender_id = property(GetSenderId, SetSenderId,
doc = 'The unique sender id of this direct message.')
def GetSenderScreenName(self):
'''Get the unique sender screen name of this direct message.
Returns:
The unique sender screen name of this direct message
'''
return self._sender_screen_name
def SetSenderScreenName(self, sender_screen_name):
'''Set the unique sender screen name of this direct message.
Args:
sender_screen_name:
The unique sender screen name of this direct message
'''
self._sender_screen_name = sender_screen_name
sender_screen_name = property(GetSenderScreenName, SetSenderScreenName,
doc = 'The unique sender screen name of this direct message.')
def GetRecipientId(self):
'''Get the unique recipient id of this direct message.
Returns:
The unique recipient id of this direct message
'''
return self._recipient_id
def SetRecipientId(self, recipient_id):
'''Set the unique recipient id of this direct message.
Args:
recipient_id:
The unique recipient id of this direct message
'''
self._recipient_id = recipient_id
recipient_id = property(GetRecipientId, SetRecipientId,
doc = 'The unique recipient id of this direct message.')
def GetRecipientScreenName(self):
'''Get the unique recipient screen name of this direct message.
Returns:
The unique recipient screen name of this direct message
'''
return self._recipient_screen_name
def SetRecipientScreenName(self, recipient_screen_name):
'''Set the unique recipient screen name of this direct message.
Args:
recipient_screen_name:
The unique recipient screen name of this direct message
'''
self._recipient_screen_name = recipient_screen_name
recipient_screen_name = property(GetRecipientScreenName, SetRecipientScreenName,
doc = 'The unique recipient screen name of this direct message.')
def GetText(self):
'''Get the text of this direct message.
Returns:
The text of this direct message.
'''
return self._text
def SetText(self, text):
'''Set the text of this direct message.
Args:
text:
The text of this direct message
'''
self._text = text
text = property(GetText, SetText,
doc = 'The text of this direct message')
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.id == other.id and \
self.created_at == other.created_at and \
self.sender_id == other.sender_id and \
self.sender_screen_name == other.sender_screen_name and \
self.recipient_id == other.recipient_id and \
self.recipient_screen_name == other.recipient_screen_name and \
self.text == other.text
except AttributeError:
return False
def __str__(self):
'''A string representation of this twitter.DirectMessage instance.
The return value is the same as the JSON string representation.
Returns:
A string representation of this twitter.DirectMessage instance.
'''
return self.AsJsonString()
def AsJsonString(self):
'''A JSON string representation of this twitter.DirectMessage instance.
Returns:
A JSON string representation of this twitter.DirectMessage instance
'''
return simplejson.dumps(self.AsDict(), sort_keys = True)
def AsDict(self):
'''A dict representation of this twitter.DirectMessage instance.
The return value uses the same key names as the JSON representation.
Returns:
A dict representing this twitter.DirectMessage instance
'''
data = {}
if self.id:
data['id'] = self.id
if self.created_at:
data['created_at'] = self.created_at
if self.sender_id:
data['sender_id'] = self.sender_id
if self.sender_screen_name:
data['sender_screen_name'] = self.sender_screen_name
if self.recipient_id:
data['recipient_id'] = self.recipient_id
if self.recipient_screen_name:
data['recipient_screen_name'] = self.recipient_screen_name
if self.text:
data['text'] = self.text
return data
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.DirectMessage instance
'''
return DirectMessage(created_at = data.get('created_at', None),
recipient_id = data.get('recipient_id', None),
sender_id = data.get('sender_id', None),
text = data.get('text', None),
sender_screen_name = data.get('sender_screen_name', None),
id = data.get('id', None),
recipient_screen_name = data.get('recipient_screen_name', None))
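# Usage sketch (hedged): created_at_in_seconds converts the posted date string
# (e.g. 'Sat Jan 27 04:17:38 +0000 2007') to seconds since the epoch via
# rfc822.parsedate, so it only works once created_at has been set.
#
# >>> dm = DirectMessage(created_at='Sat Jan 27 04:17:38 +0000 2007', text='hi')
# >>> print dm.created_at_in_seconds
# 1169871458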
class Hashtag(object):
''' A class representing a twitter hashtag
'''
def __init__(self,
text = None):
self.text = text
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Hashtag instance
'''
return Hashtag(text = data.get('text', None))
class Trend(object):
''' A class representing a trending topic
'''
def __init__(self, name = None, query = None, timestamp = None, url = None):
self.name = name
self.query = query
self.timestamp = timestamp
self.url = url
def __str__(self):
return 'Name: %s\nQuery: %s\nTimestamp: %s\nSearch URL: %s\n' % (self.name, self.query, self.timestamp, self.url)
def __ne__(self, other):
return not self.__eq__(other)
def __eq__(self, other):
try:
return other and \
self.name == other.name and \
self.query == other.query and \
self.timestamp == other.timestamp and \
self.url == other.url
except AttributeError:
return False
@staticmethod
def NewFromJsonDict(data, timestamp = None):
'''Create a new instance based on a JSON dict
Args:
data:
A JSON dict
timestamp:
Gets set as the timestamp property of the new object
Returns:
A twitter.Trend object
'''
return Trend(name = data.get('name', None),
query = data.get('query', None),
url = data.get('url', None),
timestamp = timestamp)
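# Usage sketch (hedged): trends arrive inside a per-WOEID payload whose
# 'as_of' field is passed through as the timestamp here (see GetTrendsWoeid
# below); the values shown are illustrative only.
#
# >>> t = Trend.NewFromJsonDict({'name': '#python', 'query': '%23python'}, timestamp='2013-01-01T00:00:00Z')
# >>> print t.name
# #python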
class Url(object):
'''A class representing a URL contained in a tweet'''
def __init__(self,
url = None,
expanded_url = None):
self.url = url
self.expanded_url = expanded_url
@staticmethod
def NewFromJsonDict(data):
'''Create a new instance based on a JSON dict.
Args:
data:
A JSON dict, as converted from the JSON in the twitter API
Returns:
A twitter.Url instance
'''
return Url(url = data.get('url', None),
expanded_url = data.get('expanded_url', None))
class Api(object):
'''A python interface into the Twitter API
By default, the Api caches results for 1 minute.
Example usage:
To create an instance of the twitter.Api class, with no authentication:
>>> import twitter
>>> api = twitter.Api()
To fetch the most recently posted public twitter status messages:
>>> statuses = api.GetPublicTimeline()
>>> print [s.user.name for s in statuses]
[u'DeWitt', u'Kesuke Miyagi', u'ev', u'Buzz Andersen', u'Biz Stone'] #...
To fetch a single user's public status messages, where "user" is either
a Twitter "short name" or their user id.
>>> statuses = api.GetUserTimeline(user)
>>> print [s.text for s in statuses]
To use authentication, instantiate the twitter.Api class with a
consumer key and secret; and the oAuth key and secret:
>>> api = twitter.Api(consumer_key='twitter consumer key',
consumer_secret='twitter consumer secret',
access_token_key='the_key_given',
access_token_secret='the_key_secret')
To fetch your friends (after being authenticated):
>>> users = api.GetFriends()
>>> print [u.name for u in users]
To post a twitter status message (after being authenticated):
>>> status = api.PostUpdate('I love python-twitter!')
>>> print status.text
I love python-twitter!
There are many other methods, including:
>>> api.PostUpdates(status)
>>> api.PostDirectMessage(user, text)
>>> api.GetUser(user)
>>> api.GetReplies()
>>> api.GetUserTimeline(user)
>>> api.GetHomeTimeline()
>>> api.GetStatus(id)
>>> api.DestroyStatus(id)
>>> api.GetFriends(user)
>>> api.GetFollowers()
>>> api.GetFeatured()
>>> api.GetDirectMessages()
>>> api.GetSentDirectMessages()
>>> api.PostDirectMessage(user, text)
>>> api.DestroyDirectMessage(id)
>>> api.DestroyFriendship(user)
>>> api.CreateFriendship(user)
>>> api.GetUserByEmail(email)
>>> api.VerifyCredentials()
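To search for tweets containing a term (a minimal sketch; the term shown
is only an illustration):
>>> results = api.GetSearch(term='python')
>>> print [s.text for s in results]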
'''
DEFAULT_CACHE_TIMEOUT = 60 # cache for 1 minute
_API_REALM = 'Twitter API'
def __init__(self,
consumer_key = None,
consumer_secret = None,
access_token_key = None,
access_token_secret = None,
input_encoding = None,
request_headers = None,
cache = DEFAULT_CACHE,
shortner = None,
base_url = None,
use_gzip_compression = False,
debugHTTP = False):
'''Instantiate a new twitter.Api object.
Args:
consumer_key:
Your Twitter user's consumer_key.
consumer_secret:
Your Twitter user's consumer_secret.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
input_encoding:
The encoding used to encode input strings. [Optional]
request_headers:
A dictionary of additional HTTP request headers. [Optional]
cache:
The cache instance to use. Defaults to DEFAULT_CACHE.
Use None to disable caching. [Optional]
shortner:
The shortner instance to use. Defaults to None.
See shorten_url.py for an example shortner. [Optional]
base_url:
The base URL to use to contact the Twitter API.
Defaults to https://api.twitter.com. [Optional]
use_gzip_compression:
Set to True to enable gzip compression for any call
made to Twitter. Defaults to False. [Optional]
debugHTTP:
Set to True to enable debug output from urllib2 when performing
any HTTP requests. Defaults to False. [Optional]
'''
self.SetCache(cache)
self._urllib = urllib2
self._cache_timeout = Api.DEFAULT_CACHE_TIMEOUT
self._input_encoding = input_encoding
self._use_gzip = use_gzip_compression
self._debugHTTP = debugHTTP
self._oauth_consumer = None
self._shortlink_size = 19
self._InitializeRequestHeaders(request_headers)
self._InitializeUserAgent()
self._InitializeDefaultParameters()
if base_url is None:
self.base_url = 'https://api.twitter.com/1.1'
else:
self.base_url = base_url
if consumer_key is not None and (access_token_key is None or
access_token_secret is None):
print >> sys.stderr, 'Twitter now requires an oAuth Access Token for API calls.'
print >> sys.stderr, 'If you\'re using this library from a command line utility, please'
print >> sys.stderr, 'run the included get_access_token.py tool to generate one.'
raise TwitterError('Twitter requires oAuth Access Token for all API access')
self.SetCredentials(consumer_key, consumer_secret, access_token_key, access_token_secret)
def SetCredentials(self,
consumer_key,
consumer_secret,
access_token_key = None,
access_token_secret = None):
'''Set the consumer_key and consumer_secret for this instance
Args:
consumer_key:
The consumer_key of the twitter account.
consumer_secret:
The consumer_secret for the twitter account.
access_token_key:
The oAuth access token key value you retrieved
from running get_access_token.py.
access_token_secret:
The oAuth access token's secret, also retrieved
from the get_access_token.py run.
'''
self._consumer_key = consumer_key
self._consumer_secret = consumer_secret
self._access_token_key = access_token_key
self._access_token_secret = access_token_secret
self._oauth_consumer = None
if consumer_key is not None and consumer_secret is not None and \
access_token_key is not None and access_token_secret is not None:
self._signature_method_plaintext = oauth.SignatureMethod_PLAINTEXT()
self._signature_method_hmac_sha1 = oauth.SignatureMethod_HMAC_SHA1()
self._oauth_token = oauth.Token(key = access_token_key, secret = access_token_secret)
self._oauth_consumer = oauth.Consumer(key = consumer_key, secret = consumer_secret)
def ClearCredentials(self):
'''Clear any credentials for this instance
'''
self._consumer_key = None
self._consumer_secret = None
self._access_token_key = None
self._access_token_secret = None
self._oauth_consumer = None
def GetSearch(self,
term = None,
geocode = None,
since_id = None,
max_id = None,
until = None,
count = 15,
lang = None,
locale = None,
result_type = "mixed",
include_entities = None):
'''Return twitter search results for a given term.
Args:
term:
Term to search by. Optional if you include geocode.
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
until:
Returns tweets generated before the given date. Date should be
formatted as YYYY-MM-DD. [Optional]
geocode:
Geolocation information in the form (latitude, longitude, radius)
[Optional]
count:
Number of results to return. Default is 15 [Optional]
lang:
Language for results as ISO 639-1 code. Default is None (all languages)
[Optional]
locale:
Language of the search query. Currently only 'ja' is effective. This is
intended for language-specific consumers and the default should work in
the majority of cases.
result_type:
Type of result which should be returned. Default is "mixed". Other
valid options are "recent" and "popular". [Optional]
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message containing
the term
'''
# Build request parameters
parameters = {}
if since_id:
try:
parameters['since_id'] = long(since_id)
except:
raise TwitterError("since_id must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if until:
parameters['until'] = until
if lang:
parameters['lang'] = lang
if locale:
parameters['locale'] = locale
if term is None and geocode is None:
return []
if term is not None:
parameters['q'] = term
if geocode is not None:
parameters['geocode'] = ','.join(map(str, geocode))
if include_entities:
parameters['include_entities'] = 1
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if result_type in ["mixed", "popular", "recent"]:
parameters['result_type'] = result_type
# Make and send requests
url = '%s/search/tweets.json' % self.base_url
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
# Return built list of statuses
return [Status.NewFromJsonDict(x) for x in data['statuses']]
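# A minimal usage sketch for GetSearch (hedged): api is assumed to be an
# authenticated twitter.Api instance and the coordinates and radius below are
# illustrative only. geocode is passed as a (latitude, longitude, radius)
# tuple and joined into the single "geocode" request parameter.
#
# >>> tweets = api.GetSearch(term='python', geocode=('37.781157', '-122.398720', '1mi'), count=5)
# >>> print [t.text for t in tweets]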
def GetUsersSearch(self,
term = None,
page = 1,
count = 20,
include_entities = None):
'''Return twitter user search results for a given term.
Args:
term:
Term to search by.
page:
Page of results to return. Default is 1
[Optional]
count:
Number of results to return. Default is 20
[Optional]
include_entities:
If True, each tweet will include a node called "entities".
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and hashtags.
[Optional]
Returns:
A sequence of twitter.User instances, one for each message containing
the term
'''
# Build request parameters
parameters = {}
if term is not None:
parameters['q'] = term
if include_entities:
parameters['include_entities'] = 1
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
# Make and send requests
url = '%s/users/search.json' % self.base_url
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [User.NewFromJsonDict(x) for x in data]
def GetTrendsCurrent(self, exclude = None):
'''Get the current top trending topics (global)
Args:
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
'''
return self.GetTrendsWoeid(id = 1, exclude = exclude)
def GetTrendsWoeid(self, id, exclude = None):
'''Return the top 10 trending topics for a specific WOEID, if trending
information is available for it.
Args:
id:
The Yahoo! Where On Earth ID for a location.
exclude:
Appends the exclude parameter as a request parameter.
Currently only exclude=hashtags is supported. [Optional]
Returns:
A list with 10 entries. Each entry contains a trend.
'''
url = '%s/trends/place.json' % (self.base_url)
parameters = {'id': id}
if exclude:
parameters['exclude'] = exclude
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
trends = []
timestamp = data[0]['as_of']
for trend in data[0]['trends']:
trends.append(Trend.NewFromJsonDict(trend, timestamp = timestamp))
return trends
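# Usage sketch (hedged): api is assumed to be an authenticated twitter.Api
# instance. WOEID 1 is the global placeholder used by GetTrendsCurrent above;
# 23424977 is commonly cited as the WOEID for the United States, but treat it
# as an illustrative value only.
#
# >>> trends = api.GetTrendsWoeid(id=23424977)
# >>> print [t.name for t in trends]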
def GetHomeTimeline(self,
count = None,
since_id = None,
max_id = None,
trim_user = False,
exclude_replies = False,
contributor_details = False,
include_entities = True):
'''
Fetch a collection of the most recent Tweets and retweets posted by the
authenticating user and the users they follow.
The home timeline is central to how most users interact with the Twitter
service.
The twitter.Api instance must be authenticated.
Args:
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. Defaults to 20. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
When True, each tweet returned in a timeline will include a user
object including only the status authors numerical ID. Omit this
parameter to receive the complete user object. [Optional]
exclude_replies:
This parameter will prevent replies from appearing in the
returned timeline. Using exclude_replies with the count
parameter will mean you will receive up to count tweets -
this is because the count parameter retrieves that many
tweets before filtering out retweets and replies.
[Optional]
contributor_details:
This parameter enhances the contributors element of the
status response to include the screen_name of the contributor.
By default only the user_id of the contributor is included.
[Optional]
include_entities:
The entities node will be omitted when set to False.
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A sequence of twitter.Status instances, one for each message
'''
url = '%s/statuses/home_timeline.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("API must be authenticated.")
parameters = {}
if count is not None:
try:
if int(count) > 200:
raise TwitterError("'count' may not be greater than 200")
except ValueError:
raise TwitterError("'count' must be an integer")
parameters['count'] = count
if since_id:
try:
parameters['since_id'] = long(since_id)
except ValueError:
raise TwitterError("'since_id' must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except ValueError:
raise TwitterError("'max_id' must be an integer")
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
if contributor_details:
parameters['contributor_details'] = 1
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetUserTimeline(self,
user_id = None,
screen_name = None,
since_id = None,
max_id = None,
count = None,
include_rts = None,
trim_user = None,
exclude_replies = None):
'''Fetch the sequence of public Status messages for a single user.
The twitter.Api instance must be authenticated if the user is private.
Args:
user_id:
Specifies the ID of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid user ID
is also a valid screen name. [Optional]
screen_name:
Specifies the screen name of the user for whom to return the
user_timeline. Helpful for disambiguating when a valid screen
name is also a user ID. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than (that is, older
than) or equal to the specified ID. [Optional]
count:
Specifies the number of statuses to retrieve. May not be
greater than 200. [Optional]
include_rts:
If True, the timeline will contain native retweets (if they
exist) in addition to the standard stream of tweets. [Optional]
trim_user:
If True, statuses will only contain the numerical user ID only.
Otherwise a full user object will be returned for each status.
[Optional]
exclude_replies:
If True, this will prevent replies from appearing in the returned
timeline. Using exclude_replies with the count parameter will mean you
will receive up to count tweets - this is because the count parameter
retrieves that many tweets before filtering out retweets and replies.
This parameter is only supported for JSON and XML responses. [Optional]
Returns:
A sequence of Status instances, one for each message up to count
'''
parameters = {}
url = '%s/statuses/user_timeline.json' % (self.base_url)
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
if since_id:
try:
parameters['since_id'] = long(since_id)
except:
raise TwitterError("since_id must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if include_rts:
parameters['include_rts'] = 1
if trim_user:
parameters['trim_user'] = 1
if exclude_replies:
parameters['exclude_replies'] = 1
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetStatus(self,
id,
trim_user = False,
include_my_retweet = True,
include_entities = True):
'''Returns a single status message, specified by the id parameter.
The twitter.Api instance must be authenticated.
Args:
id:
The numeric ID of the status you are trying to retrieve.
trim_user:
When set to True, each tweet returned in a timeline will include
a user object including only the status authors numerical ID.
Omit this parameter to receive the complete user object.
[Optional]
include_my_retweet:
When set to True, any Tweets returned that have been retweeted by
the authenticating user will include an additional
current_user_retweet node, containing the ID of the source status
for the retweet. [Optional]
include_entities:
If False, the entities node will be omitted.
This node offers a variety of metadata about the tweet in a
discrete structure, including: user_mentions, urls, and
hashtags. [Optional]
Returns:
A twitter.Status instance representing that status message
'''
url = '%s/statuses/show.json' % (self.base_url)
if not self._oauth_consumer:
raise TwitterError("API must be authenticated.")
parameters = {}
try:
parameters['id'] = long(id)
except ValueError:
raise TwitterError("'id' must be an integer.")
if trim_user:
parameters['trim_user'] = 1
if include_my_retweet:
parameters['include_my_retweet'] = 1
if not include_entities:
parameters['include_entities'] = 'none'
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def DestroyStatus(self, id, trim_user = False):
'''Destroys the status specified by the required ID parameter.
The twitter.Api instance must be authenticated and the
authenticating user must be the author of the specified status.
Args:
id:
The numerical ID of the status you're trying to destroy.
Returns:
A twitter.Status instance representing the destroyed status message
'''
if not self._oauth_consumer:
raise TwitterError("API must be authenticated.")
try:
post_data = {'id': long(id)}
except:
raise TwitterError("id must be an integer")
url = '%s/statuses/destroy/%s.json' % (self.base_url, id)
if trim_user:
post_data['trim_user'] = 1
json = self._FetchUrl(url, post_data = post_data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
@classmethod
def _calculate_status_length(cls, status, linksize = 19):
dummy_link_replacement = 'https://-%d-chars%s/' % (linksize, '-' * (linksize - 18))
shortened = ' '.join([x if not (x.startswith('http://') or
x.startswith('https://'))
else
dummy_link_replacement
for x in status.split(' ')])
return len(shortened)
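# Worked example (hedged): with the default linksize of 19, every http:// or
# https:// token in the status is swapped for a 19-character placeholder
# before measuring, mirroring Twitter's t.co link wrapping. For instance,
# 'check https://example.com/some/very/long/path out' counts as
# len('check ') + 19 + len(' out') = 29 characters.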
def PostUpdate(self, status, in_reply_to_status_id = None, latitude = None, longitude = None, place_id = None, display_coordinates = False, trim_user = False):
'''Post a twitter status message from the authenticated user.
The twitter.Api instance must be authenticated.
https://dev.twitter.com/docs/api/1.1/post/statuses/update
Args:
status:
The message text to be posted.
Must be less than or equal to 140 characters.
in_reply_to_status_id:
The ID of an existing status that the status to be posted is
in reply to. This implicitly sets the in_reply_to_user_id
attribute of the resulting status to the user ID of the
message being replied to. Invalid/missing status IDs will be
ignored. [Optional]
latitude:
Latitude coordinate of the tweet in degrees. Will only work
in conjunction with longitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting. [Optional]
longitude:
Longitude coordinate of the tweet in degrees. Will only work
in conjunction with latitude argument. Both longitude and
latitude will be ignored by twitter if the user has a false
geo_enabled setting. [Optional]
place_id:
A place in the world. These IDs can be retrieved from
GET geo/reverse_geocode. [Optional]
display_coordinates:
Whether or not to put a pin on the exact coordinates a tweet
has been sent from. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the message posted.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/statuses/update.json' % self.base_url
if isinstance(status, unicode) or self._input_encoding is None:
u_status = status
else:
u_status = unicode(status, self._input_encoding)
#if self._calculate_status_length(u_status, self._shortlink_size) > CHARACTER_LIMIT:
# raise TwitterError("Text must be less than or equal to %d characters. "
# "Consider using PostUpdates." % CHARACTER_LIMIT)
data = {'status': status}
if in_reply_to_status_id:
data['in_reply_to_status_id'] = in_reply_to_status_id
if latitude is not None and longitude is not None:
data['lat'] = str(latitude)
data['long'] = str(longitude)
if place_id is not None:
data['place_id'] = str(place_id)
if display_coordinates:
data['display_coordinates'] = 'true'
if trim_user:
data['trim_user'] = 'true'
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def PostUpdates(self, status, continuation = None, **kwargs):
'''Post one or more twitter status messages from the authenticated user.
Unlike api.PostUpdate, this method will post multiple status updates
if the message is longer than 140 characters.
The twitter.Api instance must be authenticated.
Args:
status:
The message text to be posted.
May be longer than 140 characters.
continuation:
The character string, if any, to be appended to all but the
last message. Note that Twitter strips trailing '...' strings
from messages. Consider using the unicode \u2026 character
(horizontal ellipsis) instead. [Defaults to None]
**kwargs:
See api.PostUpdate for a list of accepted parameters.
Returns:
A list of twitter.Status instances representing the messages posted.
'''
results = list()
if continuation is None:
continuation = ''
line_length = CHARACTER_LIMIT - len(continuation)
lines = textwrap.wrap(status, line_length)
for line in lines[0:-1]:
results.append(self.PostUpdate(line + continuation, **kwargs))
results.append(self.PostUpdate(lines[-1], **kwargs))
return results
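# Usage sketch (hedged): api is assumed to be an authenticated twitter.Api
# instance. The text is split into chunks that fit the character limit, with
# a horizontal ellipsis appended to every chunk except the last.
#
# >>> long_text = 'word ' * 60
# >>> statuses = api.PostUpdates(long_text, continuation=u'\u2026')
# >>> print [s.text for s in statuses]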
def PostRetweet(self, original_id, trim_user = False):
'''Retweet a tweet with the Retweet API.
The twitter.Api instance must be authenticated.
Args:
original_id:
The numerical id of the tweet that will be retweeted
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A twitter.Status instance representing the original tweet with retweet details embedded.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
try:
if int(original_id) <= 0:
raise TwitterError("'original_id' must be a positive number")
except ValueError:
raise TwitterError("'original_id' must be an integer")
url = '%s/statuses/retweet/%s.json' % (self.base_url, original_id)
data = {'id': original_id}
if trim_user:
data['trim_user'] = 'true'
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def GetUserRetweets(self, count = None, since_id = None, max_id = None, trim_user = False):
'''Fetch the sequence of retweets made by the authenticated user.
The twitter.Api instance must be authenticated.
Args:
count:
The number of status messages to retrieve. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each message up to count
'''
return self.GetUserTimeline(since_id = since_id, count = count, max_id = max_id, trim_user = trim_user, exclude_replies = True, include_rts = True)
def GetReplies(self, since_id = None, count = None, max_id = None, trim_user = False):
'''Get a sequence of status messages representing the 20 most
recent replies (status updates prefixed with @twitterID) to the
authenticating user.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A sequence of twitter.Status instances, one for each reply to the user.
'''
return self.GetUserTimeline(since_id = since_id, count = count, max_id = max_id, trim_user = trim_user, exclude_replies = False, include_rts = False)
def GetRetweets(self, statusid, count = None, trim_user = False):
'''Returns up to 100 of the first retweets of the tweet identified
by statusid
Args:
statusid:
The ID of the tweet for which retweets should be searched for
count:
The number of status messages to retrieve. [Optional]
trim_user:
If True the returned payload will only contain the user IDs,
otherwise the payload will contain the full user data item.
[Optional]
Returns:
A list of twitter.Status instances, which are retweets of statusid
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instsance must be authenticated.")
url = '%s/statuses/retweets/%s.json' % (self.base_url, statusid)
parameters = {}
if trim_user:
parameters['trim_user'] = 'true'
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(s) for s in data]
def GetRetweetsOfMe(self,
count = None,
since_id = None,
max_id = None,
trim_user = False,
include_entities = True,
include_user_entities = True):
'''Returns up to 100 of the most recent tweets of the user that have been
retweeted by others.
Args:
count:
The number of retweets to retrieve, up to 100. If omitted, 20 is
assumed.
since_id:
Returns results with an ID greater than (newer than) this ID.
max_id:
Returns results with an ID less than or equal to this ID.
trim_user:
When True, the user object for each tweet will only be an ID.
include_entities:
When True, the tweet entities will be included.
include_user_entities:
When True, the user entities will be included.
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/statuses/retweets_of_me.json' % self.base_url
parameters = {}
if count is not None:
try:
if int(count) > 100:
raise TwitterError("'count' may not be greater than 100")
except ValueError:
raise TwitterError("'count' must be an integer")
if count:
parameters['count'] = count
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if trim_user:
parameters['trim_user'] = trim_user
if not include_entities:
parameters['include_entities'] = include_entities
if not include_user_entities:
parameters['include_user_entities'] = include_user_entities
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(s) for s in data]
def GetFriends(self, user_id = None, screen_name = None, cursor = -1, skip_status = False, include_user_entities = False):
'''Fetch the sequence of twitter.User instances, one for each friend.
The twitter.Api instance must be authenticated.
Args:
user_id:
The twitter id of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose friends you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns [Optional(ish)]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included.
Returns:
A sequence of twitter.User instances, one for each friend
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/friends/list.json' % self.base_url
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if skip_status:
parameters['skip_status'] = True
if include_user_entities:
parameters['include_user_entities'] = True
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
result += [User.NewFromJsonDict(x) for x in data['users']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
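# Note on pagination (hedged sketch; api is assumed to be an authenticated
# twitter.Api instance): GetFriends and GetFollowers walk Twitter's cursor
# chain internally, starting from cursor=-1 and following next_cursor until
# it is 0 or repeats, so callers normally receive the full list in one call.
#
# >>> friends = api.GetFriends(screen_name='twitterapi', skip_status=True)
# >>> print len(friends)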
def GetFriendIDs(self, user_id = None, screen_name = None, cursor = -1, stringify_ids = False, count = None):
'''Returns a list of twitter user ids for every person
the specified user is following.
Args:
user_id:
The id of the user to retrieve the id list for
[Optional]
screen_name:
The screen_name of the user to retrieve the id list for
[Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits.
[Optional]
stringify_ids:
if True then twitter will return the ids as strings instead of integers.
[Optional]
count:
The number of status messages to retrieve. [Optional]
Returns:
A list of integers, one for each user id.
'''
url = '%s/friends/ids.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if stringify_ids:
parameters['stringify_ids'] = True
if count is not None:
parameters['count'] = count
result = []
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
result += [x for x in data['ids']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
def GetFollowerIDs(self, user_id = None, screen_name = None, cursor = -1, stringify_ids = False, count = None, total_count = None):
'''Returns a list of twitter user ids for every person
that is following the specified user.
Args:
user_id:
The id of the user to retrieve the id list for
[Optional]
screen_name:
The screen_name of the user to retrieve the id list for
[Optional]
cursor:
Specifies the Twitter API Cursor location to start at.
Note: there are pagination limits.
[Optional]
stringify_ids:
If True, Twitter will return the IDs as strings instead of integers.
[Optional]
count:
The number of user id's to retrieve per API request. Please be aware that
this might get you rate-limited if set to a small number. By default Twitter
will retrieve 5000 UIDs per call.
[Optional]
total_count:
The total amount of UIDs to retrieve. Good if the account has many followers
and you don't want to get rate limited. The data returned might contain more
UIDs if total_count is not a multiple of count (5000 by default).
[Optional]
Returns:
A list of integers, one for each user id.
'''
url = '%s/followers/ids.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if stringify_ids:
parameters['stringify_ids'] = True
if count is not None:
parameters['count'] = count
result = []
while True:
if total_count and total_count < count:
parameters['count'] = total_count
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
result += [x for x in data['ids']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
# only track the remaining cap when total_count was actually given
if total_count is not None:
    total_count -= len(data['ids'])
    if total_count < 1:
        break
else:
break
return result
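# Usage sketch (added for illustration, not part of the original library):
# the loop above follows next_cursor until Twitter reports the last page,
# optionally capped by total_count. Assuming an authenticated Api instance
# named `api` and a hypothetical account:
#
#   follower_ids = api.GetFollowerIDs(screen_name='some_account',
#                                     total_count=10000)
#   print len(follower_ids)
#
# Without total_count, large accounts can require many requests and may run
# into the rate limit.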
def GetFollowers(self, user_id = None, screen_name = None, cursor = -1, skip_status = False, include_user_entities = False):
'''Fetch the sequence of twitter.User instances, one for each follower
The twitter.Api instance must be authenticated.
Args:
user_id:
The twitter id of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose followers you are fetching.
If not specified, defaults to the authenticated user. [Optional]
cursor:
Should be set to -1 for the initial call and then is used to
control what result page Twitter returns [Optional(ish)]
skip_status:
If True the statuses will not be returned in the user items.
[Optional]
include_user_entities:
When True, the user entities will be included.
Returns:
A sequence of twitter.User instances, one for each follower
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/followers/list.json' % self.base_url
result = []
parameters = {}
if user_id is not None:
parameters['user_id'] = user_id
if screen_name is not None:
parameters['screen_name'] = screen_name
if skip_status:
parameters['skip_status'] = True
if include_user_entities:
parameters['include_user_entities'] = True
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
result += [User.NewFromJsonDict(x) for x in data['users']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
def UsersLookup(self, user_id = None, screen_name = None, users = None, include_entities = True):
'''Fetch extended information for the specified users.
Users may be specified either as lists of either user_ids,
screen_names, or twitter.User objects. The list of users that
are queried is the union of all specified parameters.
The twitter.Api instance must be authenticated.
Args:
user_id:
A list of user_ids to retrieve extended information.
[Optional]
screen_name:
A list of screen_names to retrieve extended information.
[Optional]
users:
A list of twitter.User objects to retrieve extended information.
[Optional]
include_entities:
The entities node that may appear within embedded statuses will be
omitted when set to False.
[Optional]
Returns:
A list of twitter.User objects for the requested users
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
if not user_id and not screen_name and not users:
raise TwitterError("Specify at least one of user_id, screen_name, or users.")
url = '%s/users/lookup.json' % self.base_url
parameters = {}
uids = list()
if user_id:
uids.extend(user_id)
if users:
uids.extend([u.id for u in users])
if len(uids):
parameters['user_id'] = ','.join(["%s" % u for u in uids])
if screen_name:
parameters['screen_name'] = ','.join(screen_name)
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters = parameters)
try:
data = self._ParseAndCheckTwitter(json)
except TwitterError, e:
_, e, _ = sys.exc_info()
t = e.args[0]
if len(t) == 1 and ('code' in t[0]) and (t[0]['code'] == 34):
data = []
else:
raise
return [User.NewFromJsonDict(u) for u in data]
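# Usage sketch (illustrative only; the ids and names below are made up):
# user_id and screen_name are lists here, and the queried set is the union
# of everything passed in. Assuming an authenticated Api instance `api`:
#
#   users = api.UsersLookup(user_id=[12, 13, 14],
#                           screen_name=['example_one', 'example_two'])
#   for u in users:
#       print u.screen_name
#
# Error code 34 ("page does not exist") is swallowed above and simply
# results in an empty list.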
def GetUser(self, user_id = None, screen_name = None, include_entities = True):
'''Returns a single user.
The twitter.Api instance must be authenticated.
Args:
user_id:
The id of the user to retrieve.
[Optional]
screen_name:
The screen name of the user for whom to return results for. Either a
user_id or screen_name is required for this method.
[Optional]
include_entities:
if set to False, the 'entities' node will not be included.
[Optional]
Returns:
A twitter.User instance representing that user
'''
url = '%s/users/show.json' % (self.base_url)
parameters = {}
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def GetDirectMessages(self, since_id = None, max_id = None, count = None, include_entities = True, skip_status = False):
'''Returns a list of the direct messages sent to the authenticating user.
The twitter.Api instance must be authenticated.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
include_entities:
The entities node will not be included when set to False.
[Optional]
skip_status:
When set to True statuses will not be included in the returned user
objects. [Optional]
Returns:
A sequence of twitter.DirectMessage instances
'''
url = '%s/direct_messages.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since_id:
parameters['since_id'] = since_id
if max_id:
parameters['max_id'] = max_id
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if not include_entities:
parameters['include_entities'] = 'false'
if skip_status:
parameters['skip_status'] = 1
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [DirectMessage.NewFromJsonDict(x) for x in data]
def GetSentDirectMessages(self, since_id = None, max_id = None, count = None, page = None, include_entities = True):
'''Returns a list of the direct messages sent by the authenticating user.
The twitter.Api instance must be authenticated.
Args:
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or
equal to the specified ID. [Optional]
count:
Specifies the number of direct messages to try and retrieve, up to a
maximum of 200. The value of count is best thought of as a limit to the
number of Tweets to return because suspended or deleted content is
removed after the count has been applied. [Optional]
page:
Specifies the page of results to retrieve.
Note: there are pagination limits. [Optional]
include_entities:
The entities node will not be included when set to False.
[Optional]
Returns:
A sequence of twitter.DirectMessage instances
'''
url = '%s/direct_messages/sent.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if since_id:
parameters['since_id'] = since_id
if page:
parameters['page'] = page
if max_id:
parameters['max_id'] = max_id
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [DirectMessage.NewFromJsonDict(x) for x in data]
def PostDirectMessage(self, text, user_id = None, screen_name = None):
'''Post a twitter direct message from the authenticated user
The twitter.Api instance must be authenticated. user_id or screen_name
must be specified.
Args:
text: The message text to be posted. Must be less than 140 characters.
user_id:
The ID of the user who should receive the direct message.
[Optional]
screen_name:
The screen name of the user who should receive the direct message.
[Optional]
Returns:
A twitter.DirectMessage instance representing the message posted
'''
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
url = '%s/direct_messages/new.json' % self.base_url
data = {'text': text}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return DirectMessage.NewFromJsonDict(data)
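# Usage sketch (illustrative; the recipient below is hypothetical). The
# recipient must be identified by user_id or screen_name, otherwise
# TwitterError is raised:
#
#   dm = api.PostDirectMessage('hello from python-twitter',
#                              screen_name='example_friend')
#   print dm.id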
def DestroyDirectMessage(self, id, include_entities = True):
'''Destroys the direct message specified in the required ID parameter.
The twitter.Api instance must be authenticated, and the
authenticating user must be the recipient of the specified direct
message.
Args:
id: The id of the direct message to be destroyed
Returns:
A twitter.DirectMessage instance representing the message destroyed
'''
url = '%s/direct_messages/destroy.json' % self.base_url
data = {'id': id}
if not include_entities:
data['include_entities'] = 'false'
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return DirectMessage.NewFromJsonDict(data)
def CreateFriendship(self, user_id = None, screen_name = None, follow = True):
'''Befriends the user specified by the user_id or screen_name.
The twitter.Api instance must be authenticated.
Args:
user_id:
A user_id to follow [Optional]
screen_name:
A screen_name to follow [Optional]
follow:
Set to False to disable notifications for the target user
Returns:
A twitter.User instance representing the befriended user.
'''
url = '%s/friendships/create.json' % (self.base_url)
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
if follow:
data['follow'] = 'true'
else:
data['follow'] = 'false'
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def DestroyFriendship(self, user_id = None, screen_name = None):
'''Discontinues friendship with a user_id or screen_name.
The twitter.Api instance must be authenticated.
Args:
user_id:
A user_id to unfollow [Optional]
screen_name:
A screen_name to unfollow [Optional]
Returns:
A twitter.User instance representing the discontinued friend.
'''
url = '%s/friendships/destroy.json' % self.base_url
data = {}
if user_id:
data['user_id'] = user_id
elif screen_name:
data['screen_name'] = screen_name
else:
raise TwitterError("Specify at least one of user_id or screen_name.")
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def CreateFavorite(self, status = None, id = None, include_entities = True):
'''Favorites the specified status object or id as the authenticating user.
Returns the favorite status when successful.
The twitter.Api instance must be authenticated.
Args:
id:
The id of the twitter status to mark as a favorite.
[Optional]
status:
The twitter.Status object to mark as a favorite.
[Optional]
include_entities:
The entities node will be omitted when set to False.
Returns:
A twitter.Status instance representing the newly-marked favorite.
'''
url = '%s/favorites/create.json' % self.base_url
data = {}
if id:
data['id'] = id
elif status:
data['id'] = status.id
else:
raise TwitterError("Specify id or status")
if not include_entities:
data['include_entities'] = 'false'
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def DestroyFavorite(self, status = None, id = None, include_entities = True):
'''Un-Favorites the specified status object or id as the authenticating user.
Returns the un-favorited status when successful.
The twitter.Api instance must be authenticated.
Args:
id:
The id of the twitter status to unmark as a favorite.
[Optional]
status:
The twitter.Status object to unmark as a favorite.
[Optional]
include_entities:
The entities node will be omitted when set to False.
Returns:
A twitter.Status instance representing the newly-unmarked favorite.
'''
url = '%s/favorites/destroy.json' % self.base_url
data = {}
if id:
data['id'] = id
elif status:
data['id'] = status.id
else:
raise TwitterError("Specify id or status")
if not include_entities:
data['include_entities'] = 'false'
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return Status.NewFromJsonDict(data)
def GetFavorites(self,
user_id = None,
screen_name = None,
count = None,
since_id = None,
max_id = None,
include_entities = True):
'''Return a list of Status objects representing favorited tweets.
By default, returns the (up to) 20 most recent tweets for the
authenticated user.
Args:
user_id:
The twitter id of the user whose favorites you are fetching.
If not specified, defaults to the authenticated user. [Optional]
screen_name:
The twitter name of the user whose favorites you are fetching.
If not specified, defaults to the authenticated user. [Optional]
count:
The number of favorited tweets to retrieve, up to a maximum of 200. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent than)
the specified ID. [Optional]
max_id:
Returns results with an ID less than (that is, older than) or equal
to the specified ID. [Optional]
include_entities:
The entities node will be omitted when set to False. [Optional]
'''
parameters = {}
url = '%s/favorites/list.json' % self.base_url
if user_id:
parameters['user_id'] = user_id
elif screen_name:
parameters['screen_name'] = screen_name
if since_id:
try:
parameters['since_id'] = long(since_id)
except:
raise TwitterError("since_id must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if include_entities:
parameters['include_entities'] = True
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def GetMentions(self,
count = None,
since_id = None,
max_id = None,
trim_user = False,
contributor_details = False,
include_entities = True):
'''Returns the 20 most recent mentions (statuses containing @screen_name)
for the authenticating user.
Args:
count:
Specifies the number of tweets to try and retrieve, up to a maximum of
200. The value of count is best thought of as a limit to the number of
tweets to return because suspended or deleted content is removed after
the count has been applied. [Optional]
since_id:
Returns results with an ID greater than (that is, more recent
than) the specified ID. There are limits to the number of
Tweets which can be accessed through the API. If the limit of
Tweets has occurred since the since_id, the since_id will be
forced to the oldest ID available. [Optional]
max_id:
Returns only statuses with an ID less than
(that is, older than) the specified ID. [Optional]
trim_user:
When set to True, each tweet returned in a timeline will include a user
object including only the status author's numerical ID. Omit this
parameter to receive the complete user object.
contributor_details:
If set to True, this parameter enhances the contributors element of the
status response to include the screen_name of the contributor. By
default only the user_id of the contributor is included.
include_entities:
The entities node will be omitted when set to False.
Returns:
A sequence of twitter.Status instances, one for each mention of the user.
'''
url = '%s/statuses/mentions_timeline.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {}
if count:
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if since_id:
try:
parameters['since_id'] = long(since_id)
except:
raise TwitterError("since_id must be an integer")
if max_id:
try:
parameters['max_id'] = long(max_id)
except:
raise TwitterError("max_id must be an integer")
if trim_user:
parameters['trim_user'] = 1
if contributor_details:
parameters['contributor_details'] = 'true'
if not include_entities:
parameters['include_entities'] = 'false'
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [Status.NewFromJsonDict(x) for x in data]
def CreateList(self, name, mode = None, description = None):
'''Creates a new list with the given name for the authenticated user.
The twitter.Api instance must be authenticated.
Args:
name:
New name for the list
mode:
'public' or 'private'.
Defaults to 'public'. [Optional]
description:
Description of the list. [Optional]
Returns:
A twitter.List instance representing the new list
'''
url = '%s/lists/create.json' % self.base_url
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
parameters = {'name': name}
if mode is not None:
parameters['mode'] = mode
if description is not None:
parameters['description'] = description
json = self._FetchUrl(url, post_data = parameters)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def DestroyList(self,
owner_screen_name = False,
owner_id = False,
list_id = None,
slug = None):
'''
Destroys the list identified by list_id or owner_screen_name/owner_id and
slug.
The twitter.Api instance must be authenticated.
Args:
owner_screen_name:
The screen_name of the user who owns the list being requested by a slug.
owner_id:
The user ID of the user who owns the list being requested by a slug.
list_id:
The numerical id of the list.
slug:
You can identify a list by its slug instead of its numerical id. If you
decide to do so, note that you'll also have to specify the list owner
using the owner_id or owner_screen_name parameters.
Returns:
A twitter.List instance representing the removed list.
'''
url = '%s/lists/destroy.json' % self.base_url
data = {}
if list_id:
try:
data['list_id'] = long(list_id)
except:
raise TwitterError("list_id must be an integer")
elif slug:
data['slug'] = slug
if owner_id:
try:
data['owner_id'] = long(owner_id)
except:
raise TwitterError("owner_id must be an integer")
elif owner_screen_name:
data['owner_screen_name'] = owner_screen_name
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
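# Usage sketch (illustrative, not from the original source): a list can be
# identified either by its numerical id, or by slug plus owner, mirroring
# the branching above:
#
#   api.DestroyList(list_id=123456)  # id is made up
#   api.DestroyList(slug='my-list', owner_screen_name='example_owner')
#
# A slug without owner_id or owner_screen_name raises TwitterError.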
def CreateSubscription(self,
owner_screen_name = False,
owner_id = False,
list_id = None,
slug = None):
'''Creates a subscription to a list by the authenticated user
The twitter.Api instance must be authenticated.
Args:
owner_screen_name:
The screen_name of the user who owns the list being requested by a slug.
owner_id:
The user ID of the user who owns the list being requested by a slug.
list_id:
The numerical id of the list.
slug:
You can identify a list by its slug instead of its numerical id. If you
decide to do so, note that you'll also have to specify the list owner
using the owner_id or owner_screen_name parameters.
Returns:
A twitter.List instance representing the list subscribed to
'''
url = '%s/lists/subscribers/create.json' % (self.base_url)
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
data = {}
if list_id:
try:
data['list_id'] = long(list_id)
except:
raise TwitterError("list_id must be an integer")
elif slug:
data['slug'] = slug
if owner_id:
try:
data['owner_id'] = long(owner_id)
except:
raise TwitterError("owner_id must be an integer")
elif owner_screen_name:
data['owner_screen_name'] = owner_screen_name
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def DestroySubscription(self,
owner_screen_name = False,
owner_id = False,
list_id = None,
slug = None):
'''Destroys the subscription to a list for the authenticated user
The twitter.Api instance must be authenticated.
Args:
owner_screen_name:
The screen_name of the user who owns the list being requested by a slug.
owner_id:
The user ID of the user who owns the list being requested by a slug.
list_id:
The numerical id of the list.
slug:
You can identify a list by its slug instead of its numerical id. If you
decide to do so, note that you'll also have to specify the list owner
using the owner_id or owner_screen_name parameters.
Returns:
A twitter.List instance representing the removed list.
'''
url = '%s/lists/subscribers/destroy.json' % (self.base_url)
if not self._oauth_consumer:
raise TwitterError("The twitter.Api instance must be authenticated.")
data = {}
if list_id:
try:
data['list_id'] = long(list_id)
except:
raise TwitterError("list_id must be an integer")
elif slug:
data['slug'] = slug
if owner_id:
try:
data['owner_id'] = long(owner_id)
except:
raise TwitterError("owner_id must be an integer")
elif owner_screen_name:
data['owner_screen_name'] = owner_screen_name
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
else:
raise TwitterError("Identify list by list_id or owner_screen_name/owner_id and slug")
json = self._FetchUrl(url, post_data = data)
data = self._ParseAndCheckTwitter(json)
return List.NewFromJsonDict(data)
def GetSubscriptions(self, user_id = None, screen_name = None, count = 20, cursor = -1):
'''
Obtain a collection of the lists the specified user is subscribed to, 20
lists per page by default. Does not include the user's own lists.
The twitter.Api instance must be authenticated.
Args:
user_id:
The ID of the user for whom to return results for. [Optional]
screen_name:
The screen name of the user for whom to return results for.
[Optional]
count:
The amount of results to return per page. Defaults to 20.
No more than 1000 results will ever be returned in a single page.
cursor:
"page" value that Twitter will use to start building the
list sequence from. -1 to start at the beginning.
Twitter will return in the result the values for next_cursor
and previous_cursor. [Optional]
Returns:
A sequence of twitter.List instances, one for each list
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/lists/subscriptions.json' % (self.base_url)
parameters = {}
try:
parameters['cursor'] = int(cursor)
except:
raise TwitterError("cursor must be an integer")
try:
parameters['count'] = int(count)
except:
raise TwitterError("count must be an integer")
if user_id is not None:
try:
parameters['user_id'] = long(user_id)
except:
raise TwitterError('user_id must be an integer')
elif screen_name is not None:
parameters['screen_name'] = screen_name
else:
raise TwitterError('Specify user_id or screen_name')
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
return [List.NewFromJsonDict(x) for x in data['lists']]
def GetLists(self, user_id = None, screen_name = None, count = None, cursor = -1):
'''Fetch the sequence of lists for a user.
The twitter.Api instance must be authenticated.
Args:
user_id:
The ID of the user for whom to return results for. [Optional]
screen_name:
The screen name of the user for whom to return results for.
[Optional]
count:
The amount of results to return per page. Defaults to 20. No more than
1000 results will ever be returned in a single page.
[Optional]
cursor:
"page" value that Twitter will use to start building the
list sequence from. -1 to start at the beginning.
Twitter will return in the result the values for next_cursor
and previous_cursor. [Optional]
Returns:
A sequence of twitter.List instances, one for each list
'''
if not self._oauth_consumer:
raise TwitterError("twitter.Api instance must be authenticated")
url = '%s/lists/ownerships.json' % self.base_url
result = []
parameters = {}
if user_id is not None:
try:
parameters['user_id'] = long(user_id)
except:
raise TwitterError('user_id must be an integer')
elif screen_name is not None:
parameters['screen_name'] = screen_name
else:
raise TwitterError('Specify user_id or screen_name')
if count is not None:
parameters['count'] = count
while True:
parameters['cursor'] = cursor
json = self._FetchUrl(url, parameters = parameters)
data = self._ParseAndCheckTwitter(json)
result += [List.NewFromJsonDict(x) for x in data['lists']]
if 'next_cursor' in data:
if data['next_cursor'] == 0 or data['next_cursor'] == data['previous_cursor']:
break
else:
cursor = data['next_cursor']
else:
break
return result
def VerifyCredentials(self):
'''Returns a twitter.User instance if the authenticating user is valid.
Returns:
A twitter.User instance representing that user if the
credentials are valid, None otherwise.
'''
if not self._oauth_consumer:
raise TwitterError("Api instance must first be given user credentials.")
url = '%s/account/verify_credentials.json' % self.base_url
try:
json = self._FetchUrl(url, no_cache = True)
except urllib2.HTTPError, http_error:
if http_error.code == httplib.UNAUTHORIZED:
return None
else:
raise http_error
data = self._ParseAndCheckTwitter(json)
return User.NewFromJsonDict(data)
def SetCache(self, cache):
'''Override the default cache. Set to None to prevent caching.
Args:
cache:
An instance that supports the same API as the twitter._FileCache
'''
if cache == DEFAULT_CACHE:
self._cache = _FileCache()
else:
self._cache = cache
def SetUrllib(self, urllib):
'''Override the default urllib implementation.
Args:
urllib:
An instance that supports the same API as the urllib2 module
'''
self._urllib = urllib
def SetCacheTimeout(self, cache_timeout):
'''Override the default cache timeout.
Args:
cache_timeout:
Time, in seconds, that responses should be reused.
'''
self._cache_timeout = cache_timeout
def SetUserAgent(self, user_agent):
'''Override the default user agent
Args:
user_agent:
A string that should be send to the server as the User-agent
'''
self._request_headers['User-Agent'] = user_agent
def SetXTwitterHeaders(self, client, url, version):
'''Set the X-Twitter HTTP headers that will be sent to the server.
Args:
client:
The client name as a string. Will be sent to the server as
the 'X-Twitter-Client' header.
url:
The URL of the meta.xml as a string. Will be sent to the server
as the 'X-Twitter-Client-URL' header.
version:
The client version as a string. Will be sent to the server
as the 'X-Twitter-Client-Version' header.
'''
self._request_headers['X-Twitter-Client'] = client
self._request_headers['X-Twitter-Client-URL'] = url
self._request_headers['X-Twitter-Client-Version'] = version
def SetSource(self, source):
'''Suggest the "from source" value to be displayed on the Twitter web site.
The value of the 'source' parameter must be first recognized by
the Twitter server. New source values are authorized on a case by
case basis by the Twitter development team.
Args:
source:
The source name as a string. Will be sent to the server as
the 'source' parameter.
'''
self._default_params['source'] = source
def GetRateLimitStatus(self, resources = None):
'''Fetch the rate limit status for the currently authorized user.
Args:
resources:
A comma-separated list of resource families you want to know the current
rate limit disposition of.
[Optional]
Returns:
A dictionary containing the time the limit will reset (reset_time),
the number of remaining hits allowed before the reset (remaining_hits),
the number of hits allowed in a 60-minute period (hourly_limit), and
the time of the reset in seconds since The Epoch (reset_time_in_seconds).
'''
parameters = {}
if resources is not None:
parameters['resources'] = resources
url = '%s/application/rate_limit_status.json' % self.base_url
json = self._FetchUrl(url, parameters = parameters, no_cache = True)
data = self._ParseAndCheckTwitter(json)
return data
def MaximumHitFrequency(self):
'''Determines the minimum number of seconds that a program must wait
before hitting the server again without exceeding the rate_limit
imposed for the currently authenticated user.
Returns:
The minimum second interval that a program must use so as to not
exceed the rate_limit imposed for the user.
'''
rate_status = self.GetRateLimitStatus()
reset_time = rate_status.get('reset_time', None)
limit = rate_status.get('remaining_hits', None)
if reset_time:
# put the reset time into a datetime object
reset = datetime.datetime(*rfc822.parsedate(reset_time)[:7])
# find the difference in time between now and the reset time + 1 hour
delta = reset + datetime.timedelta(hours = 1) - datetime.datetime.utcnow()
if not limit:
return int(delta.seconds)
# determine the minimum number of seconds allowed as a regular interval
max_frequency = int(delta.seconds / limit) + 1
# return the number of seconds
return max_frequency
return 60
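# Throttling sketch (an assumed usage pattern, not part of the library):
# sleeping for the suggested interval between polls spreads the remaining
# allowance over the rate-limit window. `handle` is a hypothetical callback.
#
#   import time
#   while True:
#       handle(api.GetMentions())
#       time.sleep(api.MaximumHitFrequency())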
def _BuildUrl(self, url, path_elements = None, extra_params = None):
# Break url into constituent parts
(scheme, netloc, path, params, query, fragment) = urlparse.urlparse(url)
# Add any additional path elements to the path
if path_elements:
# Filter out the path elements that have a value of None
p = [i for i in path_elements if i]
if not path.endswith('/'):
path += '/'
path += '/'.join(p)
# Add any additional query parameters to the query string
if extra_params and len(extra_params) > 0:
extra_query = self._EncodeParameters(extra_params)
# Add it to the existing query
if query:
query += '&' + extra_query
else:
query = extra_query
# Return the rebuilt URL
return urlparse.urlunparse((scheme, netloc, path, params, query, fragment))
def _InitializeRequestHeaders(self, request_headers):
if request_headers:
self._request_headers = request_headers
else:
self._request_headers = {}
def _InitializeUserAgent(self):
user_agent = 'Python-urllib/%s (python-twitter/%s)' % \
(self._urllib.__version__, __version__)
self.SetUserAgent(user_agent)
def _InitializeDefaultParameters(self):
self._default_params = {}
def _DecompressGzippedResponse(self, response):
raw_data = response.read()
if response.headers.get('content-encoding', None) == 'gzip':
url_data = gzip.GzipFile(fileobj = StringIO.StringIO(raw_data)).read()
else:
url_data = raw_data
return url_data
def _Encode(self, s):
if self._input_encoding:
return unicode(s, self._input_encoding).encode('utf-8')
else:
return unicode(s).encode('utf-8')
def _EncodeParameters(self, parameters):
'''Return a string in key=value&key=value form
Values of None are not included in the output string.
Args:
parameters:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if parameters is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in parameters.items() if v is not None]))
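# Behaviour sketch (illustrative): values of None are dropped before
# urlencoding, so optional parameters can be passed through unconditionally.
#
#   api._EncodeParameters({'count': 20, 'max_id': None, 'q': u'caf\xe9'})
#   # -> 'count=20&q=caf%C3%A9' (key order depends on dict ordering)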
def _EncodePostData(self, post_data):
'''Return a string in key=value&key=value form
Values are assumed to be encoded in the format specified by self._encoding,
and are subsequently URL encoded.
Args:
post_data:
A dict of (key, value) tuples, where value is encoded as
specified by self._encoding
Returns:
A URL-encoded string in "key=value&key=value" form
'''
if post_data is None:
return None
else:
return urllib.urlencode(dict([(k, self._Encode(v)) for k, v in post_data.items()]))
def _ParseAndCheckTwitter(self, json):
"""Try and parse the JSON returned from Twitter and return
an empty dictionary if there is any error. This is a purely
defensive check because during some Twitter network outages
it will return an HTML failwhale page."""
try:
data = simplejson.loads(json)
self._CheckForTwitterError(data)
except ValueError:
if "<title>Twitter / Over capacity</title>" in json:
raise TwitterError("Capacity Error")
if "<title>Twitter / Error</title>" in json:
raise TwitterError("Technical Error")
raise TwitterError("json decoding")
return data
def _CheckForTwitterError(self, data):
"""Raises a TwitterError if twitter returns an error message.
Args:
data:
A python dict created from the Twitter json response
Raises:
TwitterError wrapping the twitter error message if one exists.
"""
# Twitter errors are relatively unlikely, so it is faster
# to check first, rather than try and catch the exception
if 'error' in data:
raise TwitterError(data['error'])
if 'errors' in data:
raise TwitterError(data['errors'])
def _FetchUrl(self,
url,
post_data = None,
parameters = None,
no_cache = None,
use_gzip_compression = None):
'''Fetch a URL, optionally caching for a specified time.
Args:
url:
The URL to retrieve
post_data:
A dict of (str, unicode) key/value pairs.
If set, POST will be used.
parameters:
A dict whose key/value pairs should encoded and added
to the query string. [Optional]
no_cache:
If true, overrides the cache on the current request
use_gzip_compression:
If True, tells the server to gzip-compress the response.
It does not apply to POST requests.
Defaults to None, which will get the value to use from
the instance variable self._use_gzip [Optional]
Returns:
A string containing the body of the response.
'''
# Build the extra parameters dict
extra_params = {}
if self._default_params:
extra_params.update(self._default_params)
if parameters:
extra_params.update(parameters)
if post_data:
http_method = "POST"
else:
http_method = "GET"
if self._debugHTTP:
_debug = 1
else:
_debug = 0
http_handler = self._urllib.HTTPHandler(debuglevel = _debug)
https_handler = self._urllib.HTTPSHandler(debuglevel = _debug)
http_proxy = os.environ.get('http_proxy')
https_proxy = os.environ.get('https_proxy')
if http_proxy is None or https_proxy is None:
proxy_status = False
else:
proxy_status = True
opener = self._urllib.OpenerDirector()
opener.add_handler(http_handler)
opener.add_handler(https_handler)
if proxy_status is True:
proxy_handler = self._urllib.ProxyHandler({'http': str(http_proxy), 'https': str(https_proxy)})
opener.add_handler(proxy_handler)
if use_gzip_compression is None:
use_gzip = self._use_gzip
else:
use_gzip = use_gzip_compression
# Set up compression
if use_gzip and not post_data:
opener.addheaders.append(('Accept-Encoding', 'gzip'))
if self._oauth_consumer is not None:
if post_data and http_method == "POST":
parameters = post_data.copy()
req = oauth.Request.from_consumer_and_token(self._oauth_consumer,
token = self._oauth_token,
http_method = http_method,
http_url = url, parameters = parameters)
req.sign_request(self._signature_method_hmac_sha1, self._oauth_consumer, self._oauth_token)
headers = req.to_header()
if http_method == "POST":
encoded_post_data = req.to_postdata()
else:
encoded_post_data = None
url = req.to_url()
else:
url = self._BuildUrl(url, extra_params = extra_params)
encoded_post_data = self._EncodePostData(post_data)
# Open and return the URL immediately if we're not going to cache
if encoded_post_data or no_cache or not self._cache or not self._cache_timeout:
response = opener.open(url, encoded_post_data)
url_data = self._DecompressGzippedResponse(response)
opener.close()
else:
# Unique keys are a combination of the url and the oAuth Consumer Key
if self._consumer_key:
key = self._consumer_key + ':' + url
else:
key = url
# See if it has been cached before
last_cached = self._cache.GetCachedTime(key)
# If the cached version is outdated then fetch another and store it
if not last_cached or time.time() >= last_cached + self._cache_timeout:
try:
response = opener.open(url, encoded_post_data)
url_data = self._DecompressGzippedResponse(response)
self._cache.Set(key, url_data)
except urllib2.HTTPError, e:
print e
opener.close()
else:
url_data = self._cache.Get(key)
# Always return the latest version
return url_data
class _FileCacheError(Exception):
'''Base exception class for FileCache related errors'''
class _FileCache(object):
DEPTH = 3
def __init__(self, root_directory = None):
self._InitializeRootDirectory(root_directory)
def Get(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return open(path).read()
else:
return None
def Set(self, key, data):
path = self._GetPath(key)
directory = os.path.dirname(path)
if not os.path.exists(directory):
os.makedirs(directory)
if not os.path.isdir(directory):
raise _FileCacheError('%s exists but is not a directory' % directory)
temp_fd, temp_path = tempfile.mkstemp()
temp_fp = os.fdopen(temp_fd, 'w')
temp_fp.write(data)
temp_fp.close()
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
os.rename(temp_path, path)
def Remove(self, key):
path = self._GetPath(key)
if not path.startswith(self._root_directory):
raise _FileCacheError('%s does not appear to live under %s' %
(path, self._root_directory))
if os.path.exists(path):
os.remove(path)
def GetCachedTime(self, key):
path = self._GetPath(key)
if os.path.exists(path):
return os.path.getmtime(path)
else:
return None
def _GetUsername(self):
'''Attempt to find the username in a cross-platform fashion.'''
try:
return os.getenv('USER') or \
os.getenv('LOGNAME') or \
os.getenv('USERNAME') or \
os.getlogin() or \
'nobody'
except (AttributeError, IOError, OSError), e:
return 'nobody'
def _GetTmpCachePath(self):
username = self._GetUsername()
cache_directory = 'python.cache_' + username
return os.path.join(tempfile.gettempdir(), cache_directory)
def _InitializeRootDirectory(self, root_directory):
if not root_directory:
root_directory = self._GetTmpCachePath()
root_directory = os.path.abspath(root_directory)
if not os.path.exists(root_directory):
os.mkdir(root_directory)
if not os.path.isdir(root_directory):
raise _FileCacheError('%s exists but is not a directory' %
root_directory)
self._root_directory = root_directory
def _GetPath(self, key):
try:
hashed_key = md5(key).hexdigest()
except TypeError:
hashed_key = md5.new(key).hexdigest()
return os.path.join(self._root_directory,
self._GetPrefix(hashed_key),
hashed_key)
def _GetPrefix(self, hashed_key):
return os.path.sep.join(hashed_key[0:_FileCache.DEPTH])
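# Minimal demonstration of the _FileCache used by _FetchUrl above (added for
# illustration, not part of the original module). Entries are sharded under a
# directory prefix derived from the md5 of the key, as implemented in
# _GetPath/_GetPrefix.
if __name__ == '__main__':
    _demo_key = 'https://example.invalid/timeline'
    _demo_cache = _FileCache()
    _demo_cache.Set(_demo_key, '{"demo": true}')
    print _demo_cache.Get(_demo_key)
    print _demo_cache.GetCachedTime(_demo_key)
    _demo_cache.Remove(_demo_key)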
| 175,429 | Python | .py | 3,901 | 31.144322 | 163 | 0.55575 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,370 | __init__.py | CouchPotato_CouchPotatoServer/libs/importhelper/__init__.py |
"""Backport of importlib.import_module from 3.x."""
# While not critical (and in no way guaranteed!), it would be nice to keep this
# code compatible with Python 2.3.
import sys
def _resolve_name(name, package, level):
"""Return the absolute name of the module to be imported."""
if not hasattr(package, 'rindex'):
raise ValueError("'package' not set to a string")
dot = len(package)
for x in xrange(level, 1, -1):
try:
dot = package.rindex('.', 0, dot)
except ValueError:
raise ValueError("attempted relative import beyond top-level "
"package")
return "%s.%s" % (package[:dot], name)
def import_module(name, package=None):
"""Import a module.
The 'package' argument is required when performing a relative import. It
specifies the package to use as the anchor point from which to resolve the
relative import to an absolute import.
"""
if name.startswith('.'):
if not package:
raise TypeError("relative imports require the 'package' argument")
level = 0
for character in name:
if character != '.':
break
level += 1
name = _resolve_name(name[level:], package, level)
__import__(name)
return sys.modules[name]
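# Example usage of this backport (added for illustration; the package and
# module names are hypothetical):
#
#   from importhelper import import_module
#   json_mod = import_module('json')                              # absolute
#   sibling = import_module('.sibling', package='mypackage.sub')  # relative
#
# A relative name without the 'package' argument raises TypeError, and a
# relative import that climbs past the top-level package raises ValueError.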
| 1,327 | Python | .py | 33 | 32.575758 | 79 | 0.626843 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,371 | iputil.py | CouchPotato_CouchPotatoServer/libs/pyutil/iputil.py |
# from the Python Standard Library
import os, re, socket, sys, subprocess
# from Twisted
from twisted.internet import defer, threads, reactor
from twisted.internet.protocol import DatagramProtocol
from twisted.python.procutils import which
from twisted.python import log
try:
import resource
def increase_rlimits():
# We'd like to raise our soft resource.RLIMIT_NOFILE, since certain
# systems (OS-X, probably solaris) start with a relatively low limit
# (256), and some unit tests want to open up more sockets than this.
# Most linux systems start with both hard and soft limits at 1024,
# which is plenty.
# unfortunately the values to pass to setrlimit() vary widely from
# one system to another. OS-X reports (256, HUGE), but the real hard
# limit is 10240, and accepts (-1,-1) to mean raise it to the
# maximum. Cygwin reports (256, -1), then ignores a request of
# (-1,-1): instead you have to guess at the hard limit (it appears to
# be 3200), so using (3200,-1) seems to work. Linux reports a
# sensible (1024,1024), then rejects (-1,-1) as trying to raise the
# maximum limit, so you could set it to (1024,1024) but you might as
# well leave it alone.
try:
current = resource.getrlimit(resource.RLIMIT_NOFILE)
except AttributeError:
# we're probably missing RLIMIT_NOFILE
return
if current[0] >= 1024:
# good enough, leave it alone
return
try:
if current[1] > 0 and current[1] < 1000000:
# solaris reports (256, 65536)
resource.setrlimit(resource.RLIMIT_NOFILE,
(current[1], current[1]))
else:
# this one works on OS-X (bsd), and gives us 10240, but
# it doesn't work on linux (on which both the hard and
# soft limits are set to 1024 by default).
resource.setrlimit(resource.RLIMIT_NOFILE, (-1,-1))
new = resource.getrlimit(resource.RLIMIT_NOFILE)
if new[0] == current[0]:
# probably cygwin, which ignores -1. Use a real value.
resource.setrlimit(resource.RLIMIT_NOFILE, (3200,-1))
except ValueError:
log.msg("unable to set RLIMIT_NOFILE: current value %s"
% (resource.getrlimit(resource.RLIMIT_NOFILE),))
except:
# who knows what. It isn't very important, so log it and continue
log.err()
except ImportError:
def _increase_rlimits():
# TODO: implement this for Windows. Although I suspect the
# solution might be "be running under the iocp reactor and
# make this function be a no-op".
pass
# pyflakes complains about two 'def FOO' statements at the same time,
# since one might be shadowing the other. This hack appeases pyflakes.
increase_rlimits = _increase_rlimits
def get_local_addresses_async(target="198.41.0.4"): # A.ROOT-SERVERS.NET
"""
Return a Deferred that fires with a list of IPv4 addresses (as dotted-quad
strings) that are currently configured on this host, sorted in descending
order of how likely we think they are to work.
@param target: we want to learn an IP address they could try using to
connect to us; The default value is fine, but it might help if you
pass the address of a host that you are actually trying to be
reachable to.
"""
addresses = []
local_ip = get_local_ip_for(target)
if local_ip:
addresses.append(local_ip)
if sys.platform == "cygwin":
d = _cygwin_hack_find_addresses(target)
else:
d = _find_addresses_via_config()
def _collect(res):
for addr in res:
if addr != "0.0.0.0" and not addr in addresses:
addresses.append(addr)
return addresses
d.addCallback(_collect)
return d
def get_local_ip_for(target):
"""Find out what our IP address is for use by a given target.
@return: the IP address as a dotted-quad string which could be used by
to connect to us. It might work for them, it might not. If
there is no suitable address (perhaps we don't currently have an
externally-visible interface), this will return None.
"""
try:
target_ipaddr = socket.gethostbyname(target)
except socket.gaierror:
# DNS isn't running, or somehow we encountered an error
# note: if an interface is configured and up, but nothing is
# connected to it, gethostbyname("A.ROOT-SERVERS.NET") will take 20
# seconds to raise socket.gaierror . This is synchronous and occurs
# for each node being started, so users of
# test.common.SystemTestMixin (like test_system) will see something
# like 120s of delay, which may be enough to hit the default trial
# timeouts. For that reason, get_local_addresses_async() was changed
# to default to the numerical ip address for A.ROOT-SERVERS.NET, to
# avoid this DNS lookup. This also makes node startup fractionally
# faster.
return None
udpprot = DatagramProtocol()
port = reactor.listenUDP(0, udpprot)
try:
udpprot.transport.connect(target_ipaddr, 7)
localip = udpprot.transport.getHost().host
except socket.error:
# no route to that host
localip = None
port.stopListening() # note, this returns a Deferred
return localip
# k: result of sys.platform, v: which kind of IP configuration reader we use
_platform_map = {
"linux-i386": "linux", # redhat
"linux-ppc": "linux", # redhat
"linux2": "linux", # debian
"linux3": "linux", # debian
"win32": "win32",
"irix6-n32": "irix",
"irix6-n64": "irix",
"irix6": "irix",
"openbsd2": "bsd",
"openbsd3": "bsd",
"openbsd4": "bsd",
"openbsd5": "bsd",
"darwin": "bsd", # Mac OS X
"freebsd4": "bsd",
"freebsd5": "bsd",
"freebsd6": "bsd",
"freebsd7": "bsd",
"freebsd8": "bsd",
"freebsd9": "bsd",
"netbsd1": "bsd",
"netbsd2": "bsd",
"netbsd3": "bsd",
"netbsd4": "bsd",
"netbsd5": "bsd",
"netbsd6": "bsd",
"dragonfly2": "bsd",
"sunos5": "sunos",
"cygwin": "cygwin",
}
class UnsupportedPlatformError(Exception):
pass
# Wow, I'm really amazed at how much mileage we've gotten out of calling
# the external route.exe program on windows... It appears to work on all
# versions so far. Still, the real system calls would much be preferred...
# ... thus wrote Greg Smith in time immemorial...
_win32_path = 'route.exe'
_win32_args = ('print',)
_win32_re = re.compile('^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
# These work in Redhat 6.x and Debian 2.2 potato
_linux_path = '/sbin/ifconfig'
_linux_re = re.compile('^\s*inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# NetBSD 1.4 (submitted by Rhialto), Darwin, Mac OS X
_netbsd_path = '/sbin/ifconfig'
_netbsd_args = ('-a',)
_netbsd_re = re.compile('^\s+inet [a-zA-Z]*:?(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# Irix 6.5
_irix_path = '/usr/etc/ifconfig'
# Solaris 2.x
_sunos_path = '/usr/sbin/ifconfig'
# k: platform string as provided in the value of _platform_map
# v: tuple of (path_to_tool, args, regex,)
_tool_map = {
"linux": (_linux_path, (), _linux_re,),
"win32": (_win32_path, _win32_args, _win32_re,),
"cygwin": (_win32_path, _win32_args, _win32_re,),
"bsd": (_netbsd_path, _netbsd_args, _netbsd_re,),
"irix": (_irix_path, _netbsd_args, _netbsd_re,),
"sunos": (_sunos_path, _netbsd_args, _netbsd_re,),
}
def _find_addresses_via_config():
return threads.deferToThread(_synchronously_find_addresses_via_config)
def _synchronously_find_addresses_via_config():
# originally by Greg Smith, hacked by Zooko to conform to Brian's API
platform = _platform_map.get(sys.platform)
if not platform:
raise UnsupportedPlatformError(sys.platform)
(pathtotool, args, regex,) = _tool_map[platform]
# If pathtotool is a fully qualified path then we just try that.
# If it is merely an executable name then we use Twisted's
# "which()" utility and try each executable in turn until one
# gives us something that resembles a dotted-quad IPv4 address.
if os.path.isabs(pathtotool):
return _query(pathtotool, args, regex)
else:
exes_to_try = which(pathtotool)
for exe in exes_to_try:
try:
addresses = _query(exe, args, regex)
except Exception:
addresses = []
if addresses:
return addresses
return []
def _query(path, args, regex):
env = {'LANG': 'en_US.UTF-8'}
p = subprocess.Popen([path] + list(args), stdout=subprocess.PIPE, stderr=subprocess.PIPE, env=env)
(output, err) = p.communicate()
addresses = []
outputsplit = output.split('\n')
for outline in outputsplit:
m = regex.match(outline)
if m:
addr = m.groupdict()['address']
if addr not in addresses:
addresses.append(addr)
return addresses
def _cygwin_hack_find_addresses(target):
addresses = []
for h in [target, "localhost", "127.0.0.1",]:
try:
addr = get_local_ip_for(h)
if addr not in addresses:
addresses.append(addr)
except socket.gaierror:
pass
return defer.succeed(addresses)
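# Minimal sketch of how a caller might use this module (added for
# illustration, not part of the original file): get_local_addresses_async()
# returns a Deferred, so it has to run under the Twisted reactor.
if __name__ == '__main__':
    def _print_and_stop(addresses):
        for a in addresses:
            print a
        reactor.stop()
    get_local_addresses_async().addCallback(_print_and_stop)
    reactor.run()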
| 9,752 | Python | .py | 226 | 35.747788 | 129 | 0.629255 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,372 | hashexpand.py | CouchPotato_CouchPotatoServer/libs/pyutil/hashexpand.py |
# Copyright (c) 2002-2012 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
"""
Cryptographically strong pseudo-random number generator based on SHA256.
"""
class SHA256Expander:
"""
Provide a cryptographically strong pseudo-random number generator based on
SHA256. Hopefully this means that no attacker will be able to predict any
bit of output that he hasn't seen, given that he doesn't know anything about
the seed and given that he can see as many bits of output as he desires
except for the bit that he is trying to predict. Further it is hoped that
an attacker will not even be able to determine whether a given stream of
random bytes was generated by this PRNG or by flipping a coin repeatedly.
The safety of this technique has not been verified by a Real Cryptographer.
... but it is similar to the PRNG in FIPS-186...
The seed and counter are encoded in DJB's netstring format so that I
don't have to think about the possibility of ambiguity.
Note: I've since learned more about the theory of secure hash functions
and the above is a strong assumption about a secure hash function. Use
of this class should be considered deprecated and you should use a more
well-analyzed KDF (such as the nascent standard HKDF) or stream cipher or
whatever it is that you need.
"""
def __init__(self, seed=None):
warnings.warn("deprecated", DeprecationWarning)
if seed is not None:
self.seed(seed)
def seed(self, seed):
import hashlib
self.starth = hashlib.sha256('24:pyutil hash expansion v2,10:algorithm:,6:SHA256,6:value:,')
seedlen = len(seed)
seedlenstr = str(seedlen)
self.starth.update(seedlenstr)
self.starth.update(':')
self.starth.update(seed)
self.starth.update(',')
self.avail = ""
self.counter = 0
def get(self, bytes):
bytesleft = bytes
res = []
while bytesleft > 0:
if len(self.avail) == 0:
h = self.starth.copy()
counterstr = str(self.counter)
counterstrlen = len(counterstr)
counterstrlenstr = str(counterstrlen)
h.update(counterstrlenstr)
h.update(':')
h.update(counterstr)
h.update(',')
self.avail = h.digest()
self.counter += 1
numb = min(len(self.avail), bytesleft)
(chunk, self.avail,) = (self.avail[:numb], self.avail[numb:],)
res.append(chunk)
bytesleft = bytesleft - numb
resstr = ''.join(res)
assert len(resstr) == bytes
return resstr
def sha256expand(inpstr, expbytes):
return SHA256Expander(inpstr).get(expbytes)
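# Small demonstration (added for illustration): expansion is deterministic,
# so the same seed yields the same byte stream however it is chunked out.
if __name__ == '__main__':
    e = SHA256Expander('example seed')
    first = e.get(16) + e.get(16)
    assert first == sha256expand('example seed', 32)
    print first.encode('hex')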
| 2,890 | Python | .py | 64 | 36.390625 | 100 | 0.652703 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,373 | dictutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/dictutil.py |
"""
Tools to mess with dicts.
"""
import warnings
import copy, operator
from bisect import bisect_left, insort_left
from pyutil.assertutil import _assert, precondition
def move(k, d1, d2, strict=False):
"""
Move item with key k from d1 to d2.
"""
warnings.warn("deprecated", DeprecationWarning)
if strict and not d1.has_key(k):
raise KeyError, k
d2[k] = d1[k]
del d1[k]
def subtract(d1, d2):
"""
Remove all items from d1 whose key occurs in d2.
@returns d1
"""
warnings.warn("deprecated", DeprecationWarning)
if len(d1) > len(d2):
for k in d2.keys():
if d1.has_key(k):
del d1[k]
else:
for k in d1.keys():
if d2.has_key(k):
del d1[k]
return d1
class DictOfSets(dict):
def add(self, key, value):
warnings.warn("deprecated", DeprecationWarning)
if key in self:
self[key].add(value)
else:
self[key] = set([value])
def discard(self, key, value):
warnings.warn("deprecated", DeprecationWarning)
if not key in self:
return
self[key].discard(value)
if not self[key]:
del self[key]
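# Usage sketch (illustrative only): DictOfSets maps each key to a set of
# values, creating the set on the first add and deleting the key once its
# set becomes empty.
#
#   d = DictOfSets()
#   d.add('colour', 'red')
#   d.add('colour', 'blue')
#   d.discard('colour', 'red')   # -> {'colour': set(['blue'])}
#   d.discard('colour', 'blue')  # the 'colour' key is removed entirely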
class UtilDict:
def __init__(self, initialdata={}):
warnings.warn("deprecated", DeprecationWarning)
self.d = {}
self.update(initialdata)
def del_if_present(self, key):
if self.has_key(key):
del self[key]
def items_sorted_by_value(self):
"""
@return a sequence of (key, value,) pairs sorted according to value
"""
l = [(x[1], x[0],) for x in self.d.iteritems()]
l.sort()
return [(x[1], x[0],) for x in l]
def items_sorted_by_key(self):
"""
@return a sequence of (key, value,) pairs sorted according to key
"""
l = self.d.items()
l.sort()
return l
def __repr__(self, *args, **kwargs):
return self.d.__repr__(*args, **kwargs)
def __str__(self, *args, **kwargs):
return self.d.__str__(*args, **kwargs)
def __contains__(self, *args, **kwargs):
return self.d.__contains__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self.d.__len__(*args, **kwargs)
def __cmp__(self, other):
try:
return self.d.__cmp__(other)
except TypeError, le:
# maybe we should look for a .d member in other. I know this is insanely kludgey, but the Right Way To Do It is for dict.__cmp__ to use structural typing ("duck typing")
try:
return self.d.__cmp__(other.d)
except:
raise le
def __eq__(self, *args, **kwargs):
return self.d.__eq__(*args, **kwargs)
def __ne__(self, *args, **kwargs):
return self.d.__ne__(*args, **kwargs)
def __gt__(self, *args, **kwargs):
return self.d.__gt__(*args, **kwargs)
def __ge__(self, *args, **kwargs):
return self.d.__ge__(*args, **kwargs)
def __le__(self, *args, **kwargs):
return self.d.__le__(*args, **kwargs)
def __lt__(self, *args, **kwargs):
return self.d.__lt__(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.d.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
return self.d.__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
return self.d.__delitem__(*args, **kwargs)
def __iter__(self, *args, **kwargs):
return self.d.__iter__(*args, **kwargs)
def clear(self, *args, **kwargs):
return self.d.clear(*args, **kwargs)
def copy(self, *args, **kwargs):
return self.__class__(self.d.copy(*args, **kwargs))
def fromkeys(self, *args, **kwargs):
return self.__class__(self.d.fromkeys(*args, **kwargs))
def get(self, key, default=None):
return self.d.get(key, default)
def has_key(self, *args, **kwargs):
return self.d.has_key(*args, **kwargs)
def items(self, *args, **kwargs):
return self.d.items(*args, **kwargs)
def iteritems(self, *args, **kwargs):
return self.d.iteritems(*args, **kwargs)
def iterkeys(self, *args, **kwargs):
return self.d.iterkeys(*args, **kwargs)
def itervalues(self, *args, **kwargs):
return self.d.itervalues(*args, **kwargs)
def keys(self, *args, **kwargs):
return self.d.keys(*args, **kwargs)
def pop(self, *args, **kwargs):
return self.d.pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
return self.d.popitem(*args, **kwargs)
def setdefault(self, *args, **kwargs):
return self.d.setdefault(*args, **kwargs)
def update(self, *args, **kwargs):
self.d.update(*args, **kwargs)
def values(self, *args, **kwargs):
return self.d.values(*args, **kwargs)
class NumDict:
def __init__(self, initialdict={}):
warnings.warn("deprecated", DeprecationWarning)
self.d = copy.deepcopy(initialdict)
def add_num(self, key, val, default=0):
"""
If the key doesn't appear in self then it is created with value default
(before addition).
"""
self.d[key] = self.d.get(key, default) + val
def subtract_num(self, key, val, default=0):
self.d[key] = self.d.get(key, default) - val
def sum(self):
"""
@return: the sum of all values
"""
return reduce(operator.__add__, self.d.values())
def inc(self, key, default=0):
"""
Increment the value associated with key in dict. If there is no such
key, then one will be created with initial value 0 (before inc() --
therefore value 1 after inc).
"""
self.add_num(key, 1, default)
def dec(self, key, default=0):
"""
Decrement the value associated with key in dict. If there is no such
key, then one will be created with initial value 0 (before dec() --
therefore value -1 after dec).
"""
self.subtract_num(key, 1, default)
def items_sorted_by_value(self):
"""
@return a sequence of (key, value,) pairs sorted according to value
"""
l = [(x[1], x[0],) for x in self.d.iteritems()]
l.sort()
return [(x[1], x[0],) for x in l]
def item_with_largest_value(self):
it = self.d.iteritems()
(winner, winnerval,) = it.next()
try:
while True:
n, nv = it.next()
if nv > winnerval:
winner = n
winnerval = nv
except StopIteration:
pass
return (winner, winnerval,)
def items_sorted_by_key(self):
"""
@return a sequence of (key, value,) pairs sorted according to key
"""
l = self.d.items()
l.sort()
return l
def __repr__(self, *args, **kwargs):
return self.d.__repr__(*args, **kwargs)
def __str__(self, *args, **kwargs):
return self.d.__str__(*args, **kwargs)
def __contains__(self, *args, **kwargs):
return self.d.__contains__(*args, **kwargs)
def __len__(self, *args, **kwargs):
return self.d.__len__(*args, **kwargs)
def __cmp__(self, other):
try:
return self.d.__cmp__(other)
except TypeError, le:
# maybe we should look for a .d member in other. I know this is insanely kludgey, but the Right Way To Do It is for dict.__cmp__ to use structural typing ("duck typing")
try:
return self.d.__cmp__(other.d)
except:
raise le
def __eq__(self, *args, **kwargs):
return self.d.__eq__(*args, **kwargs)
def __ne__(self, *args, **kwargs):
return self.d.__ne__(*args, **kwargs)
def __gt__(self, *args, **kwargs):
return self.d.__gt__(*args, **kwargs)
def __ge__(self, *args, **kwargs):
return self.d.__ge__(*args, **kwargs)
def __le__(self, *args, **kwargs):
return self.d.__le__(*args, **kwargs)
def __lt__(self, *args, **kwargs):
return self.d.__lt__(*args, **kwargs)
def __getitem__(self, *args, **kwargs):
return self.d.__getitem__(*args, **kwargs)
def __setitem__(self, *args, **kwargs):
return self.d.__setitem__(*args, **kwargs)
def __delitem__(self, *args, **kwargs):
return self.d.__delitem__(*args, **kwargs)
def __iter__(self, *args, **kwargs):
return self.d.__iter__(*args, **kwargs)
def clear(self, *args, **kwargs):
return self.d.clear(*args, **kwargs)
def copy(self, *args, **kwargs):
return self.__class__(self.d.copy(*args, **kwargs))
def fromkeys(self, *args, **kwargs):
return self.__class__(self.d.fromkeys(*args, **kwargs))
def get(self, key, default=0):
return self.d.get(key, default)
def has_key(self, *args, **kwargs):
return self.d.has_key(*args, **kwargs)
def items(self, *args, **kwargs):
return self.d.items(*args, **kwargs)
def iteritems(self, *args, **kwargs):
return self.d.iteritems(*args, **kwargs)
def iterkeys(self, *args, **kwargs):
return self.d.iterkeys(*args, **kwargs)
def itervalues(self, *args, **kwargs):
return self.d.itervalues(*args, **kwargs)
def keys(self, *args, **kwargs):
return self.d.keys(*args, **kwargs)
def pop(self, *args, **kwargs):
return self.d.pop(*args, **kwargs)
def popitem(self, *args, **kwargs):
return self.d.popitem(*args, **kwargs)
def setdefault(self, *args, **kwargs):
return self.d.setdefault(*args, **kwargs)
def update(self, *args, **kwargs):
return self.d.update(*args, **kwargs)
def values(self, *args, **kwargs):
return self.d.values(*args, **kwargs)
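# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example of NumDict used as a counter; the keys below are
# made up. inc()/dec() create missing keys starting from the given default (0).
def _numdict_usage_example():
    counts = NumDict()
    counts.inc('spam')      # {'spam': 1}
    counts.inc('spam')      # {'spam': 2}
    counts.dec('eggs')      # {'spam': 2, 'eggs': -1}
    assert counts.sum() == 1
    assert counts.item_with_largest_value() == ('spam', 2)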
def del_if_present(d, k):
if d.has_key(k):
del d[k]
class ValueOrderedDict:
"""
Note: this implementation assumes that the values do not mutate and change
their sort order. That is, it stores the values in a sorted list and
as items are added and removed from the dict, it makes updates to the list
which will keep the list sorted. But if a value that is currently sitting
in the list changes its sort order, then the internal consistency of this
object will be lost.
If that happens, and if assertion checking is turned on, then you will get
an assertion failure the very next time you try to do anything with this
ValueOrderedDict. However, those internal consistency checks are very slow
and almost certainly unacceptable to leave turned on in production code.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
self.i += 1
return (le[1], le[0],)
def iteritems(self):
return ValueOrderedDict.ItemIterator(self)
def items(self):
return zip(map(operator.__getitem__, self.l, [1]*len(self.l)), map(operator.__getitem__, self.l, [0]*len(self.l)))
def values(self):
return map(operator.__getitem__, self.l, [0]*len(self.l))
def keys(self):
return map(operator.__getitem__, self.l, [1]*len(self.l))
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
self.i += 1
return le[1]
def iterkeys(self):
return ValueOrderedDict.KeyIterator(self)
class ValueIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c.l), "The iterated ValueOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, self.c)
precondition((self.i == len(self.c.l)) or self.c.d.has_key(self.c.l[self.i][1]), "The iterated ValueOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the ValueOrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c.l)) and self.c.l[self.i], self.c)
if self.i == len(self.c.l):
raise StopIteration
le = self.c.l[self.i]
self.i += 1
return le[0]
def itervalues(self):
return ValueOrderedDict.ValueIterator(self)
def __init__(self, initialdata={}):
warnings.warn("deprecated", DeprecationWarning)
self.d = {} # k: key, v: val
self.l = [] # sorted list of tuples of (val, key,)
self.update(initialdata)
assert self._assert_invariants()
def __len__(self):
return len(self.l)
def __repr_n__(self, n=None):
s = ["{",]
try:
iter = self.iteritems()
x = iter.next()
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
i = 1
            while (n is None) or (i < n):
                x = iter.next()
                s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
                i += 1
except StopIteration:
pass
s.append("}")
return ''.join(s)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def __eq__(self, other):
for (k, v,) in other.iteritems():
if not self.d.has_key(k) or self.d[k] != v:
return False
return True
def __ne__(self, other):
return not self.__eq__(other)
def _assert_invariants(self):
iter = self.l.__iter__()
try:
oldx = iter.next()
while True:
x = iter.next()
# self.l is required to be sorted
_assert(x >= oldx, x, oldx)
# every element of self.l is required to appear in self.d
_assert(self.d.has_key(x[1]), x)
oldx =x
except StopIteration:
pass
for (k, v,) in self.d.iteritems():
i = bisect_left(self.l, (v, k,))
while (self.l[i][0] is not v) or (self.l[i][1] is not k):
i += 1
_assert(i < len(self.l), i, len(self.l), k, v, self.l)
_assert(self.l[i][0] is v, i, v, l=self.l, d=self.d)
_assert(self.l[i][1] is k, i, k, l=self.l, d=self.d)
return True
def insert(self, key, val=None):
assert self._assert_invariants()
result = self.__setitem__(key, val)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, val=None):
assert self._assert_invariants()
if self.d.has_key(key):
oldval = self.d[key]
if oldval != val:
# re-sort
i = bisect_left(self.l, (oldval, key,))
while (self.l[i][0] is not oldval) or (self.l[i][1] is not key):
i += 1
self.l.pop(i)
insort_left(self.l, (val, key,))
elif oldval is not val:
# replace
i = bisect_left(self.l, (oldval, key,))
while (self.l[i][0] is not oldval) or (self.l[i][1] is not key):
i += 1
self.l[i] = (val, key,)
else:
insort_left(self.l, (val, key,))
self.d[key] = val
assert self._assert_invariants()
return val
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
def __getitem__(self, key, default=None, strictkey=True):
if not self.d.has_key(key):
if strictkey:
raise KeyError, key
else:
return default
return self.d[key]
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the object removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if self.d.has_key(key):
val = self.d.pop(key)
i = bisect_left(self.l, (val, key,))
while (self.l[i][0] is not val) or (self.l[i][1] is not key):
i += 1
self.l.pop(i)
assert self._assert_invariants()
return val
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def clear(self):
assert self._assert_invariants()
self.d.clear()
del self.l[:]
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
for (k, v,) in otherdict.iteritems():
self.insert(k, v)
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
return self.d.has_key(key)
def popitem(self):
if not self.l:
raise KeyError, 'popitem(): dictionary is empty'
le = self.l.pop(0)
del self.d[le[1]]
return (le[1], le[0],)
def pop(self, k, default=None, strictkey=False):
if not self.d.has_key(k):
if strictkey:
raise KeyError, k
else:
return default
v = self.d.pop(k)
i = bisect_left(self.l, (v, k,))
while (self.l[i][0] is not v) or (self.l[i][1] is not k):
i += 1
self.l.pop(i)
return v
def pop_from_list(self, i=0):
le = self.l.pop(i)
del self.d[le[1]]
return le[1]
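# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example of the value ordering described in the class
# docstring: iteration, items() and popitem() follow ascending value order.
# The keys and values are made up.
def _valueordereddict_usage_example():
    vod = ValueOrderedDict()
    vod['b'] = 2; vod['a'] = 3; vod['c'] = 1
    assert vod.items() == [('c', 1), ('b', 2), ('a', 3)]
    assert vod.popitem() == ('c', 1)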
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/twistedutil.py ====
# Copyright (c) 2005-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
# from the Twisted library
from twisted.internet import reactor
# from the pyutil library
from weakutil import WeakMethod
def callLater_weakly(delay, func, *args, **kwargs):
"""
Call func later, but if func is a bound method then make the reference it holds to object be a weak reference.
Therefore, if this scheduled event is a bound method and it is the only thing keeping the object from being garbage collected, the object will be garbage collected and the event will be cancelled.
"""
warnings.warn("deprecated", DeprecationWarning)
def cleanup(weakmeth, thedeadweakref):
if weakmeth.callId.active():
weakmeth.callId.cancel()
weakmeth = WeakMethod(func, callback=cleanup)
weakmeth.callId = reactor.callLater(delay, weakmeth, *args, **kwargs)
return weakmeth
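# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A hedged example of callLater_weakly(); the Poller class and the 5-second
# delay are made up, and a running Twisted reactor is assumed. Because only a
# weak reference to the bound method's instance is kept, dropping the last
# strong reference to the instance lets it be garbage collected, and cleanup()
# above then cancels the pending call.
def _callLater_weakly_example():
    class Poller:
        def poll(self):
            print "polling"
    p = Poller()
    handle = callLater_weakly(5.0, p.poll)  # holds p only weakly
    return p, handle                        # dropping p would cancel the call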
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/assertutil.py ====
# Copyright (c) 2003-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Tests useful in assertion checking, prints out nicely formatted messages too.
"""
from humanreadable import hr
def _assert(___cond=False, *___args, **___kwargs):
if ___cond:
return True
msgbuf=[]
if ___args:
msgbuf.append("%s %s" % tuple(map(hr, (___args[0], type(___args[0]),))))
msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg),))) for arg in ___args[1:]])
if ___kwargs:
msgbuf.append(", %s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
else:
if ___kwargs:
msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
raise AssertionError, "".join(msgbuf)
def precondition(___cond=False, *___args, **___kwargs):
if ___cond:
return True
msgbuf=["precondition", ]
if ___args or ___kwargs:
msgbuf.append(": ")
if ___args:
msgbuf.append("%s %s" % tuple(map(hr, (___args[0], type(___args[0]),))))
msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg),))) for arg in ___args[1:]])
if ___kwargs:
msgbuf.append(", %s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
else:
if ___kwargs:
msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
raise AssertionError, "".join(msgbuf)
def postcondition(___cond=False, *___args, **___kwargs):
if ___cond:
return True
msgbuf=["postcondition", ]
if ___args or ___kwargs:
msgbuf.append(": ")
if ___args:
msgbuf.append("%s %s" % tuple(map(hr, (___args[0], type(___args[0]),))))
msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg),))) for arg in ___args[1:]])
if ___kwargs:
msgbuf.append(", %s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
else:
if ___kwargs:
msgbuf.append("%s: %s %s" % ((___kwargs.items()[0][0],) + tuple(map(hr, (___kwargs.items()[0][1], type(___kwargs.items()[0][1]),)))))
msgbuf.extend([", %s: %s %s" % tuple(map(hr, (k, v, type(v),))) for k, v in ___kwargs.items()[1:]])
raise AssertionError, "".join(msgbuf)
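# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example: precondition() is a no-op when the condition holds
# and raises AssertionError, with the extra arguments rendered human-readably,
# when it does not. The division helper below is made up.
def _precondition_usage_example(n, d):
    precondition(d != 0, "denominator must be nonzero", d)
    return n / d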
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/jsonutil.py ====
# -*- coding: utf-8 -*-
# Copyright (c) 2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
# We require simplejson>= 2.1.0 and set its default behavior to
# use_decimal=True. This retains backwards compatibility with previous
# versions of jsonutil (although it means jsonutil now requires a recent
# version of simplejson).
# http://code.google.com/p/simplejson/issues/detail?id=34
r"""JSON (JavaScript Object Notation) <http://json.org> is a subset of
JavaScript syntax (ECMA-262 3rd edition) used as a lightweight data
interchange format.
:mod:`simplejson` exposes an API familiar to users of the standard library
:mod:`marshal` and :mod:`pickle` modules. It is the externally maintained
version of the :mod:`json` library contained in Python 2.6, but maintains
compatibility with Python 2.4 and Python 2.5 and (currently) has
significant performance advantages, even without using the optional C
extension for speedups.
Encoding basic Python object hierarchies::
>>> import simplejson as json
>>> json.dumps(['foo', {'bar': ('baz', None, 1.0, 2)}])
'["foo", {"bar": ["baz", null, 1.0, 2]}]'
>>> print json.dumps("\"foo\bar")
"\"foo\bar"
>>> print json.dumps(u'\u1234')
"\u1234"
>>> print json.dumps('\\')
"\\"
>>> print json.dumps({"c": 0, "b": 0, "a": 0}, sort_keys=True)
{"a": 0, "b": 0, "c": 0}
>>> from StringIO import StringIO
>>> io = StringIO()
>>> json.dump(['streaming API'], io)
>>> io.getvalue()
'["streaming API"]'
Compact encoding::
>>> import simplejson as json
>>> json.dumps([1,2,3,{'4': 5, '6': 7}], separators=(',',':'))
'[1,2,3,{"4":5,"6":7}]'
Pretty printing::
>>> import simplejson as json
>>> s = json.dumps({'4': 5, '6': 7}, sort_keys=True, indent=' ')
>>> print '\n'.join([l.rstrip() for l in s.splitlines()])
{
"4": 5,
"6": 7
}
Decoding JSON::
>>> import simplejson as json
>>> obj = [u'foo', {u'bar': [u'baz', None, 1.0, 2]}]
>>> json.loads('["foo", {"bar":["baz", null, 1.0, 2]}]') == obj
True
>>> json.loads('"\\"foo\\bar"') == u'"foo\x08ar'
True
>>> from StringIO import StringIO
>>> io = StringIO('["streaming API"]')
>>> json.load(io)[0] == 'streaming API'
True
Specializing JSON object decoding::
>>> import simplejson as json
>>> def as_complex(dct):
... if '__complex__' in dct:
... return complex(dct['real'], dct['imag'])
... return dct
...
>>> json.loads('{"__complex__": true, "real": 1, "imag": 2}',
... object_hook=as_complex)
(1+2j)
>>> from decimal import Decimal
>>> json.loads('1.1', parse_float=Decimal) == Decimal('1.1')
True
Specializing JSON object encoding::
>>> import simplejson as json
>>> def encode_complex(obj):
... if isinstance(obj, complex):
... return [obj.real, obj.imag]
... raise TypeError(repr(obj) + " is not JSON serializable")
...
>>> json.dumps(2 + 1j, default=encode_complex)
'[2.0, 1.0]'
>>> json.JSONEncoder(default=encode_complex).encode(2 + 1j)
'[2.0, 1.0]'
>>> ''.join(json.JSONEncoder(default=encode_complex).iterencode(2 + 1j))
'[2.0, 1.0]'
Using simplejson.tool from the shell to validate and pretty-print::
$ echo '{"json":"obj"}' | python -m simplejson.tool
{
"json": "obj"
}
$ echo '{ 1.2:3.4}' | python -m simplejson.tool
Expecting property name: line 1 column 2 (char 2)
"""
import pkg_resources
pkg_resources.require("simplejson>=2.1.0")
# Now we just import all of the contents of the simplejson package and
# then overwrite it with a copy of the simplejson __init__.py edited
# to make use_decimal=True the default.
import simplejson
__version__ = simplejson.__version__
__all__ = simplejson.__all__
# The unit tests rely on .encoder and .decoder, and although they are not
# included in simplejson.__all__ they are still attributes of the simplejson
# package since they are modules within it.
from simplejson import encoder, decoder, scanner
encoder, decoder, scanner # http://divmod.org/trac/ticket/1499
__all__.extend(['encoder', 'decoder', 'scanner'])
__author__ = simplejson.__author__
del simplejson
from decimal import Decimal
from simplejson.decoder import JSONDecoder, JSONDecodeError
JSONDecoder, JSONDecodeError # http://divmod.org/trac/ticket/1499
from simplejson.encoder import JSONEncoder
def _import_OrderedDict():
from pyutil.odict import OrderedDict
return OrderedDict
OrderedDict = _import_OrderedDict()
def _import_c_make_encoder():
    try:
        from simplejson._speedups import make_encoder
        return make_encoder
    except ImportError:
        return None
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
)
def dump(obj, fp, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True, **kw):
"""Serialize ``obj`` as a JSON formatted stream to ``fp`` (a
``.write()``-supporting file-like object).
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then some chunks written to ``fp``
may be ``unicode`` instances, subject to normal Python ``str`` to
``unicode`` coercion rules. Unless ``fp.write()`` explicitly
understands ``unicode`` (as in ``codecs.getwriter()``) this is likely
to cause an error.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``)
in strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If *indent* is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
    if (not skipkeys and ensure_ascii and
        check_circular and allow_nan and
        cls is None and indent is None and separators is None and
        encoding == 'utf-8' and default is None and use_decimal
        and not kw):
iterable = _default_encoder.iterencode(obj)
else:
if cls is None:
cls = JSONEncoder
iterable = cls(skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding,
default=default, use_decimal=use_decimal, **kw).iterencode(obj)
# could accelerate with writelines in some versions of Python, at
# a debuggability cost
for chunk in iterable:
fp.write(chunk)
def dumps(obj, skipkeys=False, ensure_ascii=True, check_circular=True,
allow_nan=True, cls=None, indent=None, separators=None,
encoding='utf-8', default=None, use_decimal=True, **kw):
"""Serialize ``obj`` to a JSON formatted ``str``.
If ``skipkeys`` is true then ``dict`` keys that are not basic types
(``str``, ``unicode``, ``int``, ``long``, ``float``, ``bool``, ``None``)
will be skipped instead of raising a ``TypeError``.
If ``ensure_ascii`` is false, then the return value will be a
``unicode`` instance subject to normal Python ``str`` to ``unicode``
coercion rules instead of being escaped to an ASCII ``str``.
If ``check_circular`` is false, then the circular reference check
for container types will be skipped and a circular reference will
result in an ``OverflowError`` (or worse).
If ``allow_nan`` is false, then it will be a ``ValueError`` to
serialize out of range ``float`` values (``nan``, ``inf``, ``-inf``) in
strict compliance of the JSON specification, instead of using the
JavaScript equivalents (``NaN``, ``Infinity``, ``-Infinity``).
If ``indent`` is a string, then JSON array elements and object members
will be pretty-printed with a newline followed by that string repeated
for each level of nesting. ``None`` (the default) selects the most compact
representation without any newlines. For backwards compatibility with
versions of simplejson earlier than 2.1.0, an integer is also accepted
and is converted to a string with that many spaces.
If ``separators`` is an ``(item_separator, dict_separator)`` tuple
then it will be used instead of the default ``(', ', ': ')`` separators.
``(',', ':')`` is the most compact JSON representation.
``encoding`` is the character encoding for str instances, default is UTF-8.
``default(obj)`` is a function that should return a serializable version
of obj or raise TypeError. The default simply raises TypeError.
If *use_decimal* is true (default: ``True``) then decimal.Decimal
will be natively serialized to JSON with full precision.
To use a custom ``JSONEncoder`` subclass (e.g. one that overrides the
``.default()`` method to serialize additional types), specify it with
the ``cls`` kwarg.
"""
# cached encoder
if (not skipkeys and ensure_ascii and
check_circular and allow_nan and
cls is None and indent is None and separators is None and
encoding == 'utf-8' and default is None and use_decimal
and not kw):
return _default_encoder.encode(obj)
if cls is None:
cls = JSONEncoder
return cls(
skipkeys=skipkeys, ensure_ascii=ensure_ascii,
check_circular=check_circular, allow_nan=allow_nan, indent=indent,
separators=separators, encoding=encoding, default=default,
use_decimal=use_decimal, **kw).encode(obj)
_default_decoder = JSONDecoder(encoding=None, object_hook=None,
object_pairs_hook=None, parse_float=Decimal)
def load(fp, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=True, **kw):
"""Deserialize ``fp`` (a ``.read()``-supporting file-like object containing
a JSON document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``True``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
return loads(fp.read(),
encoding=encoding, cls=cls, object_hook=object_hook,
parse_float=parse_float, parse_int=parse_int,
parse_constant=parse_constant, object_pairs_hook=object_pairs_hook,
use_decimal=use_decimal, **kw)
def loads(s, encoding=None, cls=None, object_hook=None, parse_float=None,
parse_int=None, parse_constant=None, object_pairs_hook=None,
use_decimal=True, **kw):
"""Deserialize ``s`` (a ``str`` or ``unicode`` instance containing a JSON
document) to a Python object.
*encoding* determines the encoding used to interpret any
:class:`str` objects decoded by this instance (``'utf-8'`` by
default). It has no effect when decoding :class:`unicode` objects.
Note that currently only encodings that are a superset of ASCII work,
strings of other encodings should be passed in as :class:`unicode`.
*object_hook*, if specified, will be called with the result of every
JSON object decoded and its return value will be used in place of the
given :class:`dict`. This can be used to provide custom
deserializations (e.g. to support JSON-RPC class hinting).
*object_pairs_hook* is an optional function that will be called with
the result of any object literal decode with an ordered list of pairs.
The return value of *object_pairs_hook* will be used instead of the
:class:`dict`. This feature can be used to implement custom decoders
that rely on the order that the key and value pairs are decoded (for
example, :func:`collections.OrderedDict` will remember the order of
insertion). If *object_hook* is also defined, the *object_pairs_hook*
takes priority.
*parse_float*, if specified, will be called with the string of every
JSON float to be decoded. By default, this is equivalent to
``float(num_str)``. This can be used to use another datatype or parser
for JSON floats (e.g. :class:`decimal.Decimal`).
*parse_int*, if specified, will be called with the string of every
JSON int to be decoded. By default, this is equivalent to
``int(num_str)``. This can be used to use another datatype or parser
for JSON integers (e.g. :class:`float`).
*parse_constant*, if specified, will be called with one of the
following strings: ``'-Infinity'``, ``'Infinity'``, ``'NaN'``. This
can be used to raise an exception if invalid JSON numbers are
encountered.
If *use_decimal* is true (default: ``True``) then it implies
parse_float=decimal.Decimal for parity with ``dump``.
To use a custom ``JSONDecoder`` subclass, specify it with the ``cls``
kwarg.
"""
if (cls is None and encoding is None and object_hook is None and
parse_int is None and parse_float is None and
parse_constant is None and object_pairs_hook is None
and use_decimal and not kw):
return _default_decoder.decode(s)
if cls is None:
cls = JSONDecoder
if object_hook is not None:
kw['object_hook'] = object_hook
if object_pairs_hook is not None:
kw['object_pairs_hook'] = object_pairs_hook
if parse_float is not None:
kw['parse_float'] = parse_float
if parse_int is not None:
kw['parse_int'] = parse_int
if parse_constant is not None:
kw['parse_constant'] = parse_constant
    if use_decimal:
        if parse_float is not None:
            raise TypeError("use_decimal=True implies parse_float=Decimal")
        kw['parse_float'] = Decimal
return cls(encoding=encoding, **kw).decode(s)
def _toggle_speedups(enabled):
import simplejson.decoder as dec
import simplejson.encoder as enc
import simplejson.scanner as scan
c_make_encoder = _import_c_make_encoder()
if enabled:
dec.scanstring = dec.c_scanstring or dec.py_scanstring
enc.c_make_encoder = c_make_encoder
enc.encode_basestring_ascii = (enc.c_encode_basestring_ascii or
enc.py_encode_basestring_ascii)
scan.make_scanner = scan.c_make_scanner or scan.py_make_scanner
else:
dec.scanstring = dec.py_scanstring
enc.c_make_encoder = None
enc.encode_basestring_ascii = enc.py_encode_basestring_ascii
scan.make_scanner = scan.py_make_scanner
dec.make_scanner = scan.make_scanner
global _default_decoder
_default_decoder = JSONDecoder(
encoding=None,
object_hook=None,
object_pairs_hook=None,
        parse_float=Decimal,
)
global _default_encoder
_default_encoder = JSONEncoder(
skipkeys=False,
ensure_ascii=True,
check_circular=True,
allow_nan=True,
indent=None,
separators=None,
encoding='utf-8',
default=None,
use_decimal=True,
)
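# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example of the use_decimal default (assuming
# simplejson >= 2.1.0 is installed, as required above): JSON numbers
# round-trip through decimal.Decimal instead of binary floats.
def _jsonutil_usage_example():
    assert loads('1.1') == Decimal('1.1')
    assert dumps(Decimal('1.1')) == '1.1'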
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/odict.py ====
# Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
"""
This module offers an Ordered Dict, which is a dict that preserves
insertion order. See PEP 372 for description of the problem. This
implementation uses a linked-list to get good O(1) asymptotic
performance. (Actually it is O(hashtable-update-cost), but whatever.)
Warning: if -O optimizations are not turned on then OrderedDict performs
extensive self-analysis in every function call, which can take minutes
and minutes for a large cache. Turn on -O, or comment out assert
self._assert_invariants()
"""
import operator
from assertutil import _assert, precondition
from humanreadable import hr
class OrderedDict:
"""
An efficient ordered dict.
Adding an item that is already in the dict *does not* make it the
most-recently-added item although it may change the state of the
dict itself (if the value is different than the previous value).
See also SmallOrderedDict (below), which is faster in some cases.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.ts][1]
def __iter__(self):
return self
def next(self):
if self.i is self.c.hs:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated OrderedDict doesn't have the next key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = p
return (k, v,)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.ts][1]
def __iter__(self):
return self
def next(self):
if self.i is self.c.hs:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated OrderedDict doesn't have the next key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = p
return k
class ValIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.ts][1]
def __iter__(self):
return self
def next(self):
if self.i is self.c.hs:
raise StopIteration
precondition(self.c.d.has_key(self.i), "The iterated OrderedDict doesn't have the next key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
(v, p, n,) = self.c.d[self.i]
self.i = p
return v
class Sentinel:
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.msg,)
def __init__(self, initialdata={}):
self.d = {} # k: k, v: [v, prev, next,] # the dict
self.hs = OrderedDict.Sentinel("hs")
self.ts = OrderedDict.Sentinel("ts")
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.update(initialdata)
assert self._assert_invariants()
def __repr_n__(self, n=None):
s = ["{",]
try:
iter = self.iteritems()
x = iter.next()
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
i = 1
            while (n is None) or (i < n):
                x = iter.next()
                s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
                i += 1
except StopIteration:
pass
s.append("}")
return ''.join(s)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def _assert_invariants(self):
_assert((len(self.d) > 2) == (self.d[self.hs][2] is not self.ts) == (self.d[self.ts][1] is not self.hs), "Head and tail point to something other than each other if and only if there is at least one element in the dictionary.", self.hs, self.ts, len(self.d))
foundprevsentinel = 0
foundnextsentinel = 0
for (k, (v, p, n,)) in self.d.iteritems():
_assert(v not in (self.hs, self.ts,))
_assert(p is not self.ts, "A reference to the tail sentinel may not appear in prev.", k, v, p, n)
_assert(n is not self.hs, "A reference to the head sentinel may not appear in next.", k, v, p, n)
_assert(p in self.d, "Each prev is required to appear as a key in the dict.", k, v, p, n)
_assert(n in self.d, "Each next is required to appear as a key in the dict.", k, v, p, n)
if p is self.hs:
foundprevsentinel += 1
_assert(foundprevsentinel <= 2, "No more than two references to the head sentinel may appear as a prev.", k, v, p, n)
if n is self.ts:
foundnextsentinel += 1
                _assert(foundnextsentinel <= 2, "No more than two references to the tail sentinel may appear as a next.", k, v, p, n)
        _assert(foundprevsentinel == 2, "A reference to the head sentinel is required to appear as a prev (plus a self-referential reference).")
        _assert(foundnextsentinel == 2, "A reference to the tail sentinel is required to appear as a next (plus a self-referential reference).")
count = 0
for (k, v,) in self.iteritems():
_assert(k not in (self.hs, self.ts,), k, self.hs, self.ts)
count += 1
_assert(count == len(self.d)-2, count, len(self.d)) # -2 for the sentinels
return True
def move_to_most_recent(self, k, strictkey=False):
assert self._assert_invariants()
if not self.d.has_key(k):
if strictkey:
raise KeyError, k
return
node = self.d[k]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
# move to front
hnode = self.d[self.hs]
node[1] = self.hs
node[2] = hnode[2]
hnode[2] = k
self.d[node[2]][1] = k
assert self._assert_invariants()
def iteritems(self):
return OrderedDict.ItemIterator(self)
def itervalues(self):
return OrderedDict.ValIterator(self)
def iterkeys(self):
return self.__iter__()
def __iter__(self):
return OrderedDict.KeyIterator(self)
def __getitem__(self, key, default=None, strictkey=True):
node = self.d.get(key)
if not node:
if strictkey:
raise KeyError, key
return default
return node[0]
def __setitem__(self, k, v=None):
assert self._assert_invariants()
node = self.d.get(k)
if node:
node[0] = v
return
hnode = self.d[self.hs]
n = hnode[2]
self.d[k] = [v, self.hs, n,]
hnode[2] = k
self.d[n][1] = k
assert self._assert_invariants()
return v
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the value removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if self.d.has_key(key):
node = self.d[key]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
del self.d[key]
assert self._assert_invariants()
return node[0]
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def has_key(self, key):
assert self._assert_invariants()
if self.d.has_key(key):
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def clear(self):
assert self._assert_invariants()
self.d.clear()
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
for (k, v,) in otherdict.iteritems():
assert self._assert_invariants()
self[k] = v
            assert self._assert_invariants()
        return self
def pop(self):
assert self._assert_invariants()
        if len(self.d) <= 2: # the 2 accounts for the two sentinels; nothing else means empty
            raise KeyError, 'pop(): dictionary is empty'
k = self.d[self.hs][2]
self.remove(k)
assert self._assert_invariants()
return k
def popitem(self):
assert self._assert_invariants()
        if len(self.d) <= 2: # the 2 accounts for the two sentinels; nothing else means empty
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.hs][2]
val = self.remove(k)
assert self._assert_invariants()
return (k, val,)
def keys_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return t.keys()
def keys(self):
res = [None] * len(self)
i = 0
for k in self.iterkeys():
res[i] = k
i += 1
return res
def values_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return map(operator.__getitem__, t.values(), [0]*len(t))
def values(self):
res = [None] * len(self)
i = 0
for v in self.itervalues():
res[i] = v
i += 1
return res
def items(self):
res = [None] * len(self)
i = 0
for it in self.iteritems():
res[i] = it
i += 1
return res
def __len__(self):
return len(self.d) - 2
def insert(self, key, val=None):
assert self._assert_invariants()
result = self.__setitem__(key, val)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def get(self, key, default=None):
return self.__getitem__(key, default, strictkey=False)
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
class SmallOrderedDict(dict):
"""
SmallOrderedDict is faster than OrderedDict for small sets. How small? That
depends on your machine and which operations you use most often. Use
performance profiling to determine whether the ordered dict class that you are
using makes any difference to the performance of your program, and if it
does, then run "quick_bench()" in test/test_cache.py to see which cache
implementation is faster for the size of your datasets.
A simple least-recently-used cache. It keeps an LRU queue, and
when the number of items in the cache reaches maxsize, it removes
the least recently used item.
"Looking" at an item or a key such as with "has_key()" makes that
item become the most recently used item.
You can also use "refresh()" to explicitly make an item become the most
recently used item.
Adding an item that is already in the dict *does* make it the
most-recently-used item although it does not change the state of
the dict itself.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
            precondition((self.i == len(self.c._lru)) or dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c._lru)) and self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
            k = self.c._lru[self.i]
self.i += 1
return (k, dict.__getitem__(self.c, k),)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
            precondition((self.i == len(self.c._lru)) or dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c._lru)) and self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
            k = self.c._lru[self.i]
self.i += 1
return k
class ValueIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
precondition(self.i <= len(self.c._lru), "The iterated SmallOrderedDict doesn't have this many elements. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, self.c)
            precondition((self.i == len(self.c._lru)) or dict.has_key(self.c, self.c._lru[self.i]), "The iterated SmallOrderedDict doesn't have this key. Most likely this is because someone altered the contents of the OrderedDict while the iteration was in progress.", self.i, (self.i < len(self.c._lru)) and self.c._lru[self.i], self.c)
if self.i == len(self.c._lru):
raise StopIteration
            k = self.c._lru[self.i]
self.i += 1
return dict.__getitem__(self.c, k)
def __init__(self, initialdata={}, maxsize=128):
dict.__init__(self, initialdata)
self._lru = initialdata.keys() # contains keys
self._maxsize = maxsize
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
def _assert_invariants(self):
_assert(len(self._lru) <= self._maxsize, "Size is required to be <= maxsize.")
_assert(len(filter(lambda x: dict.has_key(self, x), self._lru)) == len(self._lru), "Each key in self._lru is required to be in dict.", filter(lambda x: not dict.has_key(self, x), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(filter(lambda x: x in self._lru, self.keys())) == len(self), "Each key in dict is required to be in self._lru.", filter(lambda x: x not in self._lru, self.keys()), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) == len(self), "internal consistency", filter(lambda x: x not in self.keys(), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) <= self._maxsize, "internal consistency", len(self._lru), self._lru, self._maxsize)
return True
def insert(self, key, item=None):
assert self._assert_invariants()
result = self.__setitem__(key, item)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, item=None):
assert self._assert_invariants()
if dict.has_key(self, key):
self._lru.remove(key)
else:
if len(self._lru) == self._maxsize:
# If this insert is going to increase the size of the cache to bigger than maxsize:
killkey = self._lru.pop(0)
dict.__delitem__(self, killkey)
dict.__setitem__(self, key, item)
self._lru.append(key)
assert self._assert_invariants()
return item
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
@return: the object removed or default if there is not item by
that key and strictkey is False
"""
assert self._assert_invariants()
if dict.has_key(self, key):
val = dict.__getitem__(self, key)
dict.__delitem__(self, key)
self._lru.remove(key)
assert self._assert_invariants()
return val
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def clear(self):
assert self._assert_invariants()
dict.clear(self)
self._lru = []
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
if len(otherdict) > self._maxsize:
# Handling this special case here makes it possible to implement the
# other more common cases faster below.
dict.clear(self)
self._lru = []
if self._maxsize > (len(otherdict) - self._maxsize):
dict.update(self, otherdict)
while len(self) > self._maxsize:
dict.popitem(self)
else:
for k, v, in otherdict.iteritems():
if len(self) == self._maxsize:
break
dict.__setitem__(self, k, v)
self._lru = dict.keys(self)
assert self._assert_invariants()
return self
for k in otherdict.iterkeys():
if dict.has_key(self, k):
self._lru.remove(k)
self._lru.extend(otherdict.keys())
dict.update(self, otherdict)
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
if dict.has_key(self, key):
assert key in self._lru, "key: %s, self._lru: %s" % tuple(map(hr, (key, self._lru,)))
self._lru.remove(key)
self._lru.append(key)
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def refresh(self, key, strictkey=True):
"""
@param strictkey: raise a KeyError exception if key isn't present
"""
assert self._assert_invariants()
if not dict.has_key(self, key):
if strictkey:
raise KeyError, key
return
self._lru.remove(key)
self._lru.append(key)
def popitem(self):
if not self._lru:
raise KeyError, 'popitem(): dictionary is empty'
k = self._lru[-1]
obj = self.remove(k)
return (k, obj,)
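# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example of OrderedDict's ordering as implemented above:
# iteration yields keys oldest-first, while pop()/popitem() return the most
# recently added item. The keys and values are made up.
def _ordereddict_usage_example():
    od = OrderedDict()
    od['a'] = 1; od['b'] = 2; od['c'] = 3
    assert od.keys() == ['a', 'b', 'c']
    assert od.popitem() == ('c', 3)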
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/mathutil.py~ ====
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
A few commonly needed functions.
"""
import math
def div_ceil(n, d):
"""
The smallest integer k such that k*d >= n.
"""
return (n/d) + (n%d != 0)
def next_multiple(n, k):
"""
The smallest multiple of k which is >= n. Note that if n is 0 then the
answer is 0.
"""
return div_ceil(n, k) * k
def pad_size(n, k):
"""
The smallest number that has to be added to n to equal a multiple of k.
"""
if n%k:
return k - n%k
else:
return 0
def is_power_of_k(n, k):
return k**int(math.log(n, k) + 0.5) == n
def next_power_of_k(n, k):
p = 1
while p < n:
p *= k
return p
def ave(l):
return sum(l) / len(l)
def log_ceil(n, b):
"""
The smallest integer k such that b^k >= n.
log_ceil(n, 2) is the number of bits needed to store any of n values, e.g.
the number of bits needed to store any of 128 possible values is 7.
"""
p = 1
k = 0
while p < n:
p *= b
k += 1
return k
def log_floor(n, b):
"""
The largest integer k such that b^k <= n.
"""
p = 1
k = 0
while p <= n:
p *= b
k += 1
return k - 1
def linear_fit_slope(ps):
"""
Single-independent-variable linear regression -- least squares method.
At least, I *think* this function computes that answer. I no longer
remember where I learned this trick and at the moment I can't prove to
myself that this is correct.
@param ps a sequence of tuples of (x, y)
"""
avex = ave([x for (x, y) in ps])
avey = ave([y for (x, y) in ps])
sxy = sum([ (x - avex) * (y - avey) for (x, y) in ps ])
sxx = sum([ (x - avex) ** 2 for (x, y) in ps ])
if sxx == 0:
return None
return sxy / sxx
def permute(l):
"""
Return all possible permutations of l.
@type l: sequence
@rtype a set of sequences
"""
if len(l) == 1:
return [l,]
res = []
for i in range(len(l)):
l2 = list(l[:])
x = l2.pop(i)
for l3 in permute(l2):
l3.append(x)
res.append(l3)
return res
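# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example of the integer helpers above, with made-up operands.
def _mathutil_usage_example():
    assert div_ceil(7, 3) == 3          # smallest k with k*3 >= 7
    assert next_multiple(7, 3) == 9
    assert pad_size(7, 3) == 2
    assert next_power_of_k(5, 2) == 8
    assert log_ceil(100, 10) == 2       # 10**2 >= 100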
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/_version.py ====
# This is the version of this tree, as created by setup.py darcsver from the darcs patch
# information: the main version number is taken from the most recent release
# tag. If some patches have been added since the last release, this will have a
# -NN "build number" suffix, or else a -rNN "revision number" suffix. Please see
# pyutil.version_class for a description of what the different fields mean.
__pkgname__ = "pyutil"
verstr = "1.9.7"
try:
from pyutil.version_class import Version as pyutil_Version
__version__ = pyutil_Version(verstr)
except (ImportError, ValueError):
# Maybe there is no pyutil installed, or this may be an older version of
# pyutil.version_class which does not support SVN-alike revision numbers.
from distutils.version import LooseVersion as distutils_Version
__version__ = distutils_Version(verstr)
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/strutil.py ====
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
def commonprefix(l):
cp = []
for i in range(min(map(len, l))):
c = l[0][i]
for s in l[1:]:
if s[i] != c:
return ''.join(cp)
cp.append(c)
return ''.join(cp)
def commonsuffix(l):
cp = []
for i in range(min(map(len, l))):
c = l[0][-i-1]
for s in l[1:]:
if s[-i-1] != c:
cp.reverse()
return ''.join(cp)
cp.append(c)
cp.reverse()
return ''.join(cp)
def split_on_newlines(s):
"""
Splits s on all of the three newline sequences: "\r\n", "\r", or "\n".
"""
res = []
for x in s.split('\r\n'):
for y in x.split('\r'):
res.extend(y.split('\n'))
return res
def pop_trailing_newlines(s):
"""
@return a copy of s minus any trailing "\n"'s or "\r"'s
"""
i = len(s)-1
if i < 0:
return ''
while s[i] in ('\n', '\r',):
i = i - 1
if i < 0:
return ''
return s[:i+1]
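# --- Usage sketch added for illustration; not part of the original pyutil code. ---
# A minimal, hedged example of the string helpers above, with made-up inputs.
def _strutil_usage_example():
    assert commonprefix(['flower', 'flow', 'flight']) == 'fl'
    assert commonsuffix(['testing', 'running', 'going']) == 'ing'
    assert split_on_newlines('a\r\nb\rc\nd') == ['a', 'b', 'c', 'd']
    assert pop_trailing_newlines('line\r\n') == 'line'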
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/randutil.py ====
# Copyright (c) 2002-2012 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import warnings
import os, random
try:
import hashexpand
class SHA256Random(hashexpand.SHA256Expander, random.Random):
def __init__(self, seed=None, deterministic=True):
warnings.warn("deprecated", DeprecationWarning)
if not deterministic:
raise NotImplementedError, "SHA256Expander is always deterministic. For non-deterministic, try urandomRandom."
hashexpand.SHA256Expander.__init__(self)
random.Random.__init__(self, seed)
self.seed(seed)
def seed(self, seed=None):
if seed is None:
import increasing_timer
seed = repr(increasing_timer.time())
hashexpand.SHA256Expander.seed(self, seed)
class SHA256Random(hashexpand.SHA256Expander, random.Random):
def __init__(self, seed=""):
warnings.warn("deprecated", DeprecationWarning)
hashexpand.SHA256Expander.__init__(self)
self.seed(seed)
def seed(self, seed=None):
if seed is None:
seed = os.urandom(32)
hashexpand.SHA256Expander.seed(self, seed)
except ImportError, le:
class InsecureSHA256Random:
def __init__(self, seed=None):
raise ImportError, le
class SHA256Random:
def __init__(self, seed=""):
raise ImportError, le
class devrandomRandom(random.Random):
""" The problem with using this one, of course, is that it blocks. This
is, of course, a security flaw. (On Linux and probably on other
systems.) --Zooko 2005-03-04
Not repeatable.
"""
def __init__(self):
warnings.warn("deprecated", DeprecationWarning)
self.dr = open("/dev/random", "r")
def get(self, bytes):
return self.dr.read(bytes)
class devurandomRandom(random.Random):
""" The problem with using this one is that it gives answers even when it
has never been properly seeded, e.g. when you are booting from CD and have
just started up and haven't yet gathered enough entropy to actually be
unguessable. (On Linux and probably on other systems.) --Zooko 2005-03-04
Not repeatable.
"""
def get(self, bytes):
warnings.warn("deprecated", DeprecationWarning)
return os.urandom(bytes)
randobj = devurandomRandom()
get = randobj.get
random = randobj.random
randrange = randobj.randrange
shuffle = randobj.shuffle
choice = randobj.choice
seed = randobj.seed
def randstr(n):
return ''.join(map(chr, map(randrange, [0]*n, [256]*n)))
def insecurerandstr(n):
return os.urandom(n)
# ==== CouchPotato_CouchPotatoServer/libs/pyutil/humanreadable.py ====
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko "Zooko" Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
import exceptions, os
from repr import Repr
class BetterRepr(Repr):
def __init__(self):
Repr.__init__(self)
# Note: These levels can get adjusted dynamically! My goal is to get more info when printing important debug stuff like exceptions and stack traces and less info when logging normal events. --Zooko 2000-10-14
self.maxlevel = 6
self.maxdict = 6
self.maxlist = 6
self.maxtuple = 6
self.maxstring = 300
self.maxother = 300
def repr_function(self, obj, level):
if hasattr(obj, 'func_code'):
return '<' + obj.func_name + '() at ' + os.path.basename(obj.func_code.co_filename) + ':' + str(obj.func_code.co_firstlineno) + '>'
else:
            return '<' + obj.func_name + '() at (builtin)>'
def repr_instance_method(self, obj, level):
if hasattr(obj, 'func_code'):
return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at ' + os.path.basename(obj.im_func.func_code.co_filename) + ':' + str(obj.im_func.func_code.co_firstlineno) + '>'
else:
            return '<' + obj.im_class.__name__ + '.' + obj.im_func.__name__ + '() at (builtin)>'
def repr_long(self, obj, level):
s = `obj` # XXX Hope this isn't too slow...
if len(s) > self.maxlong:
i = max(0, (self.maxlong-3)/2)
j = max(0, self.maxlong-3-i)
s = s[:i] + '...' + s[len(s)-j:]
if s[-1] == 'L':
return s[:-1]
return s
def repr_instance(self, obj, level):
"""
If it is an instance of Exception, format it nicely (trying to emulate
the format that you see when an exception is actually raised, plus
bracketing '<''s). If it is an instance of dict call self.repr_dict()
on it. If it is an instance of list call self.repr_list() on it. Else
call Repr.repr_instance().
"""
if isinstance(obj, exceptions.Exception):
# Don't cut down exception strings so much.
tms = self.maxstring
self.maxstring = max(512, tms * 4)
tml = self.maxlist
self.maxlist = max(12, tml * 4)
try:
if hasattr(obj, 'args'):
if len(obj.args) == 1:
return '<' + obj.__class__.__name__ + ': ' + self.repr1(obj.args[0], level-1) + '>'
else:
return '<' + obj.__class__.__name__ + ': ' + self.repr1(obj.args, level-1) + '>'
else:
return '<' + obj.__class__.__name__ + '>'
finally:
self.maxstring = tms
self.maxlist = tml
if isinstance(obj, dict):
return self.repr_dict(obj, level)
if isinstance(obj, list):
return self.repr_list(obj, level)
return Repr.repr_instance(self, obj, level)
def repr_list(self, obj, level):
"""
copied from standard repr.py and fixed to work on multithreadedly mutating lists.
"""
if level <= 0: return '[...]'
n = len(obj)
myl = obj[:min(n, self.maxlist)]
s = ''
for item in myl:
entry = self.repr1(item, level-1)
if s: s = s + ', '
s = s + entry
if n > self.maxlist: s = s + ', ...'
return '[' + s + ']'
def repr_dict(self, obj, level):
"""
copied from standard repr.py and fixed to work on multithreadedly mutating dicts.
"""
if level <= 0: return '{...}'
s = ''
n = len(obj)
items = obj.items()[:min(n, self.maxdict)]
items.sort()
for key, val in items:
entry = self.repr1(key, level-1) + ':' + self.repr1(val, level-1)
if s: s = s + ', '
s = s + entry
if n > self.maxdict: s = s + ', ...'
return '{' + s + '}'
# This object can be changed by other code updating this module's "brepr"
# variables. This is so that (a) code can use humanreadable with
# "from humanreadable import hr; hr(mything)", and (b) code can override
# humanreadable to provide application-specific human readable output
# (e.g. libbase32's base32id.AbbrevRepr).
brepr = BetterRepr()
def hr(x):
return brepr.repr(x)
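# A short sketch of how hr() is typically called; the output shown is approximate,
# since BetterRepr formatting depends on the object's own repr:
#
#     >>> hr([1, 2, 3])
#     '[1, 2, 3]'
#     >>> hr(ValueError("bad value"))
#     "<ValueError: 'bad value'>"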
| 4,483
|
Python
|
.py
| 101
| 34.693069
| 218
| 0.545101
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,383
|
fileutil.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/fileutil.py
|
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Futz with files like a pro.
"""
import errno, exceptions, os, stat, tempfile
try:
import bsddb
except ImportError:
DBNoSuchFileError = None
else:
DBNoSuchFileError = bsddb.db.DBNoSuchFileError
# read_file() and write_file() copied from Mark Seaborn's blog post. Please
# read it for complete rationale:
# http://lackingrhoticity.blogspot.com/2009/12/readfile-and-writefile-in-python.html
def read_file(filename, mode='rb'):
""" Read the contents of the file named filename and return it in
a string. This function closes the file handle before it returns
(even if the underlying Python implementation's garbage collector
doesn't). """
fh = open(filename, mode)
try:
return fh.read()
finally:
fh.close()
def write_file(filename, data, mode='wb'):
""" Write the string data into a file named filename. This
function closes the file handle (ensuring that the written data is
flushed from the perspective of the Python implementation) before
it returns (even if the underlying Python implementation's garbage
collector doesn't)."""
fh = open(filename, mode)
try:
fh.write(data)
finally:
fh.close()
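# A minimal sketch of the intended read/write round-trip (the path is illustrative):
#
#     write_file('/tmp/example.dat', 'hello world')
#     assert read_file('/tmp/example.dat') == 'hello world'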
# For backwards-compatibility in case someone is using these names. We used to
# have a superkludge in fileutil.py under these names.
def rename(src, dst, tries=4, basedelay=0.1):
return os.rename(src, dst)
def remove(f, tries=4, basedelay=0.1):
return os.remove(f)
def rmdir(f, tries=4, basedelay=0.1):
return os.rmdir(f)
class _Dir(object):
"""
Hold a set of files and subdirs and clean them all up when asked to.
"""
def __init__(self, name, cleanup=True):
self.name = name
self.cleanup = cleanup
self.files = []
self.subdirs = set()
def file(self, fname, mode=None):
"""
Create a file in the tempdir and remember it so as to close() it
before attempting to cleanup the temp dir.
@rtype: file
"""
ffn = os.path.join(self.name, fname)
if mode is not None:
fo = open(ffn, mode)
else:
fo = open(ffn)
self.register_file(fo)
return fo
def subdir(self, dirname):
"""
Create a subdirectory in the tempdir and remember it so as to call
shutdown() on it before attempting to clean up.
@rtype: _Dir instance
"""
ffn = os.path.join(self.name, dirname)
sd = _Dir(ffn, self.cleanup)
self.register_subdir(sd)
make_dirs(sd.name)
return sd
def register_file(self, fileobj):
"""
Remember the file object and call close() on it before attempting to
clean up.
"""
self.files.append(fileobj)
def register_subdir(self, dirobj):
"""
Remember the _Dir object and call shutdown() on it before attempting
to clean up.
"""
self.subdirs.add(dirobj)
def shutdown(self):
if self.cleanup:
for subdir in hasattr(self, 'subdirs') and self.subdirs or []:
subdir.shutdown()
for fileobj in hasattr(self, 'files') and self.files or []:
if DBNoSuchFileError is None:
fileobj.close() # "close()" is idempotent so we don't need to catch exceptions here
else:
try:
fileobj.close()
except DBNoSuchFileError:
# Ah, except that the bsddb module's file-like object (a DB object) has a non-idempotent close...
pass
if hasattr(self, 'name'):
rm_dir(self.name)
def __repr__(self):
return "<%s instance at %x %s>" % (self.__class__.__name__, id(self), self.name)
def __str__(self):
return self.__repr__()
def __del__(self):
try:
self.shutdown()
except:
import traceback
traceback.print_exc()
class NamedTemporaryDirectory(_Dir):
"""
Call tempfile.mkdtemp(), store the name of the dir in self.name, and
rm_dir() when it gets garbage collected or "shutdown()".
Also keep track of file objects for files within the tempdir and call
close() on them before rm_dir(). This is a convenient way to open temp
files within the directory, and it is very helpful on Windows because you
can't delete a directory which contains a file which is currently open.
"""
def __init__(self, cleanup=True, *args, **kwargs):
""" If cleanup, then the directory will be rmrf'ed when the object is shutdown. """
name = tempfile.mkdtemp(*args, **kwargs)
_Dir.__init__(self, name, cleanup)
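# A minimal usage sketch for NamedTemporaryDirectory (file name and data are illustrative):
#
#     tmpdir = NamedTemporaryDirectory()
#     fobj = tmpdir.file('notes.txt', 'wb')  # opened inside tmpdir.name and tracked for cleanup
#     fobj.write('scratch data')
#     tmpdir.shutdown()                      # closes tracked files, then removes the directory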
class ReopenableNamedTemporaryFile:
"""
This uses tempfile.mkstemp() to generate a secure temp file. It then closes
the file, leaving a zero-length file as a placeholder. You can get the
filename with ReopenableNamedTemporaryFile.name. When the
ReopenableNamedTemporaryFile instance is garbage collected or its shutdown()
method is called, it deletes the file.
"""
def __init__(self, *args, **kwargs):
fd, self.name = tempfile.mkstemp(*args, **kwargs)
os.close(fd)
def __repr__(self):
return "<%s instance at %x %s>" % (self.__class__.__name__, id(self), self.name)
def __str__(self):
return self.__repr__()
def __del__(self):
self.shutdown()
def shutdown(self):
remove(self.name)
def make_dirs(dirname, mode=0777):
"""
An idempotent version of os.makedirs(). If the dir already exists, do
nothing and return without raising an exception. If this call creates the
dir, return without raising an exception. If there is an error that
prevents creation or if the directory gets deleted after make_dirs() creates
it and before make_dirs() checks that it exists, raise an exception.
"""
tx = None
try:
os.makedirs(dirname, mode)
except OSError, x:
tx = x
if not os.path.isdir(dirname):
if tx:
raise tx
raise exceptions.IOError, "unknown error prevented creation of directory, or deleted the directory immediately after creation: %s" % dirname # careful not to construct an IOError with a 2-tuple, as that has a special meaning...
def rmtree(dirname):
"""
A threadsafe and idempotent version of shutil.rmtree(). If the dir is
already gone, do nothing and return without raising an exception. If this
call removes the dir, return without raising an exception. If there is an
error that prevents deletion or if the directory gets created again after
rm_dir() deletes it and before rm_dir() checks that it is gone, raise an
exception.
"""
excs = []
try:
os.chmod(dirname, stat.S_IWRITE | stat.S_IEXEC | stat.S_IREAD)
for f in os.listdir(dirname):
fullname = os.path.join(dirname, f)
if os.path.isdir(fullname):
rm_dir(fullname)
else:
remove(fullname)
os.rmdir(dirname)
except EnvironmentError, le:
# Ignore "No such file or directory", collect any other exception.
        if le.args[0] not in (2, 3, errno.ENOENT):
excs.append(le)
except Exception, le:
excs.append(le)
# Okay, now we've recursively removed everything, ignoring any "No
# such file or directory" errors, and collecting any other errors.
if os.path.exists(dirname):
if len(excs) == 1:
raise excs[0]
if len(excs) == 0:
raise OSError, "Failed to remove dir for unknown reason."
raise OSError, excs
def rm_dir(dirname):
# Renamed to be like shutil.rmtree and unlike rmdir.
return rmtree(dirname)
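# A brief sketch of the idempotency contract documented above (the path is illustrative):
#
#     make_dirs('/tmp/example/a/b')   # creates the chain of directories
#     make_dirs('/tmp/example/a/b')   # already there; returns without raising
#     rm_dir('/tmp/example')          # removes the whole tree
#     rm_dir('/tmp/example')          # already gone; returns without raising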
def remove_if_possible(f):
try:
remove(f)
except EnvironmentError:
pass
def remove_if_present(f):
try:
remove(f)
except EnvironmentError, le:
# Ignore "No such file or directory", re-raise any other exception.
        if le.args[0] not in (2, 3, errno.ENOENT):
raise
def rmdir_if_possible(f):
try:
rmdir(f)
except EnvironmentError:
pass
def open_or_create(fname, binarymode=True):
try:
f = open(fname, binarymode and "r+b" or "r+")
except EnvironmentError:
f = open(fname, binarymode and "w+b" or "w+")
return f
def du(basedir):
size = 0
for root, dirs, files in os.walk(basedir):
for f in files:
fn = os.path.join(root, f)
size += os.path.getsize(fn)
return size
| 8,863
|
Python
|
.py
| 229
| 31.318777
| 235
| 0.633613
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,384
|
logutil.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/logutil.py
|
# Copyright (c) 2005-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
# This little file makes it so that we can use "log.msg()" and the contents
# get logged to the Twisted logger if present, else to the Python Standard
# Library logger.
import warnings
warnings.warn("deprecated", DeprecationWarning)
try:
from twisted.python import log
log # http://divmod.org/trac/ticket/1499
except ImportError:
import logging
class MinimalLogger:
def msg(self, m):
logging.log(0, m)
log = MinimalLogger()
| 582
|
Python
|
.py
| 16
| 32.6875
| 75
| 0.73357
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,385
|
repeatable_random.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/repeatable_random.py
|
"""
If you execute force_repeatability() then the following things are changed in the runtime:
1. random.random() and its sibling functions, and random.Random.seed() in the random module are seeded with a known seed so that they will return the same sequence on each run.
2. os.urandom() is replaced by a fake urandom that returns a pseudorandom sequence.
3. time.time() is replaced by a fake time that returns an incrementing number. (Original time.time is available as time.realtime.)
Which seed will be used?
If the environment variable REPEATABLE_RANDOMNESS_SEED is set, then it will use that. Else, it will use the current real time. In either case it logs the seed that it used.
Caveats:
1. If some code has acquired a random.Random object before force_repeatability() is executed, then that Random object will produce non-reproducible results. For example, the tempfile module in the Python Standard Library does this.
2. Likewise if some code called time.time() before force_repeatability() was called, then it will have gotten a real time stamp. For example, trial does this. (Then it later subtracts that real timestamp from a faketime timestamp to calculate elapsed time, resulting in a large negative elapsed time.)
3. Fake urandom has an added constraint for performance reasons -- you can't ask it for more than 64 bytes of randomness at a time. (I couldn't figure out how to generate large fake random strings efficiently.)
"""
import os, random, sys, time
if not hasattr(time, "realtime"):
time.realtime = time.time
if not hasattr(os, "realurandom"):
os.realurandom = os.urandom
if not hasattr(random, "realseed"):
random.realseed = random.seed
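# A minimal sketch of the intended call pattern (the import path is illustrative and
# depends on how pyutil is installed):
#
#     from pyutil import repeatable_random
#     repeatable_random.force_repeatability()       # seed random, fake os.urandom and time.time
#     ...                                           # run the code whose randomness should repeat
#     repeatable_random.restore_non_repeatability()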
tdelta = 0
seeded = False
def force_repeatability():
now = 1043659734.0
def faketime():
global tdelta
tdelta += 1
return now + tdelta
time.faketime = faketime
time.time = faketime
from idlib import i2b
def fakeurandom(n):
if n > 64:
            raise ValueError("Can't produce more than 64 bytes of pseudorandomness efficiently.")
elif n == 0:
return ''
else:
z = i2b(random.getrandbits(n*8))
x = z + "0" * (n-len(z))
assert len(x) == n
return x
os.fakeurandom = fakeurandom
os.urandom = fakeurandom
global seeded
if not seeded:
SEED = os.environ.get('REPEATABLE_RANDOMNESS_SEED', None)
if SEED is None:
# Generate a seed which is integral and fairly short (to ease cut-and-paste, writing it down, etc.).
t = time.realtime()
subsec = t % 1
t += (subsec * 1000000)
t %= 1000000
SEED = long(t)
sys.stdout.write("REPEATABLE_RANDOMNESS_SEED: %s\n" % SEED) ; sys.stdout.flush()
sys.stdout.write("In order to reproduce this run of the code, set the environment variable \"REPEATABLE_RANDOMNESS_SEED\" to %s before executing.\n" % SEED) ; sys.stdout.flush()
random.seed(SEED)
def seed_which_refuses(a):
sys.stdout.write("I refuse to reseed to %s. Go away!\n" % (a,)) ; sys.stdout.flush()
return
random.realseed = random.seed
random.seed = seed_which_refuses
seeded = True
import setutil
setutil.RandomSet.DETERMINISTIC = True
def restore_real_clock():
time.time = time.realtime
def restore_real_urandom():
os.urandom = os.realurandom
def restore_real_seed():
random.seed = random.realseed
def restore_non_repeatability():
restore_real_seed()
restore_real_urandom()
restore_real_clock()
| 3,622
|
Python
|
.py
| 74
| 42.756757
| 304
| 0.691393
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,386
|
nummedobj.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/nummedobj.py
|
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# mailto:zooko@zooko.com
# This file is part of pyutil; see README.rst for licensing terms.
import dictutil
class NummedObj(object):
"""
This is useful for nicer debug printouts. Instead of objects of the same class being
distinguished from one another by their memory address, they each get a unique number, which
can be read as "the first object of this class", "the second object of this class", etc. This
is especially useful because separate runs of a program will yield identical debug output,
(assuming that the objects get created in the same order in each run). This makes it possible
to diff outputs from separate runs to see what changed, without having to ignore a difference
on every line due to different memory addresses of objects.
"""
objnums = dictutil.NumDict() # key: class names, value: highest used object number
def __init__(self, klass=None):
"""
@param klass: in which class are you counted? If default value of `None', then self.__class__ will be used.
"""
if klass is None:
klass = self.__class__
self._classname = klass.__name__
NummedObj.objnums.inc(self._classname)
self._objid = NummedObj.objnums[self._classname]
def __repr__(self):
return "<%s #%d>" % (self._classname, self._objid,)
def __lt__(self, other):
return (self._objid, self._classname,) < (other._objid, other._classname,)
def __le__(self, other):
return (self._objid, self._classname,) <= (other._objid, other._classname,)
def __eq__(self, other):
return (self._objid, self._classname,) == (other._objid, other._classname,)
def __ne__(self, other):
return (self._objid, self._classname,) != (other._objid, other._classname,)
def __gt__(self, other):
return (self._objid, self._classname,) > (other._objid, other._classname,)
def __ge__(self, other):
return (self._objid, self._classname,) >= (other._objid, other._classname,)
def __hash__(self):
return id(self)
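# A minimal sketch of the numbering behaviour described in the class docstring
# (the subclass name is illustrative):
#
#     class Widget(NummedObj):
#         pass
#
#     print repr(Widget())   # -> <Widget #1>
#     print repr(Widget())   # -> <Widget #2>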
| 2,141
|
Python
|
.py
| 40
| 46.725
| 116
| 0.659124
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,387
|
find_exe.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/find_exe.py
|
import warnings
import os, sys
from twisted.python.procutils import which
def find_exe(exename):
"""
Look for something named exename or exename + ".py".
This is a kludge.
@return: a list containing one element which is the path to the exename
(if it is thought to be executable), or else the first element being
sys.executable and the second element being the path to the
exename + ".py", or else return False if one can't be found
"""
warnings.warn("deprecated", DeprecationWarning)
exes = which(exename)
exe = exes and exes[0]
if not exe:
exe = os.path.join(sys.prefix, 'scripts', exename + '.py')
if os.path.exists(exe):
path, ext = os.path.splitext(exe)
if ext.lower() in [".exe", ".bat",]:
cmd = [exe,]
else:
cmd = [sys.executable, exe,]
return cmd
else:
return False
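# A minimal sketch of how find_exe() is meant to be used (the program name is illustrative):
#
#     cmd = find_exe('darcsver')
#     if cmd:
#         # cmd is a list suitable for subprocess, e.g. ['/usr/local/bin/darcsver']
#         # or [sys.executable, '<prefix>/scripts/darcsver.py']
#         pass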
| 918
|
Python
|
.py
| 26
| 28.769231
| 76
| 0.631757
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,388
|
version_class.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/version_class.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2004-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
extended version number class
"""
# verlib a.k.a. distutils.version by Tarek Ziadé.
from pyutil.verlib import NormalizedVersion
def cmp_version(v1, v2):
return cmp(NormalizedVersion(str(v1)), NormalizedVersion(str(v2)))
# Python Standard Library
import re
# End users see version strings like this:
# "1.0.0"
# ^ ^ ^
# | | |
# | | '- micro version number
# | '- minor version number
# '- major version number
# The first number is "major version number". The second number is the "minor
# version number" -- it gets bumped whenever we make a new release that adds or
# changes functionality. The third version is the "micro version number" -- it
# gets bumped whenever we make a new release that doesn't add or change
# functionality, but just fixes bugs (including performance issues).
# Early-adopter end users see version strings like this:
# "1.0.0a1"
# ^ ^ ^^^
# | | |||
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate, or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# The optional "a" or "b" stands for "alpha release" or "beta release"
# respectively. The number after "a" or "b" gets bumped every time we
# make a new alpha or beta release. This has the same form and the same
# meaning as version numbers of releases of Python.
# Developers see "full version strings", like this:
# "1.0.0a1-55"
# ^ ^ ^^^ ^
# | | ||| |
# | | ||| '- nano version number
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# or else like this:
# "1.0.0a1-r22155"
# ^ ^ ^^^ ^
# | | ||| |
# | | ||| '- revision number
# | | ||'- release number
# | | |'- a=alpha, b=beta, c=release candidate or none
# | | '- micro version number
# | '- minor version number
# '- major version number
# The presence of the nano version number means that this is a development
# version. There are no guarantees about compatibility, etc. This version is
# considered to be more recent than the version without this field
# (e.g. "1.0.0a1").
# The nano version number or revision number is meaningful only to developers.
# It gets generated automatically from darcs revision control history by
# "darcsver.py". The nano version number is the count of patches that have been
# applied since the last version number tag was applied. The revision number is
# the count of all patches that have been applied in the history.
VERSION_BASE_RE_STR="(\d+)(\.(\d+)(\.(\d+))?)?((a|b|c)(\d+))?(\.dev(\d+))?"
VERSION_SUFFIX_RE_STR="(-(\d+|r\d+)|.post\d+)?"
VERSION_RE_STR=VERSION_BASE_RE_STR + VERSION_SUFFIX_RE_STR
VERSION_RE=re.compile("^" + VERSION_RE_STR + "$")
class Version(object):
def __init__(self, vstring=None):
self.major = None
self.minor = None
self.micro = None
self.prereleasetag = None
self.prerelease = None
self.nano = None
self.revision = None
if vstring:
try:
self.parse(vstring)
except ValueError, le:
le.args = tuple(le.args + ('vstring:', vstring,))
raise
def parse(self, vstring):
mo = VERSION_RE.search(vstring)
if not mo:
raise ValueError, "Not a valid version string for pyutil.version_class.Version(): %r" % (vstring,)
self.major = int(mo.group(1))
self.minor = mo.group(3) and int(mo.group(3)) or 0
self.micro = mo.group(5) and int(mo.group(5)) or 0
reltag = mo.group(6)
if reltag:
reltagnum = int(mo.group(8))
self.prereleasetag = mo.group(7)
self.prerelease = reltagnum
if mo.group(11):
if mo.group(11)[0] == '-':
if mo.group(12)[0] == 'r':
self.revision = int(mo.group(12)[1:])
else:
self.nano = int(mo.group(12))
else:
assert mo.group(11).startswith('.post'), mo.group(11)
self.revision = int(mo.group(11)[5:])
# XXX in the future, to be compatible with the Python "rational version numbering" scheme, we should move to using .post$REV instead of -r$REV:
# self.fullstr = "%d.%d.%d%s%s" % (self.major, self.minor, self.micro, self.prereleasetag and "%s%d" % (self.prereleasetag, self.prerelease,) or "", self.nano and "-%d" % (self.nano,) or self.revision and ".post%d" % (self.revision,) or "",)
self.fullstr = "%d.%d.%d%s%s" % (self.major, self.minor, self.micro, self.prereleasetag and "%s%d" % (self.prereleasetag, self.prerelease,) or "", self.nano and "-%d" % (self.nano,) or self.revision and "-r%d" % (self.revision,) or "",)
def user_str(self):
return self.full_str()
def full_str(self):
if hasattr(self, 'fullstr'):
return self.fullstr
else:
return 'None'
def __str__(self):
return self.full_str()
def __repr__(self):
return self.__str__()
def __cmp__ (self, other):
return cmp_version(self, other)
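# A short sketch of parsing the "full version string" forms documented above
# (the version strings are illustrative):
#
#     v = Version("1.0.0a1-55")
#     # v.major == 1, v.minor == 0, v.micro == 0,
#     # v.prereleasetag == "a", v.prerelease == 1, v.nano == 55
#     Version("1.2.0") < Version("1.10.0")   # True; compared via cmp_version(), not as strings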
| 5,299
|
Python
|
.py
| 122
| 38.057377
| 249
| 0.625996
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,389
|
cache.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/cache.py
|
# Copyright (c) 2002-2010 Zooko "Zooko" Wilcox-O'Hearn
"""
This module offers three implementations of an LRUCache, which is a dict that
drops items according to a Least-Recently-Used policy if the dict exceeds a
fixed maximum size.
Warning: if -O optimizations are not turned on then LRUCache performs
extensive self-analysis in every function call, which can take minutes
and minutes for a large cache. Turn on -O, or comment out ``assert self._assert_invariants()``
"""
import operator
from assertutil import _assert, precondition
from humanreadable import hr
class LRUCache:
"""
An efficient least-recently-used cache. It keeps an LRU queue, and when
the number of items in the cache reaches maxsize, it removes the least
recently used item.
"Looking" at an item, key, or value such as with "has_key()" makes that
item become the most recently used item.
You can also use "refresh()" to explicitly make an item become the most
recently used item.
Adding an item that is already in the dict *does* make it the most-
recently-used item although it does not change the state of the dict
itself.
See also SmallLRUCache (below), which is faster in some cases.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.hs][2]
def __iter__(self):
return self
def next(self):
if self.i is self.c.ts:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = n
return (k, v,)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.hs][2]
def __iter__(self):
return self
def next(self):
if self.i is self.c.ts:
raise StopIteration
k = self.i
precondition(self.c.d.has_key(k), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", k, self.c)
(v, p, n,) = self.c.d[k]
self.i = n
return k
class ValIterator:
def __init__(self, c):
self.c = c
self.i = c.d[c.hs][2]
def __iter__(self):
return self
def next(self):
if self.i is self.c.ts:
raise StopIteration
precondition(self.c.d.has_key(self.i), "The iterated LRUCache doesn't have the next key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
(v, p, n,) = self.c.d[self.i]
self.i = n
return v
class Sentinel:
def __init__(self, msg):
self.msg = msg
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.msg,)
def __init__(self, initialdata={}, maxsize=128):
precondition(maxsize > 0)
self.m = maxsize+2 # The +2 is for the head and tail nodes.
self.d = {} # k: k, v: [v, prev, next,] # the dict
self.hs = LRUCache.Sentinel("hs")
self.ts = LRUCache.Sentinel("ts")
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.update(initialdata)
assert self._assert_invariants()
def __repr_n__(self, n=None):
s = ["{",]
try:
iter = self.iteritems()
x = iter.next()
s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
            i = 1
            while (n is None) or (i < n):
                x = iter.next()
                s.append(", "); s.append(str(x[0])); s.append(": "); s.append(str(x[1]))
                i += 1
except StopIteration:
pass
s.append("}")
return ''.join(s)
def __repr__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(),)
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.__repr_n__(16),)
def _assert_invariants(self):
_assert(len(self.d) <= self.m, "Size is required to be <= maxsize.", len(self.d), self.m)
_assert((len(self.d) > 2) == (self.d[self.hs][2] is not self.ts) == (self.d[self.ts][1] is not self.hs), "Head and tail point to something other than each other if and only if there is at least one element in the dictionary.", self.hs, self.ts, len(self.d))
foundprevsentinel = 0
foundnextsentinel = 0
for (k, (v, p, n,)) in self.d.iteritems():
_assert(v not in (self.hs, self.ts,))
_assert(p is not self.ts, "A reference to the tail sentinel may not appear in prev.", k, v, p, n)
_assert(n is not self.hs, "A reference to the head sentinel may not appear in next.", k, v, p, n)
_assert(p in self.d, "Each prev is required to appear as a key in the dict.", k, v, p, n)
_assert(n in self.d, "Each next is required to appear as a key in the dict.", k, v, p, n)
if p is self.hs:
foundprevsentinel += 1
_assert(foundprevsentinel <= 2, "No more than two references to the head sentinel may appear as a prev.", k, v, p, n)
if n is self.ts:
foundnextsentinel += 1
                _assert(foundnextsentinel <= 2, "No more than two references to the tail sentinel may appear as a next.", k, v, p, n)
        _assert(foundprevsentinel == 2, "A reference to the head sentinel is required to appear as a prev (plus a self-referential reference).")
        _assert(foundnextsentinel == 2, "A reference to the tail sentinel is required to appear as a next (plus a self-referential reference).")
count = 0
for (k, v,) in self.iteritems():
_assert(k not in (self.hs, self.ts,))
count += 1
_assert(count == len(self.d)-2, count, len(self.d)) # -2 for the sentinels
return True
def freshen(self, k, strictkey=False):
assert self._assert_invariants()
if not self.d.has_key(k):
if strictkey:
raise KeyError, k
return
node = self.d[k]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
# move to front
hnode = self.d[self.hs]
node[1] = self.hs
node[2] = hnode[2]
hnode[2] = k
self.d[node[2]][1] = k
assert self._assert_invariants()
def iteritems(self):
return LRUCache.ItemIterator(self)
def itervalues(self):
return LRUCache.ValIterator(self)
def iterkeys(self):
return self.__iter__()
def __iter__(self):
return LRUCache.KeyIterator(self)
def __getitem__(self, key, default=None, strictkey=True):
node = self.d.get(key)
if not node:
if strictkey:
raise KeyError, key
return default
self.freshen(key)
return node[0]
def __setitem__(self, k, v=None):
assert self._assert_invariants()
node = self.d.get(k)
if node:
node[0] = v
self.freshen(k)
return
if len(self.d) == self.m:
# If this insert is going to increase the size of the cache to
# bigger than maxsize.
self.pop()
hnode = self.d[self.hs]
n = hnode[2]
self.d[k] = [v, self.hs, n,]
hnode[2] = k
self.d[n][1] = k
assert self._assert_invariants()
return v
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
        @return: the value removed or default if there is no item by
that key and strictkey is False
"""
assert self._assert_invariants()
if self.d.has_key(key):
node = self.d[key]
# relink
self.d[node[1]][2] = node[2]
self.d[node[2]][1] = node[1]
del self.d[key]
assert self._assert_invariants()
return node[0]
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def has_key(self, key):
assert self._assert_invariants()
if self.d.has_key(key):
self.freshen(key)
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def clear(self):
assert self._assert_invariants()
self.d.clear()
self.d[self.hs] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
self.d[self.ts] = [None, self.hs, self.ts,] # This allows us to use sentinels as normal nodes.
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
if len(otherdict) >= (self.m-2): # -2 for the sentinel nodes
# optimization
self.clear()
assert self._assert_invariants()
i = otherdict.iteritems()
try:
while len(self.d) < self.m:
(k, v,) = i.next()
assert self._assert_invariants()
self[k] = v
assert self._assert_invariants()
return self
except StopIteration:
_assert(False, "Internal error -- this should never have happened since the while loop should have terminated first.")
return self
for (k, v,) in otherdict.iteritems():
assert self._assert_invariants()
self[k] = v
assert self._assert_invariants()
def pop(self):
assert self._assert_invariants()
        if len(self.d) <= 2: # the +2 is for the sentinels
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.ts][1]
self.remove(k)
assert self._assert_invariants()
return k
def popitem(self):
assert self._assert_invariants()
        if len(self.d) <= 2: # the +2 is for the sentinels
raise KeyError, 'popitem(): dictionary is empty'
k = self.d[self.ts][1]
val = self.remove(k)
assert self._assert_invariants()
return (k, val,)
def keys_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return t.keys()
def keys(self):
res = [None] * len(self)
i = 0
for k in self.iterkeys():
res[i] = k
i += 1
return res
def values_unsorted(self):
assert self._assert_invariants()
t = self.d.copy()
del t[self.hs]
del t[self.ts]
assert self._assert_invariants()
return map(operator.__getitem__, t.values(), [0]*len(t))
def values(self):
res = [None] * len(self)
i = 0
for v in self.itervalues():
res[i] = v
i += 1
return res
def items(self):
res = [None] * len(self)
i = 0
for it in self.iteritems():
res[i] = it
i += 1
return res
def __len__(self):
return len(self.d) - 2
def insert(self, key, val=None):
assert self._assert_invariants()
result = self.__setitem__(key, val)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def get(self, key, default=None):
return self.__getitem__(key, default, strictkey=False)
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
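# A minimal usage sketch for LRUCache (keys and values are illustrative):
#
#     c = LRUCache(maxsize=2)
#     c['a'] = 1
#     c['b'] = 2
#     c.has_key('a')     # touches 'a', making it the most recently used item
#     c['c'] = 3         # evicts 'b', the least recently used item
#     sorted(c.keys())   # -> ['a', 'c']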
class SmallLRUCache(dict):
"""
SmallLRUCache is faster than LRUCache for small sets. How small? That
depends on your machine and which operations you use most often. Use
performance profiling to determine whether the cache class that you are
using makes any difference to the performance of your program, and if it
does, then run "quick_bench()" in test/test_cache.py to see which cache
implementation is faster for the size of your datasets.
A simple least-recently-used cache. It keeps an LRU queue, and
when the number of items in the cache reaches maxsize, it removes
the least recently used item.
"Looking" at an item or a key such as with "has_key()" makes that
item become the most recently used item.
You can also use "refresh()" to explicitly make an item become the most
recently used item.
Adding an item that is already in the dict *does* make it the
    most-recently-used item although it does not change the state of
the dict itself.
"""
class ItemIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
            precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
            if self.i == len(self.c._lru):
                raise StopIteration
            k = self.c._lru[self.i]
            precondition(dict.has_key(self.c, k), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, k, self.c)
            self.i += 1
            return (k, dict.__getitem__(self.c, k),)
class KeyIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
            precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
            if self.i == len(self.c._lru):
                raise StopIteration
            k = self.c._lru[self.i]
            precondition(dict.has_key(self.c, k), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, k, self.c)
            self.i += 1
            return k
class ValueIterator:
def __init__(self, c):
self.c = c
self.i = 0
def __iter__(self):
return self
def next(self):
            precondition(self.i <= len(self.c._lru), "The iterated SmallLRUCache doesn't have this many elements. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, self.c)
            if self.i == len(self.c._lru):
                raise StopIteration
            k = self.c._lru[self.i]
            precondition(dict.has_key(self.c, k), "The iterated SmallLRUCache doesn't have this key. Most likely this is because someone altered the contents of the LRUCache while the iteration was in progress.", self.i, k, self.c)
            self.i += 1
            return dict.__getitem__(self.c, k)
def __init__(self, initialdata={}, maxsize=128):
dict.__init__(self, initialdata)
self._lru = initialdata.keys() # contains keys
self._maxsize = maxsize
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
def _assert_invariants(self):
_assert(len(self._lru) <= self._maxsize, "Size is required to be <= maxsize.")
_assert(len(filter(lambda x: dict.has_key(self, x), self._lru)) == len(self._lru), "Each key in self._lru is required to be in dict.", filter(lambda x: not dict.has_key(self, x), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(filter(lambda x: x in self._lru, self.keys())) == len(self), "Each key in dict is required to be in self._lru.", filter(lambda x: x not in self._lru, self.keys()), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) == len(self), "internal consistency", filter(lambda x: x not in self.keys(), self._lru), len(self._lru), self._lru, len(self), self)
_assert(len(self._lru) <= self._maxsize, "internal consistency", len(self._lru), self._lru, self._maxsize)
return True
def insert(self, key, item=None):
assert self._assert_invariants()
result = self.__setitem__(key, item)
assert self._assert_invariants()
return result
def setdefault(self, key, default=None):
assert self._assert_invariants()
if not self.has_key(key):
self[key] = default
assert self._assert_invariants()
return self[key]
def __setitem__(self, key, item=None):
assert self._assert_invariants()
if dict.has_key(self, key):
self._lru.remove(key)
else:
if len(self._lru) == self._maxsize:
# If this insert is going to increase the size of the cache to bigger than maxsize:
killkey = self._lru.pop(0)
dict.__delitem__(self, killkey)
dict.__setitem__(self, key, item)
self._lru.append(key)
assert self._assert_invariants()
return item
def remove(self, key, default=None, strictkey=True):
assert self._assert_invariants()
result = self.__delitem__(key, default, strictkey)
assert self._assert_invariants()
return result
def __delitem__(self, key, default=None, strictkey=True):
"""
@param strictkey: True if you want a KeyError in the case that
key is not there, False if you want a reference to default
in the case that key is not there
@param default: the object to return if key is not there; This
is ignored if strictkey.
        @return: the object removed or default if there is no item by
that key and strictkey is False
"""
assert self._assert_invariants()
if dict.has_key(self, key):
val = dict.__getitem__(self, key)
dict.__delitem__(self, key)
self._lru.remove(key)
assert self._assert_invariants()
return val
elif strictkey:
assert self._assert_invariants()
raise KeyError, key
else:
assert self._assert_invariants()
return default
def clear(self):
assert self._assert_invariants()
dict.clear(self)
self._lru = []
assert self._assert_invariants()
def update(self, otherdict):
"""
@return: self
"""
assert self._assert_invariants()
if len(otherdict) > self._maxsize:
# Handling this special case here makes it possible to implement the
# other more common cases faster below.
dict.clear(self)
self._lru = []
if self._maxsize > (len(otherdict) - self._maxsize):
dict.update(self, otherdict)
while len(self) > self._maxsize:
dict.popitem(self)
else:
for k, v, in otherdict.iteritems():
if len(self) == self._maxsize:
break
dict.__setitem__(self, k, v)
self._lru = dict.keys(self)
assert self._assert_invariants()
return self
for k in otherdict.iterkeys():
if dict.has_key(self, k):
self._lru.remove(k)
self._lru.extend(otherdict.keys())
dict.update(self, otherdict)
over = len(self) - self._maxsize
if over > 0:
map(dict.__delitem__, [self]*over, self._lru[:over])
del self._lru[:over]
assert self._assert_invariants()
return self
def has_key(self, key):
assert self._assert_invariants()
if dict.has_key(self, key):
assert key in self._lru, "key: %s, self._lru: %s" % tuple(map(hr, (key, self._lru,)))
self._lru.remove(key)
self._lru.append(key)
assert self._assert_invariants()
return True
else:
assert self._assert_invariants()
return False
def refresh(self, key, strictkey=True):
"""
@param strictkey: raise a KeyError exception if key isn't present
"""
assert self._assert_invariants()
if not dict.has_key(self, key):
if strictkey:
raise KeyError, key
return
self._lru.remove(key)
self._lru.append(key)
def popitem(self):
if not self._lru:
raise KeyError, 'popitem(): dictionary is empty'
k = self._lru[-1]
obj = self.remove(k)
return (k, obj,)
class LinkedListLRUCache:
"""
This is slower and less featureful than LRUCache. It is included
here for comparison purposes.
Implementation of a length-limited O(1) LRU queue.
Built for and used by PyPE:
http://pype.sourceforge.net
original Copyright 2003 Josiah Carlson.
useful methods and _assert_invariant added by Zooko for testing and benchmarking purposes
"""
class Node:
def __init__(self, prev, me):
self.prev = prev
self.me = me
self.next = None
def __init__(self, initialdata={}, maxsize=128):
self._maxsize = max(maxsize, 1)
self.d = {}
self.first = None
self.last = None
for key, value in initialdata.iteritems():
self[key] = value
def clear(self):
self.d = {}
self.first = None
self.last = None
def update(self, otherdict):
for (k, v,) in otherdict.iteritems():
self[k] = v
def setdefault(self, key, default=None):
if not self.has_key(key):
self[key] = default
return self[key]
def _assert_invariants(self):
def lliterkeys(self):
cur = self.first
while cur != None:
cur2 = cur.next
yield cur.me[0]
cur = cur2
def lllen(self):
# Ugh.
acc = 0
for x in lliterkeys(self):
acc += 1
return acc
def llhaskey(self, key):
# Ugh.
for x in lliterkeys(self):
if x is key:
return True
return False
for k in lliterkeys(self):
_assert(self.d.has_key(k), "Each key in the linked list is required to be in the dict.", k)
for k in self.d.iterkeys():
_assert(llhaskey(self, k), "Each key in the dict is required to be in the linked list.", k)
_assert(lllen(self) == len(self.d), "internal consistency", self, self.d)
_assert(len(self.d) <= self._maxsize, "Size is required to be <= maxsize.")
return True
def __contains__(self, obj):
return obj in self.d
def has_key(self, key):
return self.__contains__(key)
def __getitem__(self, obj):
a = self.d[obj].me
self[a[0]] = a[1]
return a[1]
def get(self, key, default=None, strictkey=False):
if not self.has_key(key) and strictkey:
raise KeyError, key
if self.has_key(key):
return self.__getitem__(key)
else:
return default
def __setitem__(self, obj, val):
if obj in self.d:
del self[obj]
nobj = self.Node(self.last, (obj, val))
if self.first is None:
self.first = nobj
if self.last:
self.last.next = nobj
self.last = nobj
self.d[obj] = nobj
if len(self.d) > self._maxsize:
if self.first == self.last:
self.first = None
self.last = None
return
a = self.first
a.next.prev = None
self.first = a.next
a.next = None
del self.d[a.me[0]]
del a
def insert(self, key, item=None):
return self.__setitem__(key, item)
def __delitem__(self, obj, default=None, strictkey=True):
if self.d.has_key(obj):
nobj = self.d[obj]
if nobj.prev:
nobj.prev.next = nobj.next
else:
self.first = nobj.next
if nobj.next:
nobj.next.prev = nobj.prev
else:
self.last = nobj.prev
val = self.d[obj]
del self.d[obj]
return val.me[1]
elif strictkey:
raise KeyError, obj
else:
return default
def remove(self, obj, default=None, strictkey=True):
return self.__delitem__(obj, default=default, strictkey=strictkey)
def __iter__(self):
cur = self.first
while cur != None:
cur2 = cur.next
yield cur.me[1]
cur = cur2
def iteritems(self):
cur = self.first
while cur != None:
cur2 = cur.next
yield cur.me
cur = cur2
def iterkeys(self):
return iter(self.d)
def itervalues(self):
for i,j in self.iteritems():
yield j
def values(self):
l = []
for v in self.itervalues():
l.append(v)
return l
def keys(self):
return self.d.keys()
def __len__(self):
return self.d.__len__()
def popitem(self):
i = self.last.me
obj = self.remove(i[0])
return obj
| 27,000
|
Python
|
.py
| 653
| 31.174579
| 268
| 0.569237
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,390
|
__init__.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/__init__.py
|
"""
Library of useful Python functions and classes.
Projects that have contributed substantial portions to pyutil:
U{Mojo Nation<http://mojonation.net/>}
U{Mnet<http://sf.net/projects/mnet>}
U{Allmydata<http://allmydata.com/>}
U{Tahoe-LAFS<http://tahoe-lafs.org/>}
mailto:zooko@zooko.com
pyutil web site: U{http://tahoe-lafs.org/trac/pyutil}
"""
__version__ = "unknown"
try:
from _version import __version__
except ImportError:
# We're running in a tree that hasn't run "./setup.py darcsver", and didn't
# come with a _version.py, so we don't know what our version is. This should
# not happen very often.
pass
__version__ # hush pyflakes
| 663
|
Python
|
.py
| 19
| 32.631579
| 80
| 0.729688
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,391
|
iputil.py~
|
CouchPotato_CouchPotatoServer/libs/pyutil/iputil.py~
|
# portions extracted from ipaddresslib by Autonomous Zone Industries, LGPL (author: Greg Smith)
# portions adapted from nattraverso.ipdiscover
# portions authored by Brian Warner, working for Allmydata
# most recent version authored by Zooko O'Whielacronx, working for Allmydata
# from the Python Standard Library
import os, re, socket, sys
# from Twisted
from twisted.internet import defer, reactor
from twisted.python import failure
from twisted.internet.protocol import DatagramProtocol
from twisted.internet.utils import getProcessOutput
from twisted.python.procutils import which
from twisted.python import log
# from pyutil
import observer
try:
import resource
def increase_rlimits():
# We'd like to raise our soft resource.RLIMIT_NOFILE, since certain
# systems (OS-X, probably solaris) start with a relatively low limit
# (256), and some unit tests want to open up more sockets than this.
# Most linux systems start with both hard and soft limits at 1024,
# which is plenty.
# unfortunately the values to pass to setrlimit() vary widely from
# one system to another. OS-X reports (256, HUGE), but the real hard
# limit is 10240, and accepts (-1,-1) to mean raise it to the
# maximum. Cygwin reports (256, -1), then ignores a request of
# (-1,-1): instead you have to guess at the hard limit (it appears to
# be 3200), so using (3200,-1) seems to work. Linux reports a
# sensible (1024,1024), then rejects (-1,-1) as trying to raise the
# maximum limit, so you could set it to (1024,1024) but you might as
# well leave it alone.
try:
current = resource.getrlimit(resource.RLIMIT_NOFILE)
except AttributeError:
# we're probably missing RLIMIT_NOFILE
return
if current[0] >= 1024:
# good enough, leave it alone
return
try:
if current[1] > 0 and current[1] < 1000000:
# solaris reports (256, 65536)
resource.setrlimit(resource.RLIMIT_NOFILE,
(current[1], current[1]))
else:
# this one works on OS-X (bsd), and gives us 10240, but
# it doesn't work on linux (on which both the hard and
# soft limits are set to 1024 by default).
resource.setrlimit(resource.RLIMIT_NOFILE, (-1,-1))
new = resource.getrlimit(resource.RLIMIT_NOFILE)
if new[0] == current[0]:
# probably cygwin, which ignores -1. Use a real value.
resource.setrlimit(resource.RLIMIT_NOFILE, (3200,-1))
except ValueError:
log.msg("unable to set RLIMIT_NOFILE: current value %s"
% (resource.getrlimit(resource.RLIMIT_NOFILE),))
except:
# who knows what. It isn't very important, so log it and continue
log.err()
except ImportError:
def _increase_rlimits():
# TODO: implement this for Windows. Although I suspect the
# solution might be "be running under the iocp reactor and
# make this function be a no-op".
pass
    # pyflakes complains about two 'def FOO' statements in the same scope,
# since one might be shadowing the other. This hack appeases pyflakes.
increase_rlimits = _increase_rlimits
def get_local_addresses_async(target="198.41.0.4"): # A.ROOT-SERVERS.NET
"""
Return a Deferred that fires with a list of IPv4 addresses (as dotted-quad
strings) that are currently configured on this host, sorted in descending
order of how likely we think they are to work.
@param target: we want to learn an IP address they could try using to
connect to us; The default value is fine, but it might help if you
pass the address of a host that you are actually trying to be
reachable to.
"""
addresses = []
local_ip = get_local_ip_for(target)
if local_ip:
addresses.append(local_ip)
if sys.platform == "cygwin":
d = _cygwin_hack_find_addresses(target)
else:
d = _find_addresses_via_config()
def _collect(res):
for addr in res:
if addr != "0.0.0.0" and not addr in addresses:
addresses.append(addr)
return addresses
d.addCallback(_collect)
return d
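# A minimal sketch of consuming the Deferred returned above, assuming a running
# Twisted reactor (the callback name is illustrative):
#
#     def _got_addresses(addrs):
#         print "local addresses, most to least promising:", addrs
#     get_local_addresses_async().addCallback(_got_addresses)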
def get_local_ip_for(target):
"""Find out what our IP address is for use by a given target.
@return: the IP address as a dotted-quad string which could be used by
to connect to us. It might work for them, it might not. If
there is no suitable address (perhaps we don't currently have an
externally-visible interface), this will return None.
"""
try:
target_ipaddr = socket.gethostbyname(target)
except socket.gaierror:
# DNS isn't running, or somehow we encountered an error
# note: if an interface is configured and up, but nothing is connected to it,
# gethostbyname("A.ROOT-SERVERS.NET") will take 20 seconds to raise socket.gaierror
# . This is synchronous and occurs for each node being started, so users of certain unit
# tests will see something like 120s of delay, which may be enough to hit the default
# trial timeouts. For that reason, get_local_addresses_async() was changed to default to
# the numerical ip address for A.ROOT-SERVERS.NET, to avoid this DNS lookup. This also
# makes node startup a tad faster.
return None
udpprot = DatagramProtocol()
port = reactor.listenUDP(0, udpprot)
try:
udpprot.transport.connect(target_ipaddr, 7)
localip = udpprot.transport.getHost().host
except socket.error:
# no route to that host
localip = None
port.stopListening() # note, this returns a Deferred
return localip
# k: result of sys.platform, v: which kind of IP configuration reader we use
_platform_map = {
"linux-i386": "linux", # redhat
"linux-ppc": "linux", # redhat
"linux2": "linux", # debian
"win32": "win32",
"irix6-n32": "irix",
"irix6-n64": "irix",
"irix6": "irix",
"openbsd2": "bsd",
"darwin": "bsd", # Mac OS X
"freebsd4": "bsd",
"freebsd5": "bsd",
"freebsd6": "bsd",
"netbsd1": "bsd",
"sunos5": "sunos",
"cygwin": "cygwin",
}
class UnsupportedPlatformError(Exception):
pass
# Wow, I'm really amazed at how much mileage we've gotten out of calling
# the external route.exe program on windows... It appears to work on all
# versions so far. Still, the real system calls would much be preferred...
# ... thus wrote Greg Smith in time immemorial...
_win32_path = 'route.exe'
_win32_args = ('print',)
_win32_re = re.compile('^\s*\d+\.\d+\.\d+\.\d+\s.+\s(?P<address>\d+\.\d+\.\d+\.\d+)\s+(?P<metric>\d+)\s*$', flags=re.M|re.I|re.S)
# These work in Redhat 6.x and Debian 2.2 potato
_linux_path = '/sbin/ifconfig'
_linux_re = re.compile('^\s*inet addr:(?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# originally NetBSD 1.4 (submitted by Rhialto), Darwin, Mac OS X, FreeBSD, OpenBSD
_bsd_path = '/sbin/ifconfig'
_bsd_args = ('-a',)
_bsd_re = re.compile('^\s+inet (?P<address>\d+\.\d+\.\d+\.\d+)\s.+$', flags=re.M|re.I|re.S)
# Irix 6.5
_irix_path = '/usr/etc/ifconfig'
# Solaris 2.x
_sunos_path = '/usr/sbin/ifconfig'
class SequentialTrier(object):
""" I hold a list of executables to try and try each one in turn
until one gives me a list of IP addresses."""
def __init__(self, exebasename, args, regex):
assert not os.path.isabs(exebasename)
self.exes_left_to_try = which(exebasename)
self.exes_left_to_try.reverse()
self.args = args
self.regex = regex
self.o = observer.OneShotObserverList()
self._try_next()
def _try_next(self):
if not self.exes_left_to_try:
self.o.fire(None)
else:
exe = self.exes_left_to_try.pop()
d2 = _query(exe, self.args, self.regex)
def cb(res):
if res:
self.o.fire(res)
else:
self._try_next()
def eb(why):
self._try_next()
d2.addCallbacks(cb, eb)
def when_tried(self):
return self.o.when_fired()
# k: platform string as provided in the value of _platform_map
# v: tuple of (path_to_tool, args, regex,)
_tool_map = {
"linux": (_linux_path, (), _linux_re,),
"win32": (_win32_path, _win32_args, _win32_re,),
"cygwin": (_win32_path, _win32_args, _win32_re,),
"bsd": (_bsd_path, _bsd_args, _bsd_re,),
"irix": (_irix_path, _bsd_args, _bsd_re,),
"sunos": (_sunos_path, _bsd_args, _bsd_re,),
}
def _find_addresses_via_config():
# originally by Greg Smith, hacked by Zooko to conform to Brian Warner's API.
platform = _platform_map.get(sys.platform)
(pathtotool, args, regex,) = _tool_map.get(platform, ('ifconfig', _bsd_args, _bsd_re,))
# If the platform isn't known then we attempt BSD-style ifconfig. If it
# turns out that we don't get anything resembling a dotted quad IPv4 address
# out of it, then we'll raise UnsupportedPlatformError.
# If pathtotool is a fully qualified path then we just try that.
# If it is merely an executable name then we use Twisted's
# "which()" utility and try each executable in turn until one
# gives us something that resembles a dotted-quad IPv4 address.
if os.path.isabs(pathtotool):
d = _query(pathtotool, args, regex)
else:
d = SequentialTrier(pathtotool, args, regex).when_tried()
d.addCallback(_check_result)
return d
def _check_result(result):
if not result and not _platform_map.has_key(sys.platform):
return failure.Failure(UnsupportedPlatformError(sys.platform))
else:
return result
def _query(path, args, regex):
d = getProcessOutput(path, args)
def _parse(output):
addresses = []
outputsplit = output.split('\n')
for outline in outputsplit:
m = regex.match(outline)
if m:
addr = m.groupdict()['address']
if addr not in addresses:
addresses.append(addr)
return addresses
d.addCallback(_parse)
return d
def _cygwin_hack_find_addresses(target):
addresses = []
for h in [target, "localhost", "127.0.0.1",]:
try:
addr = get_local_ip_for(h)
if addr not in addresses:
addresses.append(addr)
except socket.gaierror:
pass
return defer.succeed(addresses)
| 10,720
|
Python
|
.py
| 244
| 36.52459
| 129
| 0.639858
|
CouchPotato/CouchPotatoServer
| 3,869
| 1,214
| 1,266
|
GPL-3.0
|
9/5/2024, 5:10:17 PM (Europe/Amsterdam)
|
7,392
|
memutil.py
|
CouchPotato_CouchPotatoServer/libs/pyutil/memutil.py
|
# Copyright (c) 2002-2010 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
# from the Python Standard Library
import exceptions, gc, math, operator, os, sys, types
# from the pyutil library
from assertutil import precondition
import mathutil
class Canary:
"""
Want to get a printout when your object is garbage collected? Then put "self.canary = Canary(self)" in your object's constructor.
"""
def __init__(self, owner):
self.ownerdesc = repr(owner)
def __del__(self):
print "Canary says that %s is gone." % self.ownerdesc
def estimate_mem_of_obj(o):
# assumes 32-bit CPUs...
PY_STRUCT_HEAD_LEN=4
if hasattr(o, '__len__'):
if isinstance(o, str):
return PY_STRUCT_HEAD_LEN + o.__len__() * 1
if isinstance(o, unicode):
return PY_STRUCT_HEAD_LEN + o.__len__() * 4 # 4 depends on implementation and is approximate
if isinstance(o, (tuple, list,)):
return PY_STRUCT_HEAD_LEN + o.__len__() * 4
if isinstance(o, (dict, set,)):
return PY_STRUCT_HEAD_LEN + o.__len__() * 4 * 2 * 2 # approximate
if isinstance(o, int):
return PY_STRUCT_HEAD_LEN + 4
if isinstance(o, long):
return PY_STRUCT_HEAD_LEN + 4
if o < 1:
return PY_STRUCT_HEAD_LEN
else:
return PY_STRUCT_HEAD_LEN + math.log(o) / 5 # the 5 was empirically determined (it is approximate)
if isinstance(o, float):
return PY_STRUCT_HEAD_LEN + 8
# Uh-oh... I wonder what we are missing here...
return PY_STRUCT_HEAD_LEN
def check_for_obj_leakage(f, *args, **kwargs):
"""
The idea is that I am going to invoke f(), then run gc.collect(), then run
gc.get_objects() to get a complete list of all objects in the system, then
invoke f() a second time, then run gc.collect(), then run gc.get_objects()
to get a list of all the objects *now* in the system.
Then I return a tuple two things: the first element of the tuple is the
difference between the number of objects in the second list and the number
of objects in the first list.
I.e., if this number is zero then you can be pretty sure there is no memory
leak, unless f is deleting some objects and replacing them by exactly the
same number of objects but the new objects take up more memory. If this
    number is greater than zero then you can be pretty sure there is a memory
leak, unless f is doing some memoization/caching behavior and it will
eventually stabilize, which you can detect by running
check_for_obj_leakage() more times and seeing if it stabilizes.
(Actually we run f() followed by gc.collect() one time before we start in
order to account for any static objects which are created the first time
you run f() and then re-used after that.)
The second element in the return value is the set of all objects which were
present in the second list and not in the first. Some of these objects
might be memory-leaked objects, or perhaps f deleted some objects and
replaced them with equivalent objects, in which case these objects are not
leaked.
(We actually invoke gc.collect() three times in a row in case there are
objects which get collected in the first pass that have finalizers which
create new reference-cycled objects... "3" is a superstitious number -- we
figure most of the time the finalizers of the things produced by the first
    round of finalizers won't themselves produce another round of
reference-cycled objects.)
"""
f()
gc.collect();gc.collect();gc.collect()
f()
gc.collect();gc.collect();gc.collect()
r1 = gc.get_objects()
f()
gc.collect();gc.collect();gc.collect()
r2 = gc.get_objects()
d2 = dict([(id(x), x) for x in r2])
# Now remove everything from r1, and r1 itself, from d2.
del d2[id(r1)]
for o in r1:
if id(o) in d2:
del d2[id(o)]
return (len(r2) - len(r1) - 1, d2)
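# --- Usage sketch (not part of the original module) ---
# A minimal illustration of how check_for_obj_leakage() is meant to be read:
# a function that retains one new object per call shows up as a positive
# object-count delta.
def _example_check_for_obj_leakage():
    cache = []
    def leaky():
        cache.append(object())
    delta, new_objects = check_for_obj_leakage(leaky)
    # delta > 0 hints at a leak (or at caching that has not yet stabilized)
    return delta, new_objects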
def measure_obj_leakage(f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
"""
The idea is we are going to use count_all_objects() to see how many
objects are in use, and keep track of that number with respect to how
many times we've invoked f(), and return the slope of the best linear
fit.
@param numsamples: recommended: 2**7
@param iterspersample: how many times f() should be invoked per sample;
Basically, choose iterspersample such that
iterspersample * numsamples *
how-long-it-takes-to-compute-f() is slightly less
than how long you are willing to wait for this
leak test.
@return: the slope of the best linear fit, which can be interpreted as 'the
approximate number of Python objects created and not destroyed
per invocation of f()'
"""
precondition(numsamples > 0, "numsamples is required to be positive.", numsamples)
precondition(iterspersample > 0, "iterspersample is required to be positive.", iterspersample)
resiters = [None]*numsamples # values: iters
resnumobjs = [None]*numsamples # values: numobjs
totaliters = 0
for i in range(numsamples):
for j in range(iterspersample):
f(*args, **kwargs)
totaliters = totaliters + iterspersample
resiters[i] = totaliters
gc.collect()
resnumobjs[i] = count_all_objects()
# print "totaliters: %s, numobjs: %s" % (resiters[-1], resnumobjs[-1],)
avex = float(reduce(operator.__add__, resiters)) / len(resiters)
avey = float(reduce(operator.__add__, resnumobjs)) / len(resnumobjs)
sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(resiters, resnumobjs)))
sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, resiters))
return sxy / sxx
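# --- Usage sketch (not part of the original module) ---
# measure_obj_leakage() returns objects-created-per-invocation; a function
# that deliberately retains one object per call should yield a slope near 1.
def _example_measure_obj_leakage():
    retained = []
    def leaky():
        retained.append(object())
    # small sample counts keep the example quick; the defaults are more thorough
    return measure_obj_leakage(leaky, numsamples=2**3, iterspersample=2**2)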
def linear_fit_slope(xs, ys):
avex = float(reduce(operator.__add__, xs)) / len(xs)
avey = float(reduce(operator.__add__, ys)) / len(ys)
sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(xs, ys)))
sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, xs))
return sxy / sxx
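# --- Worked example (not part of the original module) ---
# For points lying exactly on y = 2*x + 1 the least-squares slope is 2.0:
def _example_linear_fit_slope():
    xs = [1, 2, 3, 4]
    ys = [3, 5, 7, 9]  # exactly y = 2*x + 1
    assert linear_fit_slope(xs, ys) == 2.0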
def measure_ref_leakage(f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
"""
The idea is we are going to use sys.gettotalrefcount() to see how many
references are extant, and keep track of that number with respect to how
many times we've invoked f(), and return the slope of the best linear
fit.
@param numsamples: recommended: 2**7
@param iterspersample: how many times f() should be invoked per sample;
Basically, choose iterspersample such that
iterspersample * numsamples *
how-long-it-takes-to-compute-f() is slightly less
than how long you are willing to wait for this
leak test.
@return: the slope of the best linear fit, which can be interpreted as 'the
approximate number of Python references created and not
nullified per invocation of f()'
"""
precondition(numsamples > 0, "numsamples is required to be positive.", numsamples)
precondition(iterspersample > 0, "iterspersample is required to be positive.", iterspersample)
try:
sys.gettotalrefcount()
except AttributeError, le:
raise AttributeError(le, "Probably this is not a debug build of Python, so it doesn't have a sys.gettotalrefcount function.")
resiters = [None]*numsamples # values: iters
resnumrefs = [None]*numsamples # values: numrefs
totaliters = 0
for i in range(numsamples):
for j in range(iterspersample):
f(*args, **kwargs)
totaliters = totaliters + iterspersample
resiters[i] = totaliters
gc.collect()
resnumrefs[i] = sys.gettotalrefcount()
# print "totaliters: %s, numrefss: %s" % (resiters[-1], resnumrefs[-1],)
avex = float(reduce(operator.__add__, resiters)) / len(resiters)
avey = float(reduce(operator.__add__, resnumrefs)) / len(resnumrefs)
sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(resiters, resnumrefs)))
sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, resiters))
return sxy / sxx
class NotSupportedException(exceptions.StandardError):
"""
Just an exception class. It is thrown by get_mem_usage if the OS does
not support the operation.
"""
pass
def get_mem_used():
"""
This only works on Linux, and only if the /proc/$PID/statm output is the
same as that in linux kernel 2.6. Also `os.getpid()' must work.
@return: tuple of (res, virt) used by this process
"""
try:
import resource
except ImportError:
raise NotSupportedException
# sample output from cat /proc/$PID/statm:
# 14317 3092 832 279 0 2108 0
a = os.popen("cat /proc/%s/statm 2>/dev/null" % os.getpid()).read().split()
if not a:
raise NotSupportedException
return (int(a[1]) * resource.getpagesize(), int(a[0]) * resource.getpagesize(),)
def get_mem_used_res():
"""
This only works on Linux, and only if the /proc/$PID/statm output is the
same as that in linux kernel 2.6. Also `os.getpid()' must work.
"""
try:
import resource
except ImportError:
raise NotSupportedException
# sample output from cat /proc/$PID/statm:
# 14317 3092 832 279 0 2108 0
a = os.popen("cat /proc/%s/statm" % os.getpid()).read().split()
if not len(a) > 1:
raise NotSupportedException
return int(a[1]) * resource.getpagesize()
def get_mem_usage_virt_and_res():
"""
This only works on Linux, and only if the /proc/$PID/statm output is the
same as that in linux kernel 2.6. Also `os.getpid()' must work.
"""
try:
import resource
except ImportError:
raise NotSupportedException
# sample output from cat /proc/$PID/statm:
# 14317 3092 832 279 0 2108 0
a = os.popen("cat /proc/%s/statm" % os.getpid()).read().split()
if not len(a) > 1:
raise NotSupportedException
return (int(a[0]) * resource.getpagesize(), int(a[1]) * resource.getpagesize(),)
class Measurer(object):
def __init__(self, f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
"""
@param f a callable; If it returns a deferred then the memory will not
be measured and the next iteration will not be started until the
deferred fires; else the memory will be measured and the next
iteration started when f returns.
"""
self.f = f
self.numsamples = numsamples
self.iterspersample = iterspersample
self.args = args
self.kwargs = kwargs
# from twisted
from twisted.internet import defer
self.d = defer.Deferred()
def when_complete(self):
return self.d
def _invoke(self):
d = self.f(*self.args, **self.kwargs)
# from twisted
from twisted.internet import defer
if isinstance(d, defer.Deferred):
d.addCallback(self._after)
else:
self._after(None)
def start(self):
self.resiters = [None]*self.numsamples # values: iters
self.resmemusage = [None]*self.numsamples # values: memusage
self.totaliters = 0
self.i = 0
self.j = 0
self._invoke()
def _after(self, o):
self.j += 1
if self.j < self.iterspersample:
self._invoke()
return
        if self.i < self.numsamples:
            self.j = 0
            self.totaliters += self.iterspersample
            # record this sample before advancing the index (avoids skipping
            # slot 0 and running past the end of the result lists)
            self.resiters[self.i] = self.totaliters
            self.resmemusage[self.i] = get_mem_used_res()
            self.i += 1
            self._invoke()
            return
self.d.callback(mathutil.linear_fit_slope(zip(self.resiters, self.resmemusage)))
def measure_mem_leakage(f, numsamples=2**7, iterspersample=2**4, *args, **kwargs):
"""
This does the same thing as measure_obj_leakage() but instead of using
count_all_objects() it uses get_mem_usage(), which is currently
implemented for Linux and barely implemented for Mac OS X.
@param numsamples: recommended: 2**7
@param iterspersample: how many times `f()' should be invoked per sample;
Basically, choose `iterspersample' such that
(iterspersample * numsamples *
how-long-it-takes-to-compute-`f()') is slightly
less than how long you are willing to wait for
this leak test.
@return: the slope of the best linear fit, which can be interpreted as
'the approximate number of system bytes allocated and not freed
per invocation of f()'
"""
precondition(numsamples > 0, "numsamples is required to be positive.", numsamples)
precondition(iterspersample > 0, "iterspersample is required to be positive.", iterspersample)
resiters = [None]*numsamples # values: iters
resmemusage = [None]*numsamples # values: memusage
totaliters = 0
for i in range(numsamples):
for j in range(iterspersample):
f(*args, **kwargs)
totaliters = totaliters + iterspersample
resiters[i] = totaliters
gc.collect()
resmemusage[i] = get_mem_used_res()
# print "totaliters: %s, numobjs: %s" % (resiters[-1], resmemusage[-1],)
avex = float(reduce(operator.__add__, resiters)) / len(resiters)
avey = float(reduce(operator.__add__, resmemusage)) / len(resmemusage)
sxy = reduce(operator.__add__, map(lambda a, avex=avex, avey=avey: (a[0] - avex) * (a[1] - avey), zip(resiters, resmemusage)))
sxx = reduce(operator.__add__, map(lambda a, avex=avex: (a - avex) ** 2, resiters))
if sxx == 0:
return None
return sxy / sxx
def describe_object(o, FunctionType=types.FunctionType, MethodType=types.MethodType, InstanceType=types.InstanceType):
"""
For human analysis, when humans are attempting to understand where all the
memory is going. Argument o is an object, return value is a string
describing the object.
"""
sl = []
if isinstance(o, FunctionType):
try:
sl.append("<type 'function' %s>" % str(o.func_name))
except:
pass
elif isinstance(o, MethodType):
try:
sl.append("<type 'method' %s>" % str(o.im_func.func_name))
except:
pass
elif isinstance(o, InstanceType):
try:
sl.append("<type 'instance' %s>" % str(o.__class__.__name__))
except:
pass
else:
sl.append(str(type(o)))
try:
sl.append(str(len(o)))
except:
pass
return ''.join(sl)
import dictutil
def describe_object_with_dict_details(o):
sl = []
sl.append(str(type(o)))
if isinstance(o, types.FunctionType):
try:
sl.append(str(o.func_name))
except:
pass
elif isinstance(o, types.MethodType):
try:
sl.append(str(o.im_func.func_name))
except:
pass
try:
sl.append(str(len(o)))
except:
pass
if isinstance(o, dict) and o:
sl.append('-')
nd = dictutil.NumDict()
for k, v in o.iteritems():
nd.inc((describe_object(k), describe_object(v),))
k, v = nd.item_with_largest_value()
sl.append("-")
iterator = o.iteritems()
k,v = iterator.next()
sl.append(describe_object(k))
sl.append(":")
sl.append(describe_object(v))
return ''.join(sl)
def describe_dict(o):
sl = ['<dict']
l = len(o)
sl.append(str(l))
if l:
sl.append("-")
iterator = o.iteritems()
firstitem=True
try:
while True:
if firstitem:
firstitem = False
else:
sl.append(", ")
k,v = iterator.next()
sl.append(describe_object(k))
sl.append(": ")
sl.append(describe_object(v))
except StopIteration:
pass
sl.append('>')
return ''.join(sl)
def count_all_objects():
ids = set()
ls = locals()
import inspect
cf = inspect.currentframe()
for o in gc.get_objects():
if o is ids or o is ls or o is cf:
continue
if not id(o) in ids:
ids.add(id(o))
for so in gc.get_referents(o):
if not id(so) in ids:
ids.add(id(so))
return len(ids)
def visit_all_objects(f):
"""
Brian and I *think* that this gets all objects. This is predicated on the
assumption that every object either participates in gc, or is at most one
hop from an object that participates in gc. This was Brian's clever idea.
"""
ids = set()
ls = locals()
import inspect
cf = inspect.currentframe()
for o in gc.get_objects():
if o is ids or o is ls or o is cf:
continue
if not id(o) in ids:
ids.add(id(o))
f(o)
for so in gc.get_referents(o):
if not id(so) in ids:
ids.add(id(so))
f(so)
def get_all_objects():
objs = []
def addit(o):
objs.append(o)
visit_all_objects(addit)
return objs
def describe_all_objects():
import dictutil
d = dictutil.NumDict()
for o in get_all_objects():
d.inc(describe_object(o))
return d
def dump_description_of_object(o, f):
f.write("%x" % (id(o),))
f.write("-")
f.write(describe_object(o))
f.write("\n")
def dump_description_of_object_refs(o, f):
# This holds the ids of all referents that we've already dumped.
dumped = set()
# First, any __dict__ items
try:
itemsiter = o.__dict__.iteritems()
except:
pass
else:
for k, v in itemsiter:
try:
idr = id(v)
if idr not in dumped:
dumped.add(idr)
f.write("%d:"%len(k))
f.write(k)
f.write(",")
f.write("%0x,"%idr)
except:
pass
# Then anything else that gc.get_referents() returns.
for r in gc.get_referents(o):
idr = id(r)
if idr not in dumped:
dumped.add(idr)
f.write("0:,%0x,"%idr)
def dump_descriptions_of_all_objects(f):
ids = set()
ls = locals()
for o in gc.get_objects():
if o is f or o is ids or o is ls:
continue
if not id(o) in ids:
ids.add(id(o))
dump_description_of_object(o, f)
for so in gc.get_referents(o):
if o is f or o is ids or o is ls:
continue
if not id(so) in ids:
ids.add(id(so))
dump_description_of_object(so, f)
ls = None # break reference cycle
return len(ids)
def dump_description_of_object_with_refs(o, f):
f.write("%0x" % (id(o),))
f.write("-")
desc = describe_object(o)
f.write("%d:"%len(desc))
f.write(desc)
f.write(",")
dump_description_of_object_refs(o, f)
f.write("\n")
def dump_descriptions_of_all_objects_with_refs(f):
ids = set()
ls = locals()
for o in gc.get_objects():
if o is f or o is ids or o is ls:
continue
if not id(o) in ids:
ids.add(id(o))
dump_description_of_object_with_refs(o, f)
for so in gc.get_referents(o):
if o is f or o is ids or o is ls:
continue
if not id(so) in ids:
ids.add(id(so))
dump_description_of_object_with_refs(so, f)
ls = None # break reference cycle
return len(ids)
import re
NRE = re.compile("[1-9][0-9]*$")
def undump_descriptions_of_all_objects(inf):
d = {}
for l in inf:
dash=l.find('-')
if dash == -1:
raise l
mo = NRE.search(l)
if mo:
typstr = l[dash+1:mo.start(0)]
num=int(mo.group(0))
if str(num) != mo.group(0):
raise mo.group(0)
else:
typstr = l[dash+1:]
num = None
d[l[:dash]] = (typstr, num,)
return d
| 20,767 | Python | .py | 521 | 31.641075 | 134 | 0.605272 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,393 | PickleSaver.py | CouchPotato_CouchPotatoServer/libs/pyutil/PickleSaver.py |
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
An object that makes some of the attributes of your class persistent, pickling
them and lazily writing them to a file.
"""
# from the Python Standard Library
import os
import cPickle as pickle
import warnings
# from the pyutil library
import fileutil
import nummedobj
import twistedutil
# from the Twisted library
from twisted.python import log
class PickleSaver(nummedobj.NummedObj):
"""
This makes some of the attributes of your class persistent, saving
them in a pickle and saving them lazily.
The general idea: You are going to tell PickleSaver which of your
attributes ought to be persistently saved, and the name of a file to
save them in. Those attributes will get saved to disk, and when
your object is instantiated those attributes will get set to the
values loaded from the file.
Usage: inherit from PickleSaver and call PickleSaver.__init__() in your
constructor. You will pass arguments to PickleSaver.__init__()
telling it which attributes to save, which file to save them in, and
what values they should have if there is no value stored for them in
the file.
Note: do *not* assign values to your persistent attributes in your
constructor, because you might thus overwrite their persistent
values.
Then whenever you change one of the persistent attributes, call
self.lazy_save() (it won't *really* save -- it'll just schedule a
    save for DELAY seconds later.) If you update an attribute and
forget to call self.lazy_save() then the change will not be saved,
unless you later call self.lazy_save() before you shut down.
Data could be lost if the Python interpreter were to die
unexpectedly (for example, due to a segfault in a compiled machine
code module or due to the Python process being killed without
warning via SIGKILL) before the delay passes. However if the Python
interpreter shuts down cleanly (i.e., if it garbage collects and
invokes the __del__ methods of the collected objects), then the data
will be saved at that time (unless your class has the "not-collectable"
problem: http://python.org/doc/current/lib/module-gc.html -- search
in text for "uncollectable").
Note: you can pass DELAY=0 to make PickleSaver a not-so-lazy saver.
The advantage of laziness is that you don't touch the disk as
often -- touching disk is a performance cost.
To cleanly shutdown, invoke shutdown(). Further operations after that
will result in exceptions.
"""
class ExtRes:
"""
This is for holding things (external resources) that PickleSaver needs
to finalize after PickleSaver is killed. (post-mortem finalization)
In particular, this holds the names and values of all attributes
that have been changed, so that after the PickleSaver is
garbage-collected those values will be saved to the persistent file.
"""
def __init__(self, fname, objname):
self.fname = fname
self.objname = objname
self.dirty = False # True iff the attrs have been changed and need to be saved to disk; When you change this flag from False to True, you schedule a save task for 10 minutes later. When the save task goes off it changes the flag from True to False.
self.savertask = None
self.valstr = None # the pickled (serialized, string) contents of the attributes that should be saved
def _save_to_disk(self):
if self.valstr is not None:
log.msg("%s._save_to_disk(): fname: %s" % (self.objname, self.fname,))
of = open(self.fname + ".tmp", "wb")
of.write(self.valstr)
of.flush()
of.close()
of = None
fileutil.remove_if_possible(self.fname)
fileutil.rename(self.fname + ".tmp", self.fname)
log.msg("%s._save_to_disk(): now, having finished write(), os.path.isfile(%s): %s" % (self, self.fname, os.path.isfile(self.fname),))
self.valstr = None
self.dirty = False
try:
self.savertask.callId.cancel()
except:
pass
self.savertask = None
def shutdown(self):
if self.dirty:
self._save_to_disk()
if self.savertask:
try:
self.savertask.callId.cancel()
except:
pass
self.savertask = None
def __del__(self):
self.shutdown()
def __init__(self, fname, attrs, DELAY=60*60, savecb=None):
"""
@param attrs: a dict whose keys are the names of all the attributes to be persistently stored and whose values are the initial default value that the attribute gets set to the first time it is ever used; After this first initialization, the value will be persistent so the initial default value will never be used again.
@param savecb: if not None, then it is a callable that will be called after each save completes (useful for unit tests) (savecb doesn't get called after a shutdown-save, only after a scheduled save)
"""
warnings.warn("deprecated", DeprecationWarning)
nummedobj.NummedObj.__init__(self)
self._DELAY = DELAY
self._attrnames = attrs.keys()
self._extres = PickleSaver.ExtRes(fname=fname, objname=self.__repr__())
self._savecb = savecb
for attrname, defaultval in attrs.items():
setattr(self, attrname, defaultval)
try:
attrdict = pickle.loads(open(self._extres.fname, "rb").read())
for attrname, attrval in attrdict.items():
if not hasattr(self, attrname):
log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,))
setattr(self, attrname, attrval)
except (pickle.UnpicklingError, IOError, EOFError,), le:
try:
attrdict = pickle.loads(open(self._extres.fname + ".tmp", "rb").read())
for attrname, attrval in attrdict.items():
if not hasattr(self, attrname):
log.msg("WARNING: %s has no attribute named %s on load from disk, value: %s." % (self, attrname, attrval,))
setattr(self, attrname, attrval)
except (pickle.UnpicklingError, IOError, EOFError,), le2:
log.msg("Got exception attempting to load attrs. (This is normal if this is the first time you've used this persistent %s object.) fname: %s, le: %s, le2: %s" % (self.__class__, self._extres.fname, le, le2,))
self.lazy_save()
def _store_attrs_in_extres(self):
d = {}
for attrname in self._attrnames:
d[attrname] = getattr(self, attrname)
# log.msg("%s._store_attrs_in_extres: attrname: %s, val: %s" % (self, attrname, getattr(self, attrname),))
# pickle the attrs now, to ensure that there are no reference cycles
self._extres.valstr = pickle.dumps(d, True)
# log.msg("%s._store_attrs_in_extres: valstr: %s" % (self, self._extres.valstr,))
self._extres.dirty = True
def _save_to_disk(self):
log.msg("%s._save_to_disk()" % (self,))
self._extres._save_to_disk()
if self._savecb:
self._savecb()
def _lazy_save(self, delay=None):
""" @deprecated: use lazy_save() instead """
return self.lazy_save(delay)
def lazy_save(self, delay=None):
"""
@param delay: how long from now before the data gets saved to disk, or `None' in order to use the default value provided in the constructor
"""
if delay is None:
delay=self._DELAY
# copy the values into extres so that if `self' gets garbage-collected the values will be written to disk during post-mortem finalization. (This also marks it as dirty.)
self._store_attrs_in_extres()
newsavetask = twistedutil.callLater_weakly(delay, self._save_to_disk)
if self._extres.savertask:
if self._extres.savertask.callId.getTime() < newsavetask.callId.getTime():
try:
newsavetask.callId.cancel()
except:
pass
else:
try:
self._extres.savertask.callId.cancel()
except:
pass
self._extres.savertask = newsavetask
else:
self._extres.savertask = newsavetask
def shutdown(self):
        self._extres.shutdown()
        self._extres = None
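# --- Usage sketch (not part of the original module) ---
# A minimal subclass following the recipe in the class docstring: declare the
# persistent attributes (with their first-run defaults) via attrs and call
# lazy_save() after every change.  The file name here is purely illustrative.
class _ExampleCounter(PickleSaver):
    def __init__(self, fname="example-counter.pickle"):
        PickleSaver.__init__(self, fname=fname, attrs={'count': 0})
    def increment(self):
        self.count += 1
        self.lazy_save()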
| 8,932 | Python | .py | 169 | 42.822485 | 329 | 0.637842 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,394 | increasing_timer.py | CouchPotato_CouchPotatoServer/libs/pyutil/increasing_timer.py |
# Copyright (c) 2001 Autonomous Zone Industries
# Copyright (c) 2002-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
This module was invented when it was discovered that time.time() can return
decreasing answers, which was causing scheduled tasks to get executed out of
order. See python bug report `[ #447945 ] time.time() is not
non-decreasing',
http://sourceforge.net/tracker/index.php?func=detail&aid=447945&group_id=5470&atid=105470
http://mail.python.org/pipermail/python-list/2001-August/thread.html#58296
After posting that bug report, I figured out that this isn't really a bug,
but a misunderstanding about the semantics of gettimeofday(). gettimeofday()
relies on the hardware clock, which is supposed to reflect the "real" time
i.e. the position and orientation of our planet with regard to our sun. But
the hardware clock gets adjusted, either for skew (because hardware clocks
always run a little faster or a little slower than they ought), or in order to
sync up with another clock e.g. through NTP. So it isn't really a bug in the
underlying platform (except perhaps a bug in the lack of a prominent warning
in the documentation), but if you depend on a monotonically increasing
timestamps, you need to use IncreasingTimer.time() instead of the Python
standard library's time.time(). --Zooko 2001-08-04
"""
import time as standardtime
# Here is a global reference to an IncreasingTimer.
# This singleton global IncreasingTimer instance gets created at module load time.
timer = None
class IncreasingTimer:
def __init__(self, inittime=None):
"""
@param inittime starting time (in seconds) or None in which case it
will be initialized to standardtime.time()
"""
if inittime is None:
inittime = standardtime.time()
self.lasttime = inittime # This stores the most recent answer that we returned from time().
self.delta = 0 # We add this to the result from the underlying standardtime.time().
# How big of an increment do we need to add in order to make the new float greater than the old float?
trye = 1.0
while (self.lasttime + trye) > self.lasttime:
olde = trye
trye = trye / 2.0
self._EPSILON = olde
def time(self):
"""
This returns the current time as a float, with as much precision as
the underlying Python interpreter can muster. In addition, successive
calls to time() always return bigger numbers. (standardtime.time()
can sometimes return the same or even a *smaller* number!)
On the other hand, calling time() is a bit slower than calling
standardtime.time(), so you might want to avoid it inside tight loops
and deal with decreasing or identical answers yourself.
Now by definition you cannot "reset" this clock to an earlier state.
This means that if you start a Python interpreter and instantiate an
IncreasingTimer, and then you subsequently realize that your
computer's clock was set to next year, and you set it back to the
correct year, that subsequent calls to standardtime.time() will return
a number indicating this year and IncreasingTimer.time() will continue
to return a number indicating next year. Therefore, you should use
the answers from IncreasingTimer.time() in such a way that the only
things you depend on are correctness in the relative *order* of two
times, (and, with the following caveat, the relative *difference*
between two times as well), not the global "correctness" of the times
with respect to the rest of the world.
The caveat is that if the underlying answers from standardtime.time()
jump *forward*, then this *does* distort the relative difference
between two answers from IncreasingTimer.time(). What
IncreasingTimer.time() does is if the underlying clock goes
*backwards*, then IncreasingTimer.time() still returns successively
higher numbers. Then if the underlying clock jumps *forwards*,
IncreasingTimer.time() also jumps forward the same amount. A weird
consequence of this is that if you were to set your system clock to
point to 10 years ago, and call:
t1 = increasingtimer.time()
and then set your system clock back to the present, and call:
t2 = increasingtimer.time()
, then there would be a 10-year difference between t2 and t1.
In practice, adjustments to the underlying system time are rarely that
drastic, and for some systems (e.g. Mnet's DoQ, for which this module
was invented) it doesn't matter anyway if time jumps forward.
Another note: Brian Warner has pointed out that there is another
caveat, which is due to there being a delay between successive calls
to IncreasingTimer.time(). When the underlying clock jumps backward,
then events which were scheduled before the jump and scheduled to go
off after the jump may be delayed by at most d, where d is the delay
between the two successive calls to IncreasingTimer which spanned the
jump.
@singlethreaded You must guarantee that you never have more than one
thread in this function at a time.
"""
t = standardtime.time() + self.delta
lasttime = self.lasttime
if t <= lasttime:
self.delta = self.delta + (lasttime - t) + self._EPSILON
t = lasttime + self._EPSILON
# TODO: If you were sure that you could generate a bigger float in one
# pass, you could change this `while' to an `if' and optimize out a
# test.
while t <= lasttime:
                # We can get into here only if self._EPSILON is too small to make
                # the time float "tick over" to a new higher value. So we
                # (permanently) double self._EPSILON.
# TODO: Is doubling epsilon the best way to quickly get a
# minimally bigger float?
self._EPSILON = self._EPSILON * 2.0
# Delta, having smaller magnitude than t, can be incremented by
# more than t was incremented. (Up to the old epsilon more.)
# That's OK.
self.delta = self.delta + self._EPSILON
t = t + self._EPSILON
self.lasttime = t
return t
# create the global IncreasingTimer instance and `time' function
timer = IncreasingTimer()
time = timer.time
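# --- Usage sketch (not part of the original module) ---
# Successive calls through the module-level `time' alias are strictly
# increasing, unlike standardtime.time() which may repeat or go backwards:
def _example_increasing_timer():
    t1 = time()
    t2 = time()
    assert t2 > t1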
| 6,607 | Python | .py | 114 | 50.315789 | 110 | 0.698192 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,395 | benchutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/benchutil.py |
# Copyright (c) 2002-2013 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Benchmark a function for its behavior with respect to N.
How to use this module:
1. Define a function which runs the code that you want to benchmark. The
function takes a single argument which is the size of the task (i.e. the "N"
parameter). Pass this function as the first argument to rep_bench(), and N as
the second, e.g.:
>>> from pyutil.benchutil import rep_bench
>>> def fib(N):
... if N <= 1:
... return 1
... else:
... return fib(N-1) + fib(N-2)
...
>>> rep_bench(fib, 25, UNITS_PER_SECOND=1000)
best: 1.968e+00, 3th-best: 1.987e+00, mean: 2.118e+00, 3th-worst: 2.175e+00, worst: 2.503e+00 (of 10)
The output is reporting the number of milliseconds that executing the
function took, divided by N, from ten different invocations of
fib(). It reports the best, worst, M-th best, M-th worst, and mean,
where "M" is 1/4 of the number of invocations (in this case 10).
2. Now run it with different values of N and look for patterns:
>>> for N in 1, 5, 9, 13, 17, 21:
... print "%2d" % N,
... rep_bench(fib, N, UNITS_PER_SECOND=1000000)
...
1 best: 9.537e-01, 3th-best: 9.537e-01, mean: 1.121e+00, 3th-worst: 1.192e+00, worst: 2.146e+00 (of 10)
5 best: 5.722e-01, 3th-best: 6.199e-01, mean: 7.200e-01, 3th-worst: 8.106e-01, worst: 8.106e-01 (of 10)
9 best: 2.437e+00, 3th-best: 2.464e+00, mean: 2.530e+00, 3th-worst: 2.570e+00, worst: 2.676e+00 (of 10)
13 best: 1.154e+01, 3th-best: 1.168e+01, mean: 5.638e+01, 3th-worst: 1.346e+01, worst: 4.478e+02 (of 10)
17 best: 6.230e+01, 3th-best: 6.247e+01, mean: 6.424e+01, 3th-worst: 6.460e+01, worst: 7.294e+01 (of 10)
21 best: 3.376e+02, 3th-best: 3.391e+02, mean: 3.521e+02, 3th-worst: 3.540e+02, worst: 3.963e+02 (of 10)
>>> print_bench_footer(UNITS_PER_SECOND=1000000)
all results are in time units per N
time units per second: 1000000; seconds per time unit: 0.000001
(The pattern here is that as N grows, the time per N grows.)
3. If you need to do some setting up before the code can run, then put the
setting-up code into a separate function so that it won't be included in the
timing measurements. A good way to share state between the setting-up function
and the main function is to make them be methods of the same object, e.g.:
>>> import random
>>> class O:
... def __init__(self):
... self.l = []
... def setup(self, N):
... del self.l[:]
... self.l.extend(range(N))
... random.shuffle(self.l)
... def sort(self, N):
... self.l.sort()
...
>>> o = O()
>>> for N in 1000, 10000, 100000, 1000000:
... print "%7d" % N,
... rep_bench(o.sort, N, initfunc=o.setup)
...
1000 best: 4.830e+02, 3th-best: 4.950e+02, mean: 5.730e+02, 3th-worst: 5.858e+02, worst: 7.451e+02 (of 10)
10000 best: 6.342e+02, 3th-best: 6.367e+02, mean: 6.678e+02, 3th-worst: 6.851e+02, worst: 7.848e+02 (of 10)
100000 best: 8.309e+02, 3th-best: 8.338e+02, mean: 8.435e+02, 3th-worst: 8.540e+02, worst: 8.559e+02 (of 10)
1000000 best: 1.327e+03, 3th-best: 1.339e+03, mean: 1.349e+03, 3th-worst: 1.357e+03, worst: 1.374e+03 (of 10)
4. Useful fact! rep_bench() returns a dict containing the numbers.
5. Things to fix:
a. I used to have it hooked up to use the "hotshot" profiler on the
code being measured. I recently tried to change it to use the newer
cProfile profiler instead, but I don't understand the interface to
cProfiler so it just gives an exception if you pass
profile=True. Please fix this and send me a patch. xxx change it to
statprof
b. Wouldn't it be great if this script emitted results in a json format that
was understood by a tool to make pretty interactive explorable graphs? The
pretty graphs could look like those on http://speed.pypy.org/ . Please make
this work and send me a patch!
"""
import cProfile, operator, time
from decimal import Decimal as D
#from pyutil import jsonutil as json
import platform
if 'windows' in platform.system().lower():
clock = time.clock
else:
clock = time.time
from assertutil import _assert
def makeg(func):
def blah(n, func=func):
for i in xrange(n):
func()
return blah
def to_decimal(x):
"""
    See if D(x) returns something. If instead it raises TypeError, x must have been a float, so convert it to Decimal by way of string. (In Python >= 2.7, D(x) does this automatically.)
"""
try:
return D(x)
except TypeError:
return D("%0.54f" % (x,))
def mult(a, b):
"""
If we get TypeError from * (possibly because one is float and the other is Decimal), then promote them both to Decimal.
"""
try:
return a * b
except TypeError:
return to_decimal(a) * to_decimal(b)
def rep_bench(func, n, runtime=1.0, initfunc=None, MAXREPS=10, MAXTIME=60.0, profile=False, profresults="pyutil-benchutil.prof", UNITS_PER_SECOND=1, quiet=False):
"""
Will run the func up to MAXREPS times, but won't start a new run if MAXTIME
(wall-clock time) has already elapsed (unless MAXTIME is None).
@param quiet Don't print anything--just return the results dict.
"""
assert isinstance(n, int), (n, type(n))
global worstemptymeasure
emsta = clock()
do_nothing(2**32)
emstop = clock()
empty = emstop - emsta
if empty > worstemptymeasure:
worstemptymeasure = empty
if (worstemptymeasure*2) >= runtime:
raise BadMeasure("Apparently simply invoking an empty Python function can take as long as %0.10f seconds, and we were running iterations for only about %0.10f seconds. So the measurement of the runtime of the code under benchmark is not reliable. Please pass a higher number for the 'runtime' argument to bench_it().")
startwallclocktime = time.time()
tls = [] # (elapsed time per iter in seconds, iters)
bmes = []
while ((len(tls) < MAXREPS) or (MAXREPS is None)) and ((MAXTIME is None) or ((time.time() - startwallclocktime) < MAXTIME)):
if initfunc:
initfunc(n)
try:
tl, iters = bench_it(func, n, runtime=runtime, profile=profile, profresults=profresults)
except BadMeasure, bme:
bmes.append(bme)
else:
tls.append((tl, iters))
if len(tls) == 0:
raise Exception("Couldn't get any measurements within time limits or number-of-attempts limits. Maybe something is wrong with your clock? %s" % (bmes,))
sumtls = sum([tl for (tl, iters) in tls])
mean = sumtls / len(tls)
tls.sort()
worst = tls[-1][0]
best = tls[0][0]
m = len(tls)/4
if m > 0:
mthbest = tls[m-1][0]
mthworst = tls[-m][0]
else:
mthbest = tls[0][0]
mthworst = tls[-1][0]
# The +/-0 index is the best/worst, the +/-1 index is the 2nd-best/worst,
# etc, so we use mp1 to name it.
mp1 = m+1
res = {
'worst': mult(worst, UNITS_PER_SECOND)/n,
'best': mult(best, UNITS_PER_SECOND)/n,
'mp1': mp1,
'mth-best': mult(mthbest, UNITS_PER_SECOND)/n,
'mth-worst': mult(mthworst, UNITS_PER_SECOND)/n,
'mean': mult(mean, UNITS_PER_SECOND)/n,
'num': len(tls),
}
if not quiet:
print "best: %(best)#8.03e, %(mp1)3dth-best: %(mth-best)#8.03e, mean: %(mean)#8.03e, %(mp1)3dth-worst: %(mth-worst)#8.03e, worst: %(worst)#8.03e (of %(num)6d)" % res
return res
MARGINOFERROR = 10
worstemptymeasure = 0
class BadMeasure(Exception):
""" Either the clock wrapped (which happens with time.clock()) or
it went backwards (which happens with time.time() on rare
occasions), (or the code being measured completed before a single
clock tick). """
def __init__(self, startt, stopt, clock):
self.startt = startt
self.stopt = stopt
self.clock = clock
def __repr__(self):
return "<%s %s - %s (%s)>" % (self.__class__.__name__, self.startt, self.stopt, self.clock)
def do_nothing(n):
pass
def bench_it(func, n, runtime=1.0, profile=False, profresults="pyutil-benchutil.prof"):
if profile:
        raise NotImplementedError("profile=True is not currently supported")
else:
iters = 0
st = clock()
deadline = st + runtime
sto = clock()
while sto < deadline:
func(n)
iters += 1
sto = clock()
timeelapsed = sto - st
if (timeelapsed <= 0) or (iters == 0):
raise BadMeasure((timeelapsed, iters))
return (timeelapsed / iters, iters)
def bench(func, initfunc=None, TOPXP=21, MAXREPS=5, MAXTIME=60.0, profile=False, profresults="pyutil-benchutil.prof", outputjson=False, jsonresultsfname="pyutil-benchutil-results.json", UNITS_PER_SECOND=1):
BSIZES = []
for i in range(TOPXP-6, TOPXP+1, 2):
n = int(2 ** i)
if n < 1:
n = 1
if BSIZES and n <= BSIZES[-1]:
n *= 2
BSIZES.append(n)
res = {}
for BSIZE in BSIZES:
print "N: %7d," % BSIZE,
r = rep_bench(func, BSIZE, initfunc=initfunc, MAXREPS=MAXREPS, MAXTIME=MAXTIME, profile=profile, profresults=profresults, UNITS_PER_SECOND=UNITS_PER_SECOND)
res[BSIZE] = r
#if outputjson:
# write_file(jsonresultsfname, json.dumps(res))
return res
def print_bench_footer(UNITS_PER_SECOND=1):
print "all results are in time units per N"
print "time units per second: %s; seconds per time unit: %s" % (UNITS_PER_SECOND, D(1)/UNITS_PER_SECOND)
| 9,510 | Python | .py | 212 | 40.089623 | 330 | 0.65277 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,396 | observer.py | CouchPotato_CouchPotatoServer/libs/pyutil/observer.py |
# -*- test-case-name: allmydata.test.test_observer -*-
from twisted.internet import defer
try:
from foolscap.eventual import eventually
eventually # http://divmod.org/trac/ticket/1499
except ImportError:
from twisted.internet import reactor
def eventually(f, *args, **kwargs):
return reactor.callLater(0, f, *args, **kwargs)
"""The idiom we use is for the observed object to offer a method named
'when_something', which returns a deferred. That deferred will be fired when
something happens. The way this is typically implemented is that the observed
has an ObserverList whose when_fired method is called in the observed's
'when_something'."""
class OneShotObserverList:
"""A one-shot event distributor."""
def __init__(self):
self._fired = False
self._result = None
self._watchers = []
self.__repr__ = self._unfired_repr
def _unfired_repr(self):
return "<OneShotObserverList [%s]>" % (self._watchers, )
def _fired_repr(self):
return "<OneShotObserverList -> %s>" % (self._result, )
def _get_result(self):
return self._result
def when_fired(self):
if self._fired:
return defer.succeed(self._get_result())
d = defer.Deferred()
self._watchers.append(d)
return d
def fire(self, result):
assert not self._fired
self._fired = True
self._result = result
self._fire(result)
def _fire(self, result):
for w in self._watchers:
eventually(w.callback, result)
del self._watchers
self.__repr__ = self._fired_repr
def fire_if_not_fired(self, result):
if not self._fired:
self.fire(result)
class LazyOneShotObserverList(OneShotObserverList):
"""
a variant of OneShotObserverList which does not retain
the result it handles, but rather retains a callable()
    through which it retrieves the data if and when needed.
"""
def __init__(self):
OneShotObserverList.__init__(self)
def _get_result(self):
return self._result_producer()
def fire(self, result_producer):
"""
@param result_producer: a no-arg callable which
returns the data which is to be considered the
'result' for this observer list. note that this
function may be called multiple times - once
upon initial firing, and potentially once more
for each subsequent when_fired() deferred created
"""
assert not self._fired
self._fired = True
self._result_producer = result_producer
if self._watchers: # if not, don't call result_producer
self._fire(self._get_result())
class ObserverList:
"""A simple class to distribute events to a number of subscribers."""
def __init__(self):
self._watchers = []
def subscribe(self, observer):
self._watchers.append(observer)
def unsubscribe(self, observer):
self._watchers.remove(observer)
def notify(self, *args, **kwargs):
for o in self._watchers:
eventually(o, *args, **kwargs)
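# --- Usage sketch (not part of the original module) ---
# The 'when_something' idiom described at the top of this module, in minimal
# form: the observed object exposes a when_* method whose deferred is fired
# through an internal OneShotObserverList.
class _ExampleDownload:
    def __init__(self):
        self._done_observers = OneShotObserverList()
    def when_done(self):
        return self._done_observers.when_fired()
    def _finished(self, result):
        self._done_observers.fire(result)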
| 3,143 | Python | .py | 81 | 31.802469 | 78 | 0.652431 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,397 | lineutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/lineutil.py |
#!/usr/bin/env python
import os, re
def lineify_fileobjs(ifo, ofo, strip=False):
from pyutil.strutil import pop_trailing_newlines, split_on_newlines
for l in ifo:
for sl in split_on_newlines(pop_trailing_newlines(l)):
if strip:
sl = sl.strip()
ofo.write(pop_trailing_newlines(sl) + '\n')
def lineify_file(fname, strip=False, nobak=True):
f = open(fname, "rU")
from pyutil.fileutil import ReopenableNamedTemporaryFile
rntf = ReopenableNamedTemporaryFile()
fo = open(rntf.name, "wb")
for l in f:
if strip:
l = l.strip() + '\n'
fo.write(l)
fo.close()
import shutil
if not nobak:
shutil.copyfile(fname, fname + ".lines.py-bak")
try:
shutil.move(rntf.name, fname)
except EnvironmentError:
# Couldn't atomically overwrite, so just hope that this process doesn't die
# and the target file doesn't get recreated in between the following two
# operations:
if nobak:
os.remove(fname)
else:
shutil.move(fname, fname + ".lines.py-bak-2")
shutil.move(rntf.name, fname)
def darcs_metadir_dirpruner(dirs):
if "_darcs" in dirs:
dirs.remove("_darcs")
SCRE=re.compile("\\.(py|php|c|h|cpp|hpp|txt|sh|pyx|pxi|html|htm)$|makefile$", re.IGNORECASE)
def source_code_filepruner(fname):
return SCRE.search(fname)
def all_filepruner(fname):
return True
def all_dirpruner(dirs):
return
def lineify_all_files(dirname, strip=False, nobak=True, dirpruner=all_dirpruner, filepruner=all_filepruner):
for (root, dirs, files,) in os.walk(dirname):
dirpruner(dirs)
for fname in files:
fullfname = os.path.join(root, fname)
if filepruner(fullfname):
lineify_file(fullfname, strip=strip, nobak=nobak)
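# --- Usage sketch (not part of the original module) ---
# A typical invocation of lineify_all_files(): normalize the line endings of
# source files under a hypothetical project directory, skipping any _darcs
# metadata directories.
def _example_lineify_project(projectdir="./myproject"):
    lineify_all_files(projectdir, strip=True,
                      dirpruner=darcs_metadir_dirpruner,
                      filepruner=source_code_filepruner)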
| 1,661 | Python | .py | 51 | 29.941176 | 108 | 0.738452 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,398 | verlib.py | CouchPotato_CouchPotatoServer/libs/pyutil/verlib.py |
# -*- coding: utf-8 -*-
"""
"Rational" version definition and parsing for DistutilsVersionFight
discussion at PyCon 2009.
This was written by Tarek Ziadé.
Zooko copied it from http://bitbucket.org/tarek/distutilsversion/ on 2010-07-29.
"""
import re
class IrrationalVersionError(Exception):
"""This is an irrational version."""
pass
class HugeMajorVersionNumError(IrrationalVersionError):
"""An irrational version because the major version number is huge
(often because a year or date was used).
See `error_on_huge_major_num` option in `NormalizedVersion` for details.
This guard can be disabled by setting that option False.
"""
pass
class PreconditionViolationException(Exception):
pass
# A marker used in the second and third parts of the `parts` tuple, for
# versions that don't have those segments, to sort properly. An example
# of versions in sort order ('highest' last):
# 1.0b1 ((1,0), ('b',1), ('f',))
# 1.0.dev345 ((1,0), ('f',), ('dev', 345))
# 1.0 ((1,0), ('f',), ('f',))
# 1.0.post256.dev345 ((1,0), ('f',), ('f', 'post', 256, 'dev', 345))
# 1.0.post345 ((1,0), ('f',), ('f', 'post', 345, 'f'))
# ^ ^ ^
# 'b' < 'f' ---------------------/ | |
# | |
# 'dev' < 'f' < 'post' -------------------/ |
# |
# 'dev' < 'f' ----------------------------------------------/
# Other letters would do, but 'f' for 'final' is kind of nice.
FINAL_MARKER = ('f',)
VERSION_RE = re.compile(r'''
^
(?P<version>\d+\.\d+) # minimum 'N.N'
(?P<extraversion>(?:\.\d+)*) # any number of extra '.N' segments
(?:
(?P<prerel>[abc]|rc) # 'a'=alpha, 'b'=beta, 'c'=release candidate
# 'rc'= alias for release candidate
(?P<prerelversion>\d+(?:\.\d+)*)
)?
(?P<postdev>(\.post(?P<post>\d+)|-r(?P<oldpost>\d+))?(\.dev(?P<dev>\d+))?)?
$''', re.VERBOSE)
class NormalizedVersion(object):
"""A rational version.
Good:
1.2 # equivalent to "1.2.0"
1.2.0
1.2a1
1.2.3a2
1.2.3b1
1.2.3c1
1.2.3.4
TODO: fill this out
Bad:
        1            # minimum two numbers
1.2a # release level must have a release serial
1.2.3b
"""
def __init__(self, s, error_on_huge_major_num=True):
"""Create a NormalizedVersion instance from a version string.
@param s {str} The version string.
@param error_on_huge_major_num {bool} Whether to consider an
apparent use of a year or full date as the major version number
an error. Default True. One of the observed patterns on PyPI before
the introduction of `NormalizedVersion` was version numbers like this:
2009.01.03
20040603
2005.01
This guard is here to strongly encourage the package author to
use an alternate version, because a release deployed into PyPI
and, e.g. downstream Linux package managers, will forever remove
the possibility of using a version number like "1.0" (i.e.
where the major number is less than that huge major number).
"""
self._parse(s, error_on_huge_major_num)
@classmethod
def from_parts(cls, version, prerelease=FINAL_MARKER,
devpost=FINAL_MARKER):
return cls(cls.parts_to_str((version, prerelease, devpost)))
def _parse(self, s, error_on_huge_major_num=True):
"""Parses a string version into parts."""
if not isinstance(s, basestring):
raise PreconditionViolationException("s is required to be a string: %s :: %s" % (s, type(s)))
match = VERSION_RE.search(s)
if not match:
raise IrrationalVersionError(s)
groups = match.groupdict()
parts = []
# main version
block = self._parse_numdots(groups['version'], s, False, 2)
extraversion = groups.get('extraversion')
if extraversion not in ('', None):
block += self._parse_numdots(extraversion[1:], s)
parts.append(tuple(block))
# prerelease
prerel = groups.get('prerel')
if prerel is not None:
block = [prerel]
block += self._parse_numdots(groups.get('prerelversion'), s,
pad_zeros_length=1)
parts.append(tuple(block))
else:
parts.append(FINAL_MARKER)
# postdev
if groups.get('postdev'):
post = groups.get('post') or groups.get('oldpost')
dev = groups.get('dev')
postdev = []
if post is not None:
postdev.extend([FINAL_MARKER[0], 'post', int(post)])
if dev is None:
postdev.append(FINAL_MARKER[0])
if dev is not None:
postdev.extend(['dev', int(dev)])
parts.append(tuple(postdev))
else:
parts.append(FINAL_MARKER)
self.parts = tuple(parts)
if error_on_huge_major_num and self.parts[0][0] > 1980:
raise HugeMajorVersionNumError("huge major version number, %r, "
"which might cause future problems: %r" % (self.parts[0][0], s))
def _parse_numdots(self, s, full_ver_str, drop_trailing_zeros=True,
pad_zeros_length=0):
"""Parse 'N.N.N' sequences, return a list of ints.
@param s {str} 'N.N.N...' sequence to be parsed
@param full_ver_str {str} The full version string from which this
comes. Used for error strings.
@param drop_trailing_zeros {bool} Whether to drop trailing zeros
from the returned list. Default True.
@param pad_zeros_length {int} The length to which to pad the
returned list with zeros, if necessary. Default 0.
"""
nums = []
for n in s.split("."):
if len(n) > 1 and n[0] == '0':
raise IrrationalVersionError("cannot have leading zero in "
"version number segment: '%s' in %r" % (n, full_ver_str))
nums.append(int(n))
if drop_trailing_zeros:
while nums and nums[-1] == 0:
nums.pop()
while len(nums) < pad_zeros_length:
nums.append(0)
return nums
def __str__(self):
return self.parts_to_str(self.parts)
@classmethod
def parts_to_str(cls, parts):
"""Transforms a version expressed in tuple into its string
representation."""
# XXX This doesn't check for invalid tuples
main, prerel, postdev = parts
s = '.'.join(str(v) for v in main)
if prerel is not FINAL_MARKER:
s += prerel[0]
s += '.'.join(str(v) for v in prerel[1:])
if postdev and postdev is not FINAL_MARKER:
if postdev[0] == 'f':
postdev = postdev[1:]
i = 0
while i < len(postdev):
if i % 2 == 0:
s += '.'
s += str(postdev[i])
i += 1
return s
def __repr__(self):
return "%s('%s')" % (self.__class__.__name__, self)
def _cannot_compare(self, other):
raise TypeError("cannot compare %s and %s"
% (type(self).__name__, type(other).__name__))
def __eq__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts == other.parts
def __lt__(self, other):
if not isinstance(other, NormalizedVersion):
self._cannot_compare(other)
return self.parts < other.parts
def __ne__(self, other):
return not self.__eq__(other)
def __gt__(self, other):
return not (self.__lt__(other) or self.__eq__(other))
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
def suggest_normalized_version(s):
"""Suggest a normalized version close to the given version string.
If you have a version string that isn't rational (i.e. NormalizedVersion
doesn't like it) then you might be able to get an equivalent (or close)
rational version from this function.
This does a number of simple normalizations to the given string, based
on observation of versions currently in use on PyPI. Given a dump of
those version during PyCon 2009, 4287 of them:
- 2312 (53.93%) match NormalizedVersion without change
- with the automatic suggestion
- 3474 (81.04%) match when using this suggestion method
@param s {str} An irrational version string.
@returns A rational version string, or None, if couldn't determine one.
"""
try:
NormalizedVersion(s)
return s # already rational
except IrrationalVersionError:
pass
rs = s.lower()
# part of this could use maketrans
for orig, repl in (('-alpha', 'a'), ('-beta', 'b'), ('alpha', 'a'),
('beta', 'b'), ('rc', 'c'), ('-final', ''),
('-pre', 'c'),
('-release', ''), ('.release', ''), ('-stable', ''),
('+', '.'), ('_', '.'), (' ', ''), ('.final', ''),
('final', '')):
rs = rs.replace(orig, repl)
# if something ends with dev or pre, we add a 0
rs = re.sub(r"pre$", r"pre0", rs)
rs = re.sub(r"dev$", r"dev0", rs)
# if we have something like "b-2" or "a.2" at the end of the
    # version, that is probably beta, alpha, etc
# let's remove the dash or dot
rs = re.sub(r"([abc|rc])[\-\.](\d+)$", r"\1\2", rs)
# 1.0-dev-r371 -> 1.0.dev371
# 0.1-dev-r79 -> 0.1.dev79
rs = re.sub(r"[\-\.](dev)[\-\.]?r?(\d+)$", r".\1\2", rs)
# Clean: 2.0.a.3, 2.0.b1, 0.9.0~c1
rs = re.sub(r"[.~]?([abc])\.?", r"\1", rs)
# Clean: v0.3, v1.0
if rs.startswith('v'):
rs = rs[1:]
# Clean leading '0's on numbers.
#TODO: unintended side-effect on, e.g., "2003.05.09"
# PyPI stats: 77 (~2%) better
rs = re.sub(r"\b0+(\d+)(?!\d)", r"\1", rs)
# Clean a/b/c with no version. E.g. "1.0a" -> "1.0a0". Setuptools infers
# zero.
# PyPI stats: 245 (7.56%) better
rs = re.sub(r"(\d+[abc])$", r"\g<1>0", rs)
# the 'dev-rNNN' tag is a dev tag
rs = re.sub(r"\.?(dev-r|dev\.r)\.?(\d+)$", r".dev\2", rs)
# clean the - when used as a pre delimiter
rs = re.sub(r"-(a|b|c)(\d+)$", r"\1\2", rs)
# a terminal "dev" or "devel" can be changed into ".dev0"
rs = re.sub(r"[\.\-](dev|devel)$", r".dev0", rs)
# a terminal "dev" can be changed into ".dev0"
rs = re.sub(r"(?![\.\-])dev$", r".dev0", rs)
# a terminal "final" or "stable" can be removed
rs = re.sub(r"(final|stable)$", "", rs)
# The 'r' and the '-' tags are post release tags
# 0.4a1.r10 -> 0.4a1.post10
# 0.9.33-17222 -> 0.9.3.post17222
# 0.9.33-r17222 -> 0.9.3.post17222
rs = re.sub(r"\.?(r|-|-r)\.?(\d+)$", r".post\2", rs)
# Clean 'r' instead of 'dev' usage:
# 0.9.33+r17222 -> 0.9.3.dev17222
# 1.0dev123 -> 1.0.dev123
# 1.0.git123 -> 1.0.dev123
# 1.0.bzr123 -> 1.0.dev123
# 0.1a0dev.123 -> 0.1a0.dev123
# PyPI stats: ~150 (~4%) better
rs = re.sub(r"\.?(dev|git|bzr)\.?(\d+)$", r".dev\2", rs)
# Clean '.pre' (normalized from '-pre' above) instead of 'c' usage:
# 0.2.pre1 -> 0.2c1
# 0.2-c1 -> 0.2c1
# 1.0preview123 -> 1.0c123
# PyPI stats: ~21 (0.62%) better
rs = re.sub(r"\.?(pre|preview|-c)(\d+)$", r"c\g<2>", rs)
# Tcl/Tk uses "px" for their post release markers
rs = re.sub(r"p(\d+)$", r".post\1", rs)
try:
NormalizedVersion(rs)
return rs # already rational
except IrrationalVersionError:
pass
return None
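# --- Usage sketch (not part of the original module) ---
# NormalizedVersion instances compare in the rational order described above,
# and suggest_normalized_version() rewrites common irrational spellings:
def _example_verlib_usage():
    assert NormalizedVersion("1.2a1") < NormalizedVersion("1.2")
    assert NormalizedVersion("1.2") < NormalizedVersion("1.2.post3")
    assert suggest_normalized_version("1.0-alpha2") == "1.0a2"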
| 12,275 | Python | .py | 280 | 35.417857 | 105 | 0.537959 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |
| 7,399 | platformutil.py | CouchPotato_CouchPotatoServer/libs/pyutil/platformutil.py |
# Thanks to Daenyth for help porting this to Arch Linux.
import os, platform, re, subprocess
_distributor_id_cmdline_re = re.compile("(?:Distributor ID:)\s*(.*)", re.I)
_release_cmdline_re = re.compile("(?:Release:)\s*(.*)", re.I)
_distributor_id_file_re = re.compile("(?:DISTRIB_ID\s*=)\s*(.*)", re.I)
_release_file_re = re.compile("(?:DISTRIB_RELEASE\s*=)\s*(.*)", re.I)
global _distname,_version
_distname = None
_version = None
def get_linux_distro():
""" Tries to determine the name of the Linux OS distribution name.
First, try to parse a file named "/etc/lsb-release". If it exists, and
contains the "DISTRIB_ID=" line and the "DISTRIB_RELEASE=" line, then return
the strings parsed from that file.
If that doesn't work, then invoke platform.dist().
If that doesn't work, then try to execute "lsb_release", as standardized in
2001:
http://refspecs.freestandards.org/LSB_1.0.0/gLSB/lsbrelease.html
The current version of the standard is here:
http://refspecs.freestandards.org/LSB_3.2.0/LSB-Core-generic/LSB-Core-generic/lsbrelease.html
    If lsb_release runs successfully, return the distributor id and release values that it emitted, as strings.
Returns a tuple (distname,version). Distname is what LSB calls a
"distributor id", e.g. "Ubuntu". Version is what LSB calls a "release",
e.g. "8.04".
A version of this has been submitted to python as a patch for the standard
library module "platform":
http://bugs.python.org/issue3937
"""
global _distname,_version
if _distname and _version:
return (_distname, _version)
try:
etclsbrel = open("/etc/lsb-release", "rU")
for line in etclsbrel:
m = _distributor_id_file_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
m = _release_file_re.search(line)
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
(_distname, _version) = platform.dist()[:2]
if _distname and _version:
return (_distname, _version)
try:
p = subprocess.Popen(["lsb_release", "--all"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
rc = p.wait()
if rc == 0:
for line in p.stdout.readlines():
m = _distributor_id_cmdline_re.search(line)
if m:
_distname = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
                m = _release_cmdline_re.search(line)
if m:
_version = m.group(1).strip()
if _distname and _version:
return (_distname, _version)
except EnvironmentError:
pass
if os.path.exists("/etc/arch-release"):
return ("Arch_Linux", "")
return (_distname,_version)
def get_platform():
# Our version of platform.platform(), telling us both less and more than the
# Python Standard Library's version does.
# We omit details such as the Linux kernel version number, but we add a
# more detailed and correct rendition of the Linux distribution and
# distribution-version.
if "linux" in platform.system().lower():
return platform.system()+"-"+"_".join(get_linux_distro())+"-"+platform.machine()+"-"+"_".join([x for x in platform.architecture() if x])
else:
return platform.platform()
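# --- Usage sketch (not part of the original module) ---
# On Linux get_platform() folds the distro name/version into the platform
# string; elsewhere it falls back to platform.platform().  The exact output
# varies per machine.
def _example_report_platform():
    distname, version = get_linux_distro()
    print "platform: %s (distro: %s %s)" % (get_platform(), distname, version)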
| 3,607 | Python | .py | 79 | 36.848101 | 144 | 0.61363 | CouchPotato/CouchPotatoServer | 3,869 | 1,214 | 1,266 | GPL-3.0 | 9/5/2024, 5:10:17 PM (Europe/Amsterdam) |