| repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated | ratio | config_test | has_no_keywords | few_assignments |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| lincolnloop/salt-stats | salt/_returners/influxdb_return.py | 1 | 2217 |
# -*- coding: utf-8 -*-
'''
Salt returner that reports stats to InfluxDB. The returner will
inspect the payload, coercing values into floats where possible.
Pillar needs something like::
influxdb_returner:
url: http://localhost:8086
user: root
password: root
database: salt
'''
import base64
import fnmatch
import logging
import json
import urllib2
logger = logging.getLogger(__name__)
__virtualname__ = 'influxdb'
def __virtual__():
return __virtualname__
def _flatten_values(obj, base=None):
"""
Recursive function to flatten dictionaries and
coerce values to floats.
"""
flattened = {}
# convert list to dictionary
if isinstance(obj, list):
obj = dict([(str(pair[0]), pair[1]) for pair in enumerate(obj)])
elif not isinstance(obj, dict):
obj = {'value': obj}
for key, item in obj.items():
key = base and '.'.join([base, key]) or key
        if isinstance(item, (dict, list)):
            flattened.update(_flatten_values(item, base=key))
        else:
            try:
                flattened[key] = float(item)
            except (TypeError, ValueError):
                # leave non-numeric values (e.g. strings, None) untouched
                flattened[key] = item
return flattened
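# A minimal sketch of what the flattening produces (hypothetical payload;
# nested keys are dot-joined and numeric-looking values become floats):
#   _flatten_values({'cpu': {'user': '1.5', 'idle': 97}, 'loads': [0.1, '0.2']})
#   -> {'cpu.user': 1.5, 'cpu.idle': 97.0, 'loads.0': 0.1, 'loads.1': 0.2}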
def returner(ret):
config = __pillar__.get('influxdb_returner', {})
user = config.get('user', '')
password = config.get('password', '')
database = config.get('database', '')
host = config.get('url', '')
data = _flatten_values(ret['return'])
series = "{host}-{function}".format(host=ret['id'], function=ret['fun'])
logger.debug("InfluxDB series name: %s", series)
payload = json.dumps([{
'name': series,
'columns': data.keys(),
'points': [data.values()],
}])
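    # Shape of the serialized payload (InfluxDB 0.8-style series JSON;
    # the minion id 'web1' and function 'test.ping' are hypothetical):
    #   [{"name": "web1-test.ping", "columns": ["value"], "points": [[1.0]]}]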
headers = {'Content-type': 'application/json',
'Accept': 'text/plain'}
url = "{host}/db/{db}/series?u={user}&p={pw}".format(
host=host, db=database, user=user, pw=password)
req = urllib2.Request(url, payload, headers)
    try:
        handler = urllib2.urlopen(req)
        logger.debug("InfluxDB responded %s", handler.getcode())
        handler.close()
    except urllib2.HTTPError as exp:
        # urlopen raised before returning a handle, so there is nothing to close
        logger.error("InfluxDB request failed with code %s", exp.code)
| mit | 6,890,723,242,404,881,000 | 27.792208 | 76 | 0.603067 | false | 3.764007 | false | false | false |
| rahulraj/web_projects | assignment2/src/photogallery/generator/gallerygenerator.py | 1 | 6537 |
import os
import getopt
import sys
from ..utils.inject import assign_injectables
from ..utils.immutabledict import ImmutableDict
from manifestparser import ManifestParser
from galleryitemfactory import GalleryItemFactory
import exporter
import templatewriter
import copier
class GalleryGenerator(object):
"""
The top level class for the application. This is the only object
that the main function interacts with.
"""
def __init__(self, gallery_item_factory, input_directory, output_directory,
static_files_directory, exporter, template_writer):
"""
Constructor for GalleryGenerator. All needed service objects are injected.
Args:
gallery_item_factory the GalleryItemFactory that creates the items.
input_directory the path of the directory to start in.
output_directory the directory to which files should be written.
static_files_directory the directory containing static files to copy over.
exporter the Exporter to populate the templates.
template_writer the object that writes the templates to disk.
"""
assign_injectables(self, locals())
def run(self):
top_jpeg_directory = \
self.gallery_item_factory.create_directory(self.input_directory)
populated_templates = self.exporter.export(top_jpeg_directory)
self.template_writer.write_templates(populated_templates)
# We need to copy the JPEGs over too, and the CSS
copier.copy_jpegs(self.input_directory, self.output_directory)
copier.copy_css(self.static_files_directory, self.output_directory)
# Also, if there are scripts that enhance the experience,
# copy them over too.
copier.copy_javascript(self.static_files_directory, self.output_directory)
# Also grab a copy of directory_image.jpg
copier.copy_jpegs(self.static_files_directory, self.output_directory)
# And make a symlink for browsing convenience.
self.symlink_index(self.output_directory,
top_jpeg_directory.get_output_file_name())
def symlink_index(self, output_directory, file_name):
"""
Symlink "index.html" to file_name. Presumably, file_name is the top-level
page. This way, the page will appear when someone navigates to the directory
in a web browser.
    Args:
      output_directory the directory in which to create the symlink.
      file_name the name of the file to symlink to.
"""
full_link_name = os.path.join(output_directory, 'index.html')
try:
os.symlink(file_name, full_link_name)
except OSError:
print 'You already have a file named index.html in the output ' + \
'directory, so the symlink failed.'
print "I'll assume that there was a specific page that you wanted to" + \
'display when the user points a browser at the output directory.'
print 'Skipping the symlink...'
def create_gallery_generator(command_line_arguments, css_directory):
"""
Given command line arguments, wire up the application and return
it to the main function. This requires creating most of the objects
described in the other files from this directory.
Args:
command_line_arguments the command line arguments with the program
name removed.
css_directory the directory containing the CSS files.
"""
input_data = parse_command_line_arguments(command_line_arguments)
# First parse the manifest file
with open(input_data['manifest_file'], 'r') as manifest_file:
parser = ManifestParser(manifest_file)
lookup_table = parser.get_json_data()
factory = GalleryItemFactory(lookup_table, input_data['should_prompt'])
template_exporter = exporter.create_photo_directory_exporter()
template_writer = \
templatewriter.create_template_writer(input_data['output_directory'])
return GalleryGenerator(gallery_item_factory=factory,
input_directory=input_data['input_directory'],
output_directory=input_data['output_directory'],
static_files_directory=css_directory,
exporter=template_exporter,
template_writer=template_writer)
def parse_command_line_arguments(command_line_arguments):
"""
Acceptable command line arguments are:
-h, --help -> Prints a help message
-i, --input-directory -> The root directory for the gallery (required)
  -o, --output-directory -> The output directory for the HTML (required)
  -m, --manifest-file -> The JSON manifest file with the JPEGs' metadata (required)
  -n, --no-prompt -> Automatically use inferred names for directories,
       instead of prompting the user.
Args:
command_line_arguments the command line arguments with the program
name removed.
"""
try:
options, arguments = getopt.getopt(command_line_arguments,
"hi:o:m:n", ['help', 'input-directory=', 'output-directory=',
'manifest-file=', 'no-prompt'])
except getopt.GetoptError:
print_usage()
sys.exit(2)
input_data = {'should_prompt': True}
for option, argument in options:
if option in ('-h', '--help'):
print_usage()
sys.exit(0)
elif option in ('-i', '--input-directory'):
if os.path.isdir(argument):
input_data['input_directory'] = argument
else:
print argument, "doesn't appear to be a directory."
print_usage()
sys.exit(1)
elif option in ('-o', '--output-directory'):
input_data['output_directory'] = argument
elif option in ('-m', '--manifest-file'):
if os.path.isfile(argument):
input_data['manifest_file'] = argument
else:
print argument, "file couldn't be read for some reason."
print_usage()
sys.exit(1)
elif option in ('-n', '--no-prompt'):
input_data['should_prompt'] = False
if 'input_directory' not in input_data \
or 'output_directory' not in input_data \
or 'manifest_file' not in input_data:
print_usage()
sys.exit(1)
return ImmutableDict(input_data)
def print_usage():
print "Please call this script with the following arguments:"
print "-i my_pictures/ where my_pictures is the directory containing " + \
"the JPEGs to render (long form: --input-directory=)"
print "-o my_site/ where my_site is the directory in which to " + \
"write the output files (long form: --output-directory=)"
print "-m manifest.json where manifest.json is a manifest file " + \
"describing the JPEGs' metadata as a JSON string (long form:" + \
"--manifest_file=)"
print "-n Automatically infer directory titles instead of asking, " + \
"will ask by default. (long form: --no-prompt)"
print "Calling this script with -h or --help prints this message " + \
"and exits."
| mit | 2,190,217,508,828,373,800 | 39.602484 | 80 | 0.690684 | false | 3.990842 | false | false | false |
| mscuthbert/abjad | abjad/tools/schemetools/Scheme.py | 1 | 10697 |
# -*- encoding: utf-8 -*-
from abjad.tools import stringtools
from abjad.tools.abctools import AbjadValueObject
class Scheme(AbjadValueObject):
r'''Abjad model of Scheme code.
.. container:: example
**Example 1.** A Scheme boolean value:
::
>>> scheme = schemetools.Scheme(True)
>>> print(format(scheme))
##t
.. container:: example
        **Example 2.** A nested Scheme expression:
::
>>> scheme = schemetools.Scheme(
... ('left', (1, 2, False)),
... ('right', (1, 2, 3.3))
... )
>>> print(format(scheme))
#((left (1 2 #f)) (right (1 2 3.3)))
.. container:: example
**Example 3.** A variable-length argument:
::
>>> scheme_1 = schemetools.Scheme(1, 2, 3)
>>> scheme_2 = schemetools.Scheme((1, 2, 3))
>>> format(scheme_1) == format(scheme_2)
True
Scheme wraps nested variable-length arguments in a tuple.
.. container:: example
**Example 4.** A quoted Scheme expression:
::
>>> scheme = schemetools.Scheme((1, 2, 3), quoting="'#")
>>> print(format(scheme))
#'#(1 2 3)
Use the `quoting` keyword to prepend Scheme's various quote, unquote,
unquote-splicing characters to formatted output.
.. container:: example
**Example 5.** A Scheme expression with forced quotes:
::
>>> scheme = schemetools.Scheme('nospaces', force_quotes=True)
>>> print(format(scheme))
#"nospaces"
Use this in certain \override situations when LilyPond's Scheme
interpreter treats unquoted strings as symbols instead of strings.
The string must contain no whitespace for this to work.
.. container:: example
**Example 6.** A Scheme expression of LilyPond functions:
::
>>> function_1 = 'tuplet-number::append-note-wrapper'
>>> function_2 = 'tuplet-number::calc-denominator-text'
>>> string = schemetools.Scheme('4', force_quotes=True)
>>> scheme = schemetools.Scheme(
... function_1,
... function_2,
... string,
... )
>>> scheme
Scheme('tuplet-number::append-note-wrapper', 'tuplet-number::calc-denominator-text', Scheme('4', force_quotes=True))
>>> print(format(scheme))
#(tuplet-number::append-note-wrapper tuplet-number::calc-denominator-text "4")
.. container:: example
**Example 7.** A Scheme lambda expression of LilyPond function that
takes a markup with a quoted string argument. Setting verbatim to true
causes the expression to format exactly as-is without modifying quotes
or whitespace:
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> scheme = schemetools.Scheme(string, verbatim=True)
>>> scheme
Scheme('(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))')
>>> print(format(scheme))
#(lambda (grob) (grob-interpret-markup grob #{ \markup \musicglyph #"noteheads.s0harmonic" #}))
Scheme objects are immutable.
'''
### CLASS VARIABLES ###
__slots__ = (
'_force_quotes',
'_quoting',
'_value',
'_verbatim',
)
### INITIALIZER ###
def __init__(self, *args, **kwargs):
if 1 == len(args):
if isinstance(args[0], type(self)):
args = args[0]._value
else:
args = args[0]
quoting = kwargs.get('quoting')
force_quotes = bool(kwargs.get('force_quotes'))
verbatim = kwargs.get('verbatim')
assert isinstance(quoting, (str, type(None)))
if quoting is not None:
assert all(x in ("'", ',', '@', '`', '#') for x in quoting)
self._force_quotes = force_quotes
self._quoting = quoting
self._value = args
self._verbatim = bool(verbatim)
### SPECIAL METHODS ###
def __format__(self, format_specification=''):
r'''Formats scheme.
        Set `format_specification` to `''`, `'lilypond'` or `'storage'`.
        Interprets `''` as `'lilypond'`.
.. container:: example
**Example 1.** Scheme LilyPond format:
::
>>> scheme = schemetools.Scheme('foo')
>>> format(scheme)
'#foo'
.. container:: example
**Example 2.** Scheme storage format:
::
>>> print(format(scheme, 'storage'))
schemetools.Scheme(
'foo'
)
Returns string.
'''
from abjad.tools import systemtools
if format_specification in ('', 'lilypond'):
return self._lilypond_format
elif format_specification == 'storage':
return systemtools.StorageFormatManager.get_storage_format(self)
return str(self)
def __getnewargs__(self):
r'''Gets new arguments.
Returns tuple.
'''
return (self._value,)
def __str__(self):
r'''String representation of scheme object.
Returns string.
'''
if self._quoting is not None:
return self._quoting + self._formatted_value
return self._formatted_value
### PRIVATE PROPERTIES ###
@property
def _formatted_value(self):
from abjad.tools import schemetools
return schemetools.Scheme.format_scheme_value(
self._value,
force_quotes=self.force_quotes,
verbatim=self.verbatim,
)
@property
def _lilypond_format(self):
if self._quoting is not None:
return '#' + self._quoting + self._formatted_value
return '#%s' % self._formatted_value
@property
def _storage_format_specification(self):
from abjad.tools import systemtools
if stringtools.is_string(self._value):
positional_argument_values = (self._value,)
else:
positional_argument_values = self._value
keyword_argument_names = []
if self.force_quotes:
keyword_argument_names.append('force_quotes')
if self.quoting:
keyword_argument_names.append('quoting')
return systemtools.StorageFormatSpecification(
self,
keyword_argument_names=keyword_argument_names,
positional_argument_values=positional_argument_values,
)
### PUBLIC METHODS ###
@staticmethod
def format_embedded_scheme_value(value, force_quotes=False):
r'''Formats `value` as an embedded Scheme value.
'''
from abjad.tools import datastructuretools
from abjad.tools import schemetools
result = Scheme.format_scheme_value(value, force_quotes=force_quotes)
if isinstance(value, bool):
result = '#{}'.format(result)
elif isinstance(value, datastructuretools.OrdinalConstant):
result = '#{}'.format(repr(value).lower())
elif isinstance(value, str) and not force_quotes:
result = '#{}'.format(result)
elif isinstance(value, schemetools.Scheme):
result = '#{}'.format(result)
return result
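    # A couple of illustrative values, derived from the branches above:
    #   format_embedded_scheme_value(True)  -> '##t'
    #   format_embedded_scheme_value('foo') -> '#foo'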
@staticmethod
def format_scheme_value(value, force_quotes=False, verbatim=False):
r'''Formats `value` as Scheme would.
.. container:: example
**Example 1.** Some basic values:
::
>>> schemetools.Scheme.format_scheme_value(1)
'1'
::
>>> schemetools.Scheme.format_scheme_value('foo')
'foo'
::
>>> schemetools.Scheme.format_scheme_value('bar baz')
'"bar baz"'
::
>>> schemetools.Scheme.format_scheme_value([1.5, True, False])
'(1.5 #t #f)'
.. container:: example
**Example 2.** Strings without whitespace can be forcibly quoted
via the `force_quotes` keyword:
::
>>> schemetools.Scheme.format_scheme_value(
... 'foo',
... force_quotes=True,
... )
'"foo"'
.. container:: example
**Example 3.** Set verbatim to true to format value exactly (with
            only hash prepended):
::
>>> string = '(lambda (grob) (grob-interpret-markup grob'
>>> string += r' #{ \markup \musicglyph #"noteheads.s0harmonic" #}))'
>>> schemetools.Scheme.format_scheme_value(string, verbatim=True)
'(lambda (grob) (grob-interpret-markup grob #{ \\markup \\musicglyph #"noteheads.s0harmonic" #}))'
Returns string.
'''
from abjad.tools import schemetools
if isinstance(value, str) and not verbatim:
value = value.replace('"', r'\"')
if -1 == value.find(' ') and not force_quotes:
return value
return '"{}"'.format(value)
elif isinstance(value, str) and verbatim:
return value
elif isinstance(value, bool):
if value:
return '#t'
return '#f'
elif isinstance(value, (list, tuple)):
return '({})'.format(
' '.join(schemetools.Scheme.format_scheme_value(x)
for x in value))
elif isinstance(value, schemetools.Scheme):
return str(value)
elif isinstance(value, type(None)):
return '#f'
return str(value)
### PUBLIC PROPERTIES ###
@property
def force_quotes(self):
r'''Is true when quotes should be forced in output. Otherwise false.
Returns boolean.
'''
return self._force_quotes
@property
def quoting(self):
r'''Gets Scheme quoting string.
        Returns string.
'''
return self._quoting
@property
def verbatim(self):
r'''Is true when formatting should format value absolutely verbatim.
        Whitespace, quotes and all other parts of value are left intact.
Defaults to false.
Set to true or false.
Returns true or false.
'''
return self._verbatim
| gpl-3.0 | 9,166,033,055,927,871,000 | 29.303116 | 128 | 0.536973 | false | 4.350142 | false | false | false |
| calancha/DIRAC | DataManagementSystem/Agent/RequestOperations/ReplicateAndRegister.py | 1 | 19627 |
########################################################################
# $HeadURL $
# File: ReplicateAndRegister.py
# Author: Krzysztof.Ciba@NOSPAMgmail.com
# Date: 2013/03/13 18:49:12
########################################################################
""" :mod: ReplicateAndRegister
==========================
.. module: ReplicateAndRegister
:synopsis: ReplicateAndRegister operation handler
.. moduleauthor:: Krzysztof.Ciba@NOSPAMgmail.com
ReplicateAndRegister operation handler
"""
__RCSID__ = "$Id $"
# #
# @file ReplicateAndRegister.py
# @author Krzysztof.Ciba@NOSPAMgmail.com
# @date 2013/03/13 18:49:28
# @brief Definition of ReplicateAndRegister class.
# # imports
import re
# # from DIRAC
from DIRAC import S_OK, S_ERROR, gMonitor, gLogger
from DIRAC.Core.Utilities.Adler import compareAdler
from DIRAC.DataManagementSystem.Client.FTSClient import FTSClient
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.DataManagementSystem.Agent.RequestOperations.DMSRequestOperationsBase import DMSRequestOperationsBase
from DIRAC.Resources.Storage.StorageElement import StorageElement
from DIRAC.Resources.Catalog.FileCatalog import FileCatalog
def filterReplicas( opFile, logger = None, dataManager = None, seCache = None ):
""" filter out banned/invalid source SEs """
if not logger:
logger = gLogger
if not dataManager:
dataManager = DataManager()
if not seCache:
seCache = {}
log = logger.getSubLogger( "filterReplicas" )
ret = { "Valid" : [], "NoMetadata" : [], "Bad" : [], 'NoReplicas':[], 'NoPFN':[] }
replicas = dataManager.getActiveReplicas( opFile.LFN )
if not replicas["OK"]:
log.error( replicas["Message"] )
return replicas
reNotExists = re.compile( r".*such file.*" )
replicas = replicas["Value"]
failed = replicas["Failed"].get( opFile.LFN , "" )
if reNotExists.match( failed.lower() ):
opFile.Status = "Failed"
opFile.Error = failed
return S_ERROR( failed )
replicas = replicas["Successful"].get( opFile.LFN, {} )
for repSEName in replicas:
repSE = seCache[repSEName] if repSEName in seCache else \
seCache.setdefault( repSEName, StorageElement( repSEName ) )
pfn = repSE.getPfnForLfn( opFile.LFN )
if not pfn["OK"] or opFile.LFN not in pfn['Value']['Successful']:
log.warn( "unable to create pfn for %s lfn at %s: %s" % ( opFile.LFN,
repSEName,
pfn.get( 'Message', pfn.get( 'Value', {} ).get( 'Failed', {} ).get( opFile.LFN ) ) ) )
ret["NoPFN"].append( repSEName )
else:
pfn = pfn["Value"]['Successful'][ opFile.LFN ]
repSEMetadata = repSE.getFileMetadata( pfn )
error = repSEMetadata.get( 'Message', repSEMetadata.get( 'Value', {} ).get( 'Failed', {} ).get( pfn ) )
if error:
log.warn( 'unable to get metadata at %s for %s' % ( repSEName, opFile.LFN ), error.replace( '\n', '' ) )
if 'File does not exist' in error:
ret['NoReplicas'].append( repSEName )
else:
ret["NoMetadata"].append( repSEName )
else:
repSEMetadata = repSEMetadata['Value']['Successful'][pfn]
seChecksum = repSEMetadata.get( "Checksum" )
if opFile.Checksum and seChecksum and not compareAdler( seChecksum, opFile.Checksum ) :
# The checksum in the request may be wrong, check with FC
fcMetadata = FileCatalog().getFileMetadata( opFile.LFN )
fcChecksum = fcMetadata.get( 'Value', {} ).get( 'Successful', {} ).get( opFile.LFN, {} ).get( 'Checksum' )
if fcChecksum and fcChecksum != opFile.Checksum and compareAdler( fcChecksum , seChecksum ):
opFile.Checksum = fcChecksum
ret['Valid'].append( repSEName )
else:
log.warn( " %s checksum mismatch, request: %s @%s: %s" % ( opFile.LFN,
opFile.Checksum,
repSEName,
seChecksum ) )
ret["Bad"].append( repSEName )
else:
# # if we're here repSE is OK
ret["Valid"].append( repSEName )
return S_OK( ret )
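# Sketch of a successful return value (SE names are hypothetical):
#   S_OK( { "Valid": ["CERN-USER"], "NoMetadata": [], "Bad": [],
#           "NoReplicas": [], "NoPFN": ["PIC-USER"] } )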
########################################################################
class ReplicateAndRegister( DMSRequestOperationsBase ):
"""
.. class:: ReplicateAndRegister
ReplicateAndRegister operation handler
"""
def __init__( self, operation = None, csPath = None ):
"""c'tor
:param self: self reference
:param Operation operation: Operation instance
:param str csPath: CS path for this handler
"""
super( ReplicateAndRegister, self ).__init__( operation, csPath )
# # own gMonitor stuff for files
gMonitor.registerActivity( "ReplicateAndRegisterAtt", "Replicate and register attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateOK", "Replications successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "ReplicateFail", "Replications failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterOK", "Registrations successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "RegisterFail", "Registrations failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # for FTS
gMonitor.registerActivity( "FTSScheduleAtt", "Files schedule attempted",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleOK", "File schedule successful",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
gMonitor.registerActivity( "FTSScheduleFail", "File schedule failed",
"RequestExecutingAgent", "Files/min", gMonitor.OP_SUM )
# # SE cache
self.seCache = {}
# Clients
self.fc = FileCatalog()
if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ):
self.ftsClient = FTSClient()
def __call__( self ):
""" call me maybe """
# # check replicas first
checkReplicas = self.__checkReplicas()
if not checkReplicas["OK"]:
self.log.error( checkReplicas["Message"] )
if hasattr( self, "FTSMode" ) and getattr( self, "FTSMode" ):
bannedGroups = getattr( self, "FTSBannedGroups" ) if hasattr( self, "FTSBannedGroups" ) else ()
if self.request.OwnerGroup in bannedGroups:
self.log.verbose( "usage of FTS system is banned for request's owner" )
return self.dmTransfer()
return self.ftsTransfer()
return self.dmTransfer()
def __checkReplicas( self ):
""" check done replicas and update file states """
waitingFiles = dict( [ ( opFile.LFN, opFile ) for opFile in self.operation
if opFile.Status in ( "Waiting", "Scheduled" ) ] )
targetSESet = set( self.operation.targetSEList )
replicas = self.fc.getReplicas( waitingFiles.keys() )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
return replicas
reMissing = re.compile( r".*such file.*" )
for failedLFN, errStr in replicas["Value"]["Failed"].items():
waitingFiles[failedLFN].Error = errStr
if reMissing.search( errStr.lower() ):
self.log.error( "file %s does not exists" % failedLFN )
gMonitor.addMark( "ReplicateFail", len( targetSESet ) )
waitingFiles[failedLFN].Status = "Failed"
for successfulLFN, reps in replicas["Value"]["Successful"].items():
if targetSESet.issubset( set( reps ) ):
self.log.info( "file %s has been replicated to all targets" % successfulLFN )
waitingFiles[successfulLFN].Status = "Done"
return S_OK()
def _addMetadataToFiles( self, toSchedule ):
""" Add metadata to those files that need to be scheduled through FTS
toSchedule is a dictionary:
{'lfn1': [opFile, validReplicas, validTargets], 'lfn2': [opFile, validReplicas, validTargets]}
"""
if toSchedule:
self.log.info( "found %s files to schedule, getting metadata from FC" % len( toSchedule ) )
lfns = toSchedule.keys()
else:
self.log.info( "No files to schedule" )
return S_OK()
res = self.fc.getFileMetadata( lfns )
if not res['OK']:
return res
else:
if res['Value']['Failed']:
self.log.warn( "Can't schedule %d files: problems getting the metadata: %s" % ( len( res['Value']['Failed'] ),
', '.join( res['Value']['Failed'] ) ) )
metadata = res['Value']['Successful']
filesToScheduleList = []
for lfnsToSchedule, lfnMetadata in metadata.items():
opFileToSchedule = toSchedule[lfnsToSchedule][0]
opFileToSchedule.GUID = lfnMetadata['GUID']
opFileToSchedule.Checksum = metadata[lfnsToSchedule]['Checksum']
opFileToSchedule.ChecksumType = metadata[lfnsToSchedule]['ChecksumType']
opFileToSchedule.Size = metadata[lfnsToSchedule]['Size']
filesToScheduleList.append( ( opFileToSchedule.toJSON()['Value'],
toSchedule[lfnsToSchedule][1],
toSchedule[lfnsToSchedule][2] ) )
return S_OK( filesToScheduleList )
def _filterReplicas( self, opFile ):
""" filter out banned/invalid source SEs """
return filterReplicas( opFile, logger = self.log, dataManager = self.dm, seCache = self.seCache )
def ftsTransfer( self ):
""" replicate and register using FTS """
self.log.info( "scheduling files in FTS..." )
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark( "FTSScheduleAtt" )
gMonitor.addMark( "FTSScheduleFail" )
return bannedTargets
if bannedTargets['Value']:
return S_OK( "%s targets are banned for writing" % ",".join( bannedTargets['Value'] ) )
# Can continue now
self.log.verbose( "No targets banned for writing" )
toSchedule = {}
for opFile in self.getWaitingFilesList():
opFile.Error = ''
gMonitor.addMark( "FTSScheduleAtt" )
# # check replicas
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas['NoReplicas']
badReplicas = replicas['Bad']
noPFN = replicas['NoPFN']
if validReplicas:
validTargets = list( set( self.operation.targetSEList ) - set( validReplicas ) )
if not validTargets:
self.log.info( "file %s is already present at all targets" % opFile.LFN )
opFile.Status = "Done"
else:
toSchedule[opFile.LFN] = [ opFile, validReplicas, validTargets ]
else:
gMonitor.addMark( "FTSScheduleFail" )
if noMetaReplicas:
self.log.warn( "unable to schedule '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
opFile.Error = "Couldn't get metadata"
elif noReplicas:
self.log.error( "unable to schedule %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Error = 'No replicas found'
opFile.Status = 'Failed'
elif badReplicas:
self.log.error( "unable to schedule %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Error = 'All replicas have a bad checksum'
opFile.Status = 'Failed'
elif noPFN:
self.log.warn( "unable to schedule %s, could not get a PFN at %s" % ( opFile.LFN, ','.join( noPFN ) ) )
res = self._addMetadataToFiles( toSchedule )
if not res['OK']:
return res
else:
filesToScheduleList = res['Value']
if filesToScheduleList:
ftsSchedule = self.ftsClient.ftsSchedule( self.request.RequestID,
self.operation.OperationID,
filesToScheduleList )
if not ftsSchedule["OK"]:
self.log.error( "Completely failed to schedule to FTS:", ftsSchedule["Message"] )
return ftsSchedule
# might have nothing to schedule
ftsSchedule = ftsSchedule["Value"]
if not ftsSchedule:
return S_OK()
self.log.info( "%d files have been scheduled to FTS" % len( ftsSchedule['Successful'] ) )
for opFile in self.operation:
fileID = opFile.FileID
if fileID in ftsSchedule["Successful"]:
gMonitor.addMark( "FTSScheduleOK", 1 )
opFile.Status = "Scheduled"
self.log.debug( "%s has been scheduled for FTS" % opFile.LFN )
elif fileID in ftsSchedule["Failed"]:
gMonitor.addMark( "FTSScheduleFail", 1 )
opFile.Error = ftsSchedule["Failed"][fileID]
if 'sourceSURL equals to targetSURL' in opFile.Error:
# In this case there is no need to continue
opFile.Status = 'Failed'
self.log.warn( "unable to schedule %s for FTS: %s" % ( opFile.LFN, opFile.Error ) )
else:
self.log.info( "No files to schedule after metadata checks" )
# Just in case some transfers could not be scheduled, try them with RM
return self.dmTransfer( fromFTS = True )
def dmTransfer( self, fromFTS = False ):
""" replicate and register using dataManager """
# # get waiting files. If none just return
# # source SE
sourceSE = self.operation.SourceSE if self.operation.SourceSE else None
if sourceSE:
# # check source se for read
bannedSource = self.checkSEsRSS( sourceSE, 'ReadAccess' )
if not bannedSource["OK"]:
gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
gMonitor.addMark( "ReplicateFail", len( self.operation ) )
return bannedSource
if bannedSource["Value"]:
self.operation.Error = "SourceSE %s is banned for reading" % sourceSE
self.log.info( self.operation.Error )
return S_OK( self.operation.Error )
# # check targetSEs for write
bannedTargets = self.checkSEsRSS()
if not bannedTargets['OK']:
gMonitor.addMark( "ReplicateAndRegisterAtt", len( self.operation ) )
gMonitor.addMark( "ReplicateFail", len( self.operation ) )
return bannedTargets
if bannedTargets['Value']:
self.operation.Error = "%s targets are banned for writing" % ",".join( bannedTargets['Value'] )
return S_OK( self.operation.Error )
# Can continue now
self.log.verbose( "No targets banned for writing" )
waitingFiles = self.getWaitingFilesList()
if not waitingFiles:
return S_OK()
# # loop over files
if fromFTS:
self.log.info( "Trying transfer using replica manager as FTS failed" )
else:
self.log.info( "Transferring files using Data manager..." )
for opFile in waitingFiles:
gMonitor.addMark( "ReplicateAndRegisterAtt", 1 )
opFile.Error = ''
lfn = opFile.LFN
# Check if replica is at the specified source
replicas = self._filterReplicas( opFile )
if not replicas["OK"]:
self.log.error( replicas["Message"] )
continue
replicas = replicas["Value"]
validReplicas = replicas["Valid"]
noMetaReplicas = replicas["NoMetadata"]
noReplicas = replicas['NoReplicas']
badReplicas = replicas['Bad']
noPFN = replicas['NoPFN']
if not validReplicas:
gMonitor.addMark( "ReplicateFail" )
if noMetaReplicas:
self.log.warn( "unable to replicate '%s', couldn't get metadata at %s" % ( opFile.LFN, ','.join( noMetaReplicas ) ) )
opFile.Error = "Couldn't get metadata"
elif noReplicas:
self.log.error( "unable to replicate %s, file doesn't exist at %s" % ( opFile.LFN, ','.join( noReplicas ) ) )
opFile.Error = 'No replicas found'
opFile.Status = 'Failed'
elif badReplicas:
self.log.error( "unable to replicate %s, all replicas have a bad checksum at %s" % ( opFile.LFN, ','.join( badReplicas ) ) )
opFile.Error = 'All replicas have a bad checksum'
opFile.Status = 'Failed'
elif noPFN:
self.log.warn( "unable to replicate %s, could not get a PFN" % opFile.LFN )
continue
# # get the first one in the list
if sourceSE not in validReplicas:
if sourceSE:
self.log.warn( "%s is not at specified sourceSE %s, changed to %s" % ( lfn, sourceSE, validReplicas[0] ) )
sourceSE = validReplicas[0]
# # loop over targetSE
catalogs = self.operation.Catalog
if catalogs:
catalogs = [ cat.strip() for cat in catalogs.split( ',' ) ]
for targetSE in self.operation.targetSEList:
# # call DataManager
if targetSE in validReplicas:
self.log.warn( "Request to replicate %s to an existing location: %s" % ( lfn, targetSE ) )
opFile.Status = 'Done'
continue
res = self.dm.replicateAndRegister( lfn, targetSE, sourceSE = sourceSE, catalog = catalogs )
if res["OK"]:
if lfn in res["Value"]["Successful"]:
if "replicate" in res["Value"]["Successful"][lfn]:
repTime = res["Value"]["Successful"][lfn]["replicate"]
prString = "file %s replicated at %s in %s s." % ( lfn, targetSE, repTime )
gMonitor.addMark( "ReplicateOK", 1 )
if "register" in res["Value"]["Successful"][lfn]:
gMonitor.addMark( "RegisterOK", 1 )
regTime = res["Value"]["Successful"][lfn]["register"]
prString += ' and registered in %s s.' % regTime
self.log.info( prString )
else:
gMonitor.addMark( "RegisterFail", 1 )
prString += " but failed to register"
self.log.warn( prString )
opFile.Error = "Failed to register"
# # add register replica operation
registerOperation = self.getRegisterOperation( opFile, targetSE, type = 'RegisterReplica' )
self.request.insertAfter( registerOperation, self.operation )
else:
self.log.error( "failed to replicate %s to %s." % ( lfn, targetSE ) )
gMonitor.addMark( "ReplicateFail", 1 )
opFile.Error = "Failed to replicate"
else:
gMonitor.addMark( "ReplicateFail", 1 )
reason = res["Value"]["Failed"][lfn]
self.log.error( "failed to replicate and register file %s at %s:" % ( lfn, targetSE ), reason )
opFile.Error = reason
else:
gMonitor.addMark( "ReplicateFail", 1 )
opFile.Error = "DataManager error: %s" % res["Message"]
self.log.error( opFile.Error )
if not opFile.Error:
if len( self.operation.targetSEList ) > 1:
self.log.info( "file %s has been replicated to all targetSEs" % lfn )
opFile.Status = "Done"
return S_OK()
| gpl-3.0 | -1,011,045,608,343,982,200 | 40.060669 | 150 | 0.597493 | false | 3.861302 | false | false | false |
| JorrandeWit/ithenticate-api-python | iThenticate/API/Object/data.py | 1 | 1868 |
class Data(dict):
def __init__(self, xml, status=None, messages=None):
"""Process the xml instance into a friendly dictionary."""
content = {
'data': None,
'status': status or 200,
'messages': messages or []
}
struct_nodes = xml.findall('./')
data = self._breakdown_tree(struct_nodes)
content['data'] = data
dict.__init__(self, content)
def _breakdown_tree(self, nodes):
# All properties in a single item
_data = {}
for node in nodes:
if node.tag == 'member':
# Dictionary item
key = node.find('name').text
value_node = node.find('value')[0]
if value_node.tag == 'int':
value = int(value_node.text.strip())
elif value_node.tag in ['array', 'struct', 'data', 'param']:
value = self._breakdown_tree(value_node.findall('./'))
elif value_node.tag == 'string':
try:
value = value_node.text.strip()
except AttributeError:
                            # value_node.text is None for empty string nodes,
                            # so there is nothing to strip
                            value = value_node.text
else:
# dateTime.iso8601 or something exotic
value = value_node.text
_data[key] = value
elif node.tag == 'value':
# Nodes are list items
if not isinstance(_data, list):
_data = []
_data.append(self._breakdown_tree(node.findall('./')))
else:
# Recursively find data as this is not a data node
return self._breakdown_tree(node.findall('./'))
return _data
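    # Sketch of the XML-RPC style input _breakdown_tree walks (hypothetical):
    #   <member><name>id</name><value><int>42</int></value></member>
    #   -> {'id': 42}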
| bsd-2-clause | 3,485,251,996,502,977,000 | 37.122449 | 104 | 0.478051 | false | 4.717172 | false | false | false |
| avaitla/Haskell-to-C---Bridge | pygccxml-1.0.0/pygccxml/parser/config.py | 1 | 7092 |
# Copyright 2004-2008 Roman Yakovenko.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""This module contains the implementation of the L{config_t} class.
"""
import os
import sys
import copy
class parser_configuration_t(object):
"""Configuration object to collect parameters for invoking C++ parser
This class serves as a base class for the parameters that can be used
to customize the call to C++ parser. This class also allows users to work with
relative files paths. In this case files are searched in the following order:
1. current directory
2. working directory
3. additional include paths specified by the user
"""
def __init__( self
, working_directory='.'
, include_paths=None
, define_symbols=None
, undefine_symbols=None
, cflags=""
, compiler=None):
"""Constructor.
"""
object.__init__( self )
self.__working_directory = working_directory
if not include_paths:
include_paths = []
self.__include_paths = include_paths
if not define_symbols:
define_symbols = []
self.__define_symbols = define_symbols
if not undefine_symbols:
undefine_symbols = []
self.__undefine_symbols = undefine_symbols
self.__cflags = cflags
self.__compiler = compiler
def clone(self):
raise NotImplementedError( self.__class__.__name__ )
def __get_working_directory(self):
return self.__working_directory
def __set_working_directory(self, working_dir):
self.__working_directory=working_dir
working_directory = property( __get_working_directory, __set_working_directory )
@property
def include_paths(self):
"""list of include paths to look for header files"""
return self.__include_paths
@property
def define_symbols(self):
"""list of "define" directives """
return self.__define_symbols
@property
def undefine_symbols(self):
"""list of "undefine" directives """
return self.__undefine_symbols
@property
def compiler(self):
"""compiler name to simulate"""
return self.__compiler
def __get_cflags(self):
return self.__cflags
def __set_cflags(self, val):
self.__cflags = val
cflags = property( __get_cflags, __set_cflags
, doc="additional flags to pass to compiler" )
    def __ensure_dir_exists( self, dir_path, meaning ):
        if os.path.isdir( dir_path ):
            return
        # validate the path we were given, not the working directory
        if os.path.exists( dir_path ):
            raise RuntimeError( '%s("%s") should be "directory", not a file.' % ( meaning, dir_path ) )
        else:
            raise RuntimeError( '%s("%s") does not exist!' % ( meaning, dir_path ) )
def raise_on_wrong_settings( self ):
"""validates the configuration settings and raises RuntimeError on error"""
self.__ensure_dir_exists( self.working_directory, 'working directory' )
map( lambda idir: self.__ensure_dir_exists( idir, 'include directory' )
, self.include_paths )
class gccxml_configuration_t(parser_configuration_t):
"""Configuration object to collect parameters for invoking gccxml.
This class serves as a container for the parameters that can be used
to customize the call to gccxml.
"""
def __init__( self
, gccxml_path=''
, working_directory='.'
, include_paths=None
, define_symbols=None
, undefine_symbols=None
, start_with_declarations=None
, ignore_gccxml_output=False
, cflags=""
, compiler=None):
"""Constructor.
"""
parser_configuration_t.__init__( self
, working_directory=working_directory
, include_paths=include_paths
, define_symbols=define_symbols
, undefine_symbols=undefine_symbols
, cflags=cflags
, compiler=compiler)
self.__gccxml_path = gccxml_path
if not start_with_declarations:
start_with_declarations = []
self.__start_with_declarations = start_with_declarations
self.__ignore_gccxml_output = ignore_gccxml_output
def clone(self):
return copy.deepcopy( self )
def __get_gccxml_path(self):
return self.__gccxml_path
def __set_gccxml_path(self, new_path ):
self.__gccxml_path = new_path
gccxml_path = property( __get_gccxml_path, __set_gccxml_path
, doc="gccxml binary location" )
@property
def start_with_declarations(self):
"""list of declarations gccxml should start with, when it dumps declaration tree"""
return self.__start_with_declarations
def __get_ignore_gccxml_output(self):
return self.__ignore_gccxml_output
def __set_ignore_gccxml_output(self, val=True):
self.__ignore_gccxml_output = val
ignore_gccxml_output = property( __get_ignore_gccxml_output, __set_ignore_gccxml_output
, doc="set this property to True, if you want pygccxml to ignore any error\\warning that comes from gccxml" )
def raise_on_wrong_settings( self ):
super( gccxml_configuration_t, self ).raise_on_wrong_settings()
if os.path.isfile( self.gccxml_path ):
return
if sys.platform == 'win32':
gccxml_name = 'gccxml' + '.exe'
environment_var_delimiter = ';'
elif sys.platform == 'linux2' or sys.platform == 'darwin':
gccxml_name = 'gccxml'
environment_var_delimiter = ':'
else:
raise RuntimeError( 'unable to find out location of gccxml' )
may_be_gccxml = os.path.join( self.gccxml_path, gccxml_name )
if os.path.isfile( may_be_gccxml ):
self.gccxml_path = may_be_gccxml
else:
for path in os.environ['PATH'].split( environment_var_delimiter ):
gccxml_path = os.path.join( path, gccxml_name )
if os.path.isfile( gccxml_path ):
self.gccxml_path = gccxml_path
break
else:
                msg = 'gccxml_path("%s") should exist or be a valid file name.' \
                      % self.gccxml_path
raise RuntimeError( msg )
config_t = gccxml_configuration_t  # backward compatibility
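# A minimal usage sketch (hypothetical paths):
#   cfg = gccxml_configuration_t( gccxml_path='/usr/bin/gccxml'
#                                 , include_paths=['/usr/include'] )
#   cfg.raise_on_wrong_settings()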
| bsd-3-clause | 3,308,842,870,037,335,000 | 35.13089 | 145 | 0.558376 | false | 4.488608 | true | false | false |
| LookThisCode/DeveloperBus | Season 2013/Mexico/Projects/Equipo4_MES/website/MES/settings.py | 1 | 2949 |
"""
Django settings for MES project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = ''
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'social.apps.django_app.default',
'south',
'crispy_forms',
'accounts',
'routes'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)
ROOT_URLCONF = 'MES.urls'
WSGI_APPLICATION = 'MES.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
}
}
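# The engine and credentials are deliberately left blank above; a local
# development setup might fill them in like this (hypothetical values):
#   'ENGINE': 'django.db.backends.sqlite3',
#   'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),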
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_CONTEXT_PROCESSORS = (
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'social.apps.django_app.context_processors.backends',
'social.apps.django_app.context_processors.login_redirect',
)
AUTHENTICATION_BACKENDS = (
'social.backends.google.GooglePlusAuth',
'social.apps.django_app.utils.BackendWrapper',
'django.contrib.auth.backends.ModelBackend',
)
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, "templates"),
)
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "assets"),
)
SOCIAL_AUTH_GOOGLE_PLUS_KEY = ''
SOCIAL_AUTH_GOOGLE_PLUS_SECRET = ''
LOGIN_REDIRECT_URL = '/'
| apache-2.0 | 1,220,421,934,049,439,200 | 22.6 | 71 | 0.71414 | false | 3.354949 | false | false | false |
| AlericInglewood/3p-google-breakpad | src/tools/gyp/pylib/gyp/__init__.py | 1 | 20572 |
#!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import copy
import gyp.input
import optparse
import os.path
import re
import shlex
import sys
import traceback
# Default debug modes for GYP
debug = {}
# List of "official" debug modes, but you can use anything you like.
DEBUG_GENERAL = 'general'
DEBUG_VARIABLES = 'variables'
DEBUG_INCLUDES = 'includes'
def DebugOutput(mode, message):
if 'all' in gyp.debug.keys() or mode in gyp.debug.keys():
ctx = ('unknown', 0, 'unknown')
try:
f = traceback.extract_stack(limit=2)
if f:
ctx = f[0][:3]
except:
pass
print '%s:%s:%d:%s %s' % (mode.upper(), os.path.basename(ctx[0]),
ctx[1], ctx[2], message)
def FindBuildFiles():
extension = '.gyp'
files = os.listdir(os.getcwd())
build_files = []
for file in files:
if file.endswith(extension):
build_files.append(file)
return build_files
class GypError(Exception):
"""Error class representing an error, which is to be presented
to the user. The main entry point will catch and display this.
"""
pass
def Load(build_files, format, default_variables={},
includes=[], depth='.', params=None, check=False, circular_check=False):
"""
Loads one or more specified build files.
default_variables and includes will be copied before use.
Returns the generator for the specified format and the
data returned by loading the specified build files.
"""
if params is None:
params = {}
flavor = None
if '-' in format:
format, params['flavor'] = format.split('-', 1)
default_variables = copy.copy(default_variables)
# Default variables provided by this program and its modules should be
# named WITH_CAPITAL_LETTERS to provide a distinct "best practice" namespace,
# avoiding collisions with user and automatic variables.
default_variables['GENERATOR'] = format
# Format can be a custom python file, or by default the name of a module
# within gyp.generator.
if format.endswith('.py'):
generator_name = os.path.splitext(format)[0]
path, generator_name = os.path.split(generator_name)
# Make sure the path to the custom generator is in sys.path
# Don't worry about removing it once we are done. Keeping the path
# to each generator that is used in sys.path is likely harmless and
# arguably a good idea.
path = os.path.abspath(path)
if path not in sys.path:
sys.path.insert(0, path)
else:
generator_name = 'gyp.generator.' + format
# These parameters are passed in order (as opposed to by key)
# because ActivePython cannot handle key parameters to __import__.
generator = __import__(generator_name, globals(), locals(), generator_name)
for (key, val) in generator.generator_default_variables.items():
default_variables.setdefault(key, val)
# Give the generator the opportunity to set additional variables based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateVariables', None):
generator.CalculateVariables(default_variables, params)
# Give the generator the opportunity to set generator_input_info based on
# the params it will receive in the output phase.
if getattr(generator, 'CalculateGeneratorInputInfo', None):
generator.CalculateGeneratorInputInfo(params)
# Fetch the generator specific info that gets fed to input, we use getattr
# so we can default things and the generators only have to provide what
# they need.
generator_input_info = {
'generator_wants_absolute_build_file_paths':
getattr(generator, 'generator_wants_absolute_build_file_paths', False),
'generator_handles_variants':
getattr(generator, 'generator_handles_variants', False),
'non_configuration_keys':
getattr(generator, 'generator_additional_non_configuration_keys', []),
'path_sections':
getattr(generator, 'generator_additional_path_sections', []),
'extra_sources_for_rules':
getattr(generator, 'generator_extra_sources_for_rules', []),
'generator_supports_multiple_toolsets':
getattr(generator, 'generator_supports_multiple_toolsets', False),
'generator_wants_static_library_dependencies_adjusted':
getattr(generator,
'generator_wants_static_library_dependencies_adjusted', True),
'generator_wants_sorted_dependencies':
getattr(generator, 'generator_wants_sorted_dependencies', False),
}
# Process the input specific to this generator.
result = gyp.input.Load(build_files, default_variables, includes[:],
depth, generator_input_info, check, False)
return [generator] + result
def NameValueListToDict(name_value_list):
"""
Takes an array of strings of the form 'NAME=VALUE' and creates a dictionary
of the pairs. If a string is simply NAME, then the value in the dictionary
is set to True. If VALUE can be converted to an integer, it is.
"""
result = { }
for item in name_value_list:
tokens = item.split('=', 1)
if len(tokens) == 2:
# If we can make it an int, use that, otherwise, use the string.
try:
token_value = int(tokens[1])
except ValueError:
token_value = tokens[1]
# Set the variable to the supplied value.
result[tokens[0]] = token_value
else:
# No value supplied, treat it as a boolean and set it.
result[tokens[0]] = True
return result
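# e.g. (hypothetical input):
#   NameValueListToDict(['OS=linux', 'component', 'jobs=4'])
#   -> {'OS': 'linux', 'component': True, 'jobs': 4}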
def ShlexEnv(env_name):
flags = os.environ.get(env_name, [])
if flags:
flags = shlex.split(flags)
return flags
def FormatOpt(opt, value):
if opt.startswith('--'):
return '%s=%s' % (opt, value)
return opt + value
def RegenerateAppendFlag(flag, values, predicate, env_name, options):
"""Regenerate a list of command line flags, for an option of action='append'.
The |env_name|, if given, is checked in the environment and used to generate
an initial list of options, then the options that were specified on the
command line (given in |values|) are appended. This matches the handling of
environment variables and command line flags where command line flags override
the environment, while not requiring the environment to be set when the flags
are used again.
"""
flags = []
if options.use_environment and env_name:
for flag_value in ShlexEnv(env_name):
value = FormatOpt(flag, predicate(flag_value))
if value in flags:
flags.remove(value)
flags.append(value)
if values:
for flag_value in values:
flags.append(FormatOpt(flag, predicate(flag_value)))
return flags
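# e.g. (hypothetical): with GYP_DEFINES='OS=linux' in the environment and
#   options.use_environment set, RegenerateAppendFlag('-D', ['jobs=4'],
#   lambda v: v, 'GYP_DEFINES', options) -> ['-DOS=linux', '-Djobs=4']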
def RegenerateFlags(options):
"""Given a parsed options object, and taking the environment variables into
account, returns a list of flags that should regenerate an equivalent options
object (even in the absence of the environment variables.)
Any path options will be normalized relative to depth.
The format flag is not included, as it is assumed the calling generator will
set that as appropriate.
"""
def FixPath(path):
path = gyp.common.FixIfRelativePath(path, options.depth)
if not path:
return os.path.curdir
return path
def Noop(value):
return value
# We always want to ignore the environment when regenerating, to avoid
# duplicate or changed flags in the environment at the time of regeneration.
flags = ['--ignore-environment']
for name, metadata in options._regeneration_metadata.iteritems():
opt = metadata['opt']
value = getattr(options, name)
value_predicate = metadata['type'] == 'path' and FixPath or Noop
action = metadata['action']
env_name = metadata['env_name']
if action == 'append':
flags.extend(RegenerateAppendFlag(opt, value, value_predicate,
env_name, options))
elif action in ('store', None): # None is a synonym for 'store'.
if value:
flags.append(FormatOpt(opt, value_predicate(value)))
elif options.use_environment and env_name and os.environ.get(env_name):
flags.append(FormatOpt(opt, value_predicate(os.environ.get(env_name))))
elif action in ('store_true', 'store_false'):
if ((action == 'store_true' and value) or
(action == 'store_false' and not value)):
flags.append(opt)
elif options.use_environment and env_name:
print >>sys.stderr, ('Warning: environment regeneration unimplemented '
'for %s flag %r env_name %r' % (action, opt,
env_name))
else:
print >>sys.stderr, ('Warning: regeneration unimplemented for action %r '
'flag %r' % (action, opt))
return flags
class RegeneratableOptionParser(optparse.OptionParser):
def __init__(self):
self.__regeneratable_options = {}
optparse.OptionParser.__init__(self)
def add_option(self, *args, **kw):
"""Add an option to the parser.
This accepts the same arguments as OptionParser.add_option, plus the
following:
regenerate: can be set to False to prevent this option from being included
in regeneration.
env_name: name of environment variable that additional values for this
option come from.
type: adds type='path', to tell the regenerator that the values of
this option need to be made relative to options.depth
"""
env_name = kw.pop('env_name', None)
if 'dest' in kw and kw.pop('regenerate', True):
dest = kw['dest']
# The path type is needed for regenerating, for optparse we can just treat
# it as a string.
type = kw.get('type')
if type == 'path':
kw['type'] = 'string'
self.__regeneratable_options[dest] = {
'action': kw.get('action'),
'type': type,
'env_name': env_name,
'opt': args[0],
}
optparse.OptionParser.add_option(self, *args, **kw)
def parse_args(self, *args):
values, args = optparse.OptionParser.parse_args(self, *args)
values._regeneration_metadata = self.__regeneratable_options
return values, args
def gyp_main(args):
my_name = os.path.basename(sys.argv[0])
parser = RegeneratableOptionParser()
usage = 'usage: %s [options ...] [build_file ...]'
parser.set_usage(usage.replace('%s', '%prog'))
parser.add_option('-D', dest='defines', action='append', metavar='VAR=VAL',
env_name='GYP_DEFINES',
help='sets variable VAR to value VAL')
parser.add_option('-f', '--format', dest='formats', action='append',
env_name='GYP_GENERATORS', regenerate=False,
help='output formats to generate')
parser.add_option('--msvs-version', dest='msvs_version',
regenerate=False,
help='Deprecated; use -G msvs_version=MSVS_VERSION instead')
parser.add_option('-I', '--include', dest='includes', action='append',
metavar='INCLUDE', type='path',
help='files to include in all loaded .gyp files')
parser.add_option('--depth', dest='depth', metavar='PATH', type='path',
help='set DEPTH gyp variable to a relative path to PATH')
parser.add_option('-d', '--debug', dest='debug', metavar='DEBUGMODE',
action='append', default=[], help='turn on a debugging '
'mode for debugging GYP. Supported modes are "variables", '
'"includes" and "general" or "all" for all of them.')
parser.add_option('-S', '--suffix', dest='suffix', default='',
help='suffix to add to generated files')
parser.add_option('-G', dest='generator_flags', action='append', default=[],
metavar='FLAG=VAL', env_name='GYP_GENERATOR_FLAGS',
help='sets generator flag FLAG to VAL')
parser.add_option('--generator-output', dest='generator_output',
action='store', default=None, metavar='DIR', type='path',
env_name='GYP_GENERATOR_OUTPUT',
help='puts generated build files under DIR')
parser.add_option('--ignore-environment', dest='use_environment',
action='store_false', default=True, regenerate=False,
help='do not read options from environment variables')
parser.add_option('--check', dest='check', action='store_true',
help='check format of gyp files')
parser.add_option('--toplevel-dir', dest='toplevel_dir', action='store',
default=None, metavar='DIR', type='path',
help='directory to use as the root of the source tree')
# --no-circular-check disables the check for circular relationships between
# .gyp files. These relationships should not exist, but they've only been
# observed to be harmful with the Xcode generator. Chromium's .gyp files
# currently have some circular relationships on non-Mac platforms, so this
# option allows the strict behavior to be used on Macs and the lenient
# behavior to be used elsewhere.
# TODO(mark): Remove this option when http://crbug.com/35878 is fixed.
parser.add_option('--no-circular-check', dest='circular_check',
action='store_false', default=True, regenerate=False,
help="don't check for circular relationships between files")
# We read a few things from ~/.gyp, so set up a var for that.
home_vars = ['HOME']
if sys.platform in ('cygwin', 'win32'):
home_vars.append('USERPROFILE')
home = None
home_dot_gyp = None
for home_var in home_vars:
home = os.getenv(home_var)
if home != None:
home_dot_gyp = os.path.join(home, '.gyp')
if not os.path.exists(home_dot_gyp):
home_dot_gyp = None
else:
break
# TODO(thomasvl): add support for ~/.gyp/defaults
options, build_files_arg = parser.parse_args(args)
build_files = build_files_arg
if not options.formats:
# If no format was given on the command line, then check the env variable.
generate_formats = []
if options.use_environment:
generate_formats = os.environ.get('GYP_GENERATORS', [])
if generate_formats:
generate_formats = re.split('[\s,]', generate_formats)
if generate_formats:
options.formats = generate_formats
else:
# Nothing in the variable, default based on platform.
if sys.platform == 'darwin':
options.formats = ['xcode']
elif sys.platform in ('win32', 'cygwin'):
options.formats = ['msvs']
else:
options.formats = ['make']
if not options.generator_output and options.use_environment:
g_o = os.environ.get('GYP_GENERATOR_OUTPUT')
if g_o:
options.generator_output = g_o
for mode in options.debug:
gyp.debug[mode] = 1
# Do an extra check to avoid work when we're not debugging.
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, 'running with these options:')
for option, value in sorted(options.__dict__.items()):
if option[0] == '_':
continue
if isinstance(value, basestring):
DebugOutput(DEBUG_GENERAL, " %s: '%s'" % (option, value))
else:
DebugOutput(DEBUG_GENERAL, " %s: %s" % (option, str(value)))
if not build_files:
build_files = FindBuildFiles()
if not build_files:
raise GypError((usage + '\n\n%s: error: no build_file') %
(my_name, my_name))
# TODO(mark): Chromium-specific hack!
# For Chromium, the gyp "depth" variable should always be a relative path
# to Chromium's top-level "src" directory. If no depth variable was set
# on the command line, try to find a "src" directory by looking at the
# absolute path to each build file's directory. The first "src" component
# found will be treated as though it were the path used for --depth.
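# For example (illustrative): a build file at
# /home/user/chromium/src/foo/foo.gyp would have its trailing path
# components trimmed until 'src' is last, yielding a depth of
# /home/user/chromium/src.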
if not options.depth:
for build_file in build_files:
build_file_dir = os.path.abspath(os.path.dirname(build_file))
build_file_dir_components = build_file_dir.split(os.path.sep)
components_len = len(build_file_dir_components)
for index in xrange(components_len - 1, -1, -1):
if build_file_dir_components[index] == 'src':
options.depth = os.path.sep.join(build_file_dir_components)
break
del build_file_dir_components[index]
# If the inner loop found something, break without advancing to another
# build file.
if options.depth:
break
if not options.depth:
raise GypError('Could not automatically locate src directory. This is '
'a temporary Chromium feature that will be removed. Use '
'--depth as a workaround.')
# If toplevel-dir is not set, we assume that depth is the root of our source
# tree.
if not options.toplevel_dir:
options.toplevel_dir = options.depth
# -D on the command line sets variable defaults - D isn't just for define,
# it's for default. Perhaps there should be a way to force (-F?) a
# variable's value so that it can't be overridden by anything else.
cmdline_default_variables = {}
defines = []
if options.use_environment:
defines += ShlexEnv('GYP_DEFINES')
if options.defines:
defines += options.defines
cmdline_default_variables = NameValueListToDict(defines)
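# Illustrative example (assuming NameValueListToDict splits each
# 'NAME=value' item on the first '='): defines such as
#   ['OS=linux', 'component=shared_library']
# become {'OS': 'linux', 'component': 'shared_library'}.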
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL,
"cmdline_default_variables: %s" % cmdline_default_variables)
# Set up includes.
includes = []
# If ~/.gyp/include.gypi exists, it'll be forcibly included into every
# .gyp file that's loaded, before anything else is included.
if home_dot_gyp != None:
default_include = os.path.join(home_dot_gyp, 'include.gypi')
if os.path.exists(default_include):
print 'Using overrides found in ' + default_include
includes.append(default_include)
# Command-line --include files come after the default include.
if options.includes:
includes.extend(options.includes)
# Generator flags should be prefixed with the target generator since they
# are global across all generator runs.
gen_flags = []
if options.use_environment:
gen_flags += ShlexEnv('GYP_GENERATOR_FLAGS')
if options.generator_flags:
gen_flags += options.generator_flags
generator_flags = NameValueListToDict(gen_flags)
if DEBUG_GENERAL in gyp.debug.keys():
DebugOutput(DEBUG_GENERAL, "generator_flags: %s" % generator_flags)
# TODO: Remove this and the option after we've gotten folks to move to the
# generator flag.
if options.msvs_version:
print >>sys.stderr, \
'DEPRECATED: Use generator flag (-G msvs_version=' + \
options.msvs_version + ') instead of --msvs-version=' + \
options.msvs_version
generator_flags['msvs_version'] = options.msvs_version
# Generate all requested formats (use a set in case we got one format request
# twice)
for format in set(options.formats):
params = {'options': options,
'build_files': build_files,
'generator_flags': generator_flags,
'cwd': os.getcwd(),
'build_files_arg': build_files_arg,
'gyp_binary': sys.argv[0],
'home_dot_gyp': home_dot_gyp}
# Start with the default variables from the command line.
[generator, flat_list, targets, data] = Load(build_files, format,
cmdline_default_variables,
includes, options.depth,
params, options.check,
options.circular_check)
# TODO(mark): Pass |data| for now because the generator needs a list of
# build files that came in. In the future, maybe it should just accept
# a list, and not the whole data dict.
# NOTE: flat_list is the flattened dependency graph specifying the order
# that targets may be built. Build systems that operate serially or that
# need to have dependencies defined before dependents reference them should
# generate targets in the order specified in flat_list.
generator.GenerateOutput(flat_list, targets, data, params)
# Done
return 0
def main(args):
try:
return gyp_main(args)
except GypError, e:
sys.stderr.write("gyp: %s\n" % e)
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
|
bsd-3-clause
| -573,254,012,917,891,500
| 38.791103
| 81
| 0.651031
| false
| 4.030564
| false
| false
| false
|
COSMOGRAIL/COSMOULINE
|
pipe/modules/asciidata/asciiheader.py
|
1
|
10614
|
"""
Various header classes to be part of the asciidata class
@author: Martin Kuemmel, Jonas Haase
@organization: Space Telescope - European Coordinating Facility (ST-ECF)
@license: Gnu Public Licence
@contact: mkuemmel@eso.org
@since: 2005/09/13
$LastChangedBy: mkuemmel $
$LastChangedDate: 2008-01-08 18:13:38 +0100 (Tue, 08 Jan 2008) $
$HeadURL: http://astropy.scipy.org/svn/astrolib/trunk/asciidata/Lib/asciiheader.py $
"""
__version__ = "Version 1.1 $LastChangedRevision: 329 $"
import string
import re
from asciiutils import *
class Header(object):
"""
The header object
This object offers the possibility to store additional
information such as change comments or column information.
This additional information may just be present at the
beginning of the data file or later be added.
"""
def __init__(self, filename=None, comment_char=None):
"""
Constructor for the Header class
@param filename: the data file
@type filename: string
@param comment_char: the comment_char string
@type comment_char: string
"""
# store the comment_char
self._comment_char = comment_char
# Fullhdata contains the full unparsed header - probably
# superfluous now
self.Fullhdata = []
# CollInfo is a list of column names extracted from the header;
# please note that it is only current at read-in time and is
# currently not updated when columns are changed
self.CollInfo = []
# SexVectorColls are the known sextractor output parameters which
# come as vectors
self.SexVectorColls = ('MAG_APER','MAGERR_APER','FLUX_RADIUS','FLUX_APER','FLUXERR_APER','VECTOR_SOMFIT','VECTOR_ASSOC','FLUX_GROWTH','VIGNET','VIGNET_SHIFT')
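# Illustrative example (assumption about the padding logic further
# below): if MAG_APER is declared for column 2 and the next declared
# column is 5, the gap is filled with MAG_APER1 and MAG_APER2 so that
# every data column keeps a name.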
# SExtractorFlag marks whether SExtractor-like header information
# was parsed
self.SExtractorFlag = False
# retrieve the comment from the data file
# hdata is the header minus the column info lines
# in case the header column info is invalid at load time, hdata defaults to Fullhdata
if filename == None:
self.hdata = []
else:
self.hdata = self._load_header(filename, comment_char)
# set the number of elements
self._nentry = len(self.hdata)
def __getitem__(self, index):
"""
Defines the list operator for indexing
The method returns the indexed header entry,
if it exists. An error is raised otherwise
@param index: the index of the header entry to be returned
@type index: integer
@return: a header line
@rtype: string
"""
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# return the desired header entry
return self.hdata[index]
def __setitem__(self, index, hentry):
"""
Defines the list operator for indexed assignment
@param index: the index of the header entry to replace
@type index: integer
@param hentry: the new header entry
@type hentry: string
"""
# check whether the target index exists;
# raise error if not
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# split the string to lines
hitems = string.split(string.strip(hentry),'\n')
# check whether more than one line
# wants to be added
if len(hitems) > 1:
raise Exception('Only one line can be set!')
# replace the header entry,
# add a newline if necessary
if hentry[-1] != '\n':
self.hdata[index] = hentry + '\n'
else:
self.hdata[index] = hentry
def __delitem__(self, index):
"""
Deletes an index.
@param index: the index of the header item to be deleted
@type index: integer
"""
# check whether the target index exists;
# raise error if not
if index+1 > self._nentry:
err_msg = 'Index: '+str(index)+' does not exist! The header contains '\
+ str(self._nentry) + ' items!'
raise Exception(err_msg)
# delete the column
del self.hdata[index]
# adjust the number of entries
self._nentry -= 1
def __str__(self):
"""
Defines a string method for the object
@return: the string representation
@rtype: string
"""
# start the string
hstring = ''
# add the different items
for line in self.hdata:
if len(line) > 0:
hstring += self._comment_char + line
else:
hstring += self._comment_char + '\n'
# return the string
return hstring
def __iter__(self):
"""
Provide an iterator object.
The function provides and returns an iterator object
for the AstroAsciiData class. Due to this iterator object
sequences like:
for column in ascii_data_object:
<do something with column>
are possible.
"""
return AsciiLenGetIter(self)
def __len__(self):
"""
The length operator
@return: the number of header entries
@rtype: integer
"""
# that's rather trivial
length = self._nentry
# return the length
return length
def append(self, hlist):
"""
Append something to the header data
@param hlist: the string to append
@type hlist: string
"""
# split the string to lines
hitems = string.split(hlist,'\n')
# for each line
for item in hitems:
# append the new content
# to the header content
self.hdata.append(item+'\n')
self._nentry += 1
def _load_header(self, filename, comment_char):
"""
Loads the header from the data file
@param filename: the data file
@type filename: string
@param comment_char: the comment_char string
@type comment_char: string
"""
# start the item list
data = []
lastcoll, currcoll = 0, 0
lastname = ''
# Define patterns for some common header formats
commentpattern = re.compile(comment_char)
sextractor_header = re.compile('^#\s*(\d+)\s+([+*-/()\w]+)([^\[]*)(\[\w+\])?(.*)\n')
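# Illustrative example (not from the original source): a header line
# such as '#   3 MAG_APER  Fixed aperture magnitude  [mag]' (with its
# trailing newline) matches sextractor_header with groups
# ('3', 'MAG_APER', '  Fixed aperture magnitude  ', '[mag]', '').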
# open the data file and go over its rows
for line in file(filename, 'r'):
if commentpattern.match(line):
#append everything after the comment_char separator to Fullhdata
line_with_comment_char_stripped_off = commentpattern.sub('',line,count=1)
self.Fullhdata.append(line_with_comment_char_stripped_off)
SEmatch = sextractor_header.match(line)
if SEmatch:
# seems we have a SExtractorheader
if not self.SExtractorFlag:
self.SExtractorFlag = True
groups = SEmatch.groups()
currcoll = int(groups[0])
name = groups[1]
if currcoll <= lastcoll:
#ignore duplicate definitions and definitions out of order
continue
if currcoll > (lastcoll +1):
# print currcoll,lastcoll
# we jumped some lines, pad CollInfo
vcounter = 1
while (lastcoll +1) < currcoll:
if lastname in self.SexVectorColls:
self.CollInfo.append({'NAME':lastname+str(vcounter)})
vcounter +=1
else:
self.CollInfo.append(None)
lastcoll +=1
self.CollInfo.append({'NAME':name})
lastcoll = currcoll
lastname = name
if groups[3]:
# a unit was extracted
self.CollInfo[-1]['UNIT'] = str(groups[3].strip('[]'))
if groups[2] or groups[4]:
self.CollInfo[-1]['COMMENT'] =''
self.CollInfo[-1]['COMMENT'] += groups[2].strip()
if groups[2] and groups[4]:
self.CollInfo[-1]['COMMENT'] += ' '
self.CollInfo[-1]['COMMENT'] += groups[4].strip()
else:
data.append(line_with_comment_char_stripped_off)
else:
# leave the file at the first
# non-comment line
break
return data
def reset(self):
"""
Reset the header
"""
self.hdata = []
self._nentry = 0
def set_comment_char(self, comment_char):
"""
Set the comment_char string
@param comment_char: the new comment_char string
@type comment_char: string
"""
self._comment_char = comment_char
def getCollInfo(self,index):
"""
Robustly return column info from header
returns (columnname,unit,comment)
@param index: The column index
@type index: int
"""
#default values
name = 'column' + str(index+1)
unit = None
comment = None
if index < len(self.CollInfo):
if self.CollInfo[index]:
if self.CollInfo[index].has_key('NAME'):
name = str(self.CollInfo[index]['NAME'])
if self.CollInfo[index].has_key('UNIT'):
unit = str(self.CollInfo[index]['UNIT'])
if self.CollInfo[index].has_key('COMMENT'):
comment = str(self.CollInfo[index]['COMMENT'])
else:
# is the very last column in the list a known vector?
if self.CollInfo[-1]['NAME'] in self.SexVectorColls:
name = self.CollInfo[-1]['NAME']+str(index-len(self.CollInfo)+1)
# return name, unit, comment of the column
return name, unit, comment
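# Minimal usage sketch (illustrative; the filename is an assumption,
# not part of the original module):
#   hdr = Header('catalog.txt', '#')
#   name, unit, comment = hdr.getCollInfo(0)
# parses the '#'-prefixed header of catalog.txt and returns the name,
# unit and comment of the first column.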
|
gpl-3.0
| 2,816,544,044,639,738,000
| 31.658462
| 166
| 0.543904
| false
| 4.304136
| false
| false
| false
|
ninapavlich/scout-and-rove
|
scoutandrove/apps/sr/views.py
|
1
|
1246
|
from django.contrib.auth.decorators import login_required
from django.utils.decorators import method_decorator
from django.views.generic.list import ListView
from django.views.generic.detail import DetailView
from django.utils import timezone
from .models import *
class SiteProfileListView(ListView):
model = SiteProfile
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SiteProfileListView, self).dispatch(request, *args, **kwargs)
def get_queryset(self):
return self.model.get_profiles_for_user(self.request.user)
class SiteProfileDetailView(DetailView):
model = SiteProfile
@method_decorator(login_required)
def dispatch(self, request, *args, **kwargs):
return super(SiteProfileDetailView, self).dispatch(request, *args, **kwargs)
def get_context_data(self, **kwargs):
context = super(SiteProfileDetailView, self).get_context_data(**kwargs)
context['settings'] = self.object.get_settings()
context['test_result_sets'] = self.object.get_test_result_sets()
context['can_edit'] = self.object.can_user_edit(self.request.user)
return context
class TestResultSetDetailView(DetailView):
model = TestResultSet
|
mit
| -6,453,812,661,262,494,000
| 32.675676
| 79
| 0.719101
| false
| 3.943038
| false
| false
| false
|
solaris765/PythonProjects
|
rps.py
|
1
|
2268
|
'''
A simple Rock Paper Scissors game
Algorithm:
1. Have the user input r, p, or s
2. Have the computer choose a random integer (1-3)
3. Tell the user the result of the game
Todo:
- Finish mode business
- Detect if a user is repeatedly throwing the same thing and counter that
- Sweet graphics(?)
- ???
- Profit?
'''
#Housekeeping
from random import randint
from random import seed
from collections import Counter
seed()
a = 1
rock = 0
paper = 0
scissors = 0
playerlist = []
#Defining functions
def increment(x):
#Function to increment global vars based on user input
#Also records the throw so the history can be saved later
playerlist.append(x)
if x == "r":
global rock
rock = rock+1
elif x == "p":
global paper
paper = paper+1
elif x == "s":
global scissors
scissors = scissors+1
def decon(x):
#Function to convert x to a number for math
if x == "r":
x = 1
elif x == "p":
x = 2
elif x == "s":
x = 3
return x
def convert(x):
#Function to convert x to letters for printing
if x == 1:
x = "rock"
elif x == 2:
x = "paper"
elif x == 3:
x = "scissors"
return x
def save_presult():
#Function to save the recorded throws (collected in playerlist) to txt
out_file = open("player_rps_results.txt", "wt")
out_file.write(', '.join(playerlist))
out_file.write("\n")
out_file.close()
def get_pmost_common():
#Function to read the most common throw back from the txt
in_file = open("player_rps_results.txt", "rt")
plist = in_file.read().strip().split(', ')
in_file.close()
return Counter(plist).most_common()
#The important stuff
print("input q to quit")
print("r, p, s")
names = ['rock', 'paper', 'scissors']
while a == 1:
x = str(input("Throw: "))
if x!="r" and x!="p" and x!="s" and x!="q":
continue
elif x == "r" or x == "s" or x == "p":
increment(x)
cpu = randint(1, 3)
player_result = ["ties with", "beats", "loses to"]
result = player_result[(decon(x) - cpu) % 3]
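#Example: rock (1) against cpu scissors (3) gives (1 - 3) % 3 == 1,
#which picks "beats"; index 0 is a tie and index 2 a loss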
#Print result
print(str(convert(decon((x)))).capitalize() + " " + str(result) + " " + str(convert(cpu)))
elif x == "q":
print("Goodbye")
#Save player results to txt file
save_presult()
break
|
unlicense
| 4,312,985,369,166,982,700
| 22.381443
| 98
| 0.571429
| false
| 3.176471
| false
| false
| false
|
PersianWikipedia/pywikibot-core
|
tests/textlib_tests.py
|
1
|
75779
|
# -*- coding: utf-8 -*-
"""Test textlib module."""
#
# (C) Pywikibot team, 2011-2020
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, division, unicode_literals
import codecs
from collections import OrderedDict
import functools
import os
import re
import pywikibot
import pywikibot.textlib as textlib
from pywikibot.site import _IWEntry
from pywikibot.textlib import _MultiTemplateMatchBuilder, extract_sections
from pywikibot.tools import suppress_warnings
from pywikibot import UnknownSite
from tests.aspects import (
unittest, require_modules, TestCase, DefaultDrySiteTestCase,
PatchingTestCase, SiteAttributeTestCase,
)
from tests import mock
files = {}
dirname = os.path.join(os.path.dirname(__file__), 'pages')
for f in ['enwiki_help_editing']:
with codecs.open(os.path.join(dirname, f + '.page'),
'r', 'utf-8') as content:
files[f] = content.read()
class TestSectionFunctions(TestCase):
"""Test wikitext section handling function."""
net = False
def setUp(self):
"""Setup tests."""
self.catresult1 = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
super(TestSectionFunctions, self).setUp()
def contains(self, fn, sn):
"""Invoke does_text_contain_section()."""
return textlib.does_text_contain_section(
files[fn], sn)
def assertContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] contains sn."""
self.assertEqual(self.contains(fn, sn), True, *args, **kwargs)
def assertNotContains(self, fn, sn, *args, **kwargs):
"""Test that files[fn] does not contain sn."""
self.assertEqual(self.contains(fn, sn), False, *args, **kwargs)
def testCurrentBehaviour(self):
"""Test that 'Editing' is found."""
self.assertContains('enwiki_help_editing', 'Editing')
def testSpacesInSection(self):
"""Test with spaces in section."""
self.assertContains('enwiki_help_editing', 'Minor_edits')
self.assertNotContains('enwiki_help_editing', '#Minor edits',
"Incorrect, '#Minor edits' does not work")
self.assertNotContains('enwiki_help_editing', 'Minor Edits',
'section hashes are case-sensitive')
self.assertNotContains('enwiki_help_editing', 'Minor_Edits',
'section hashes are case-sensitive')
@unittest.expectedFailure # TODO: T133276
def test_encoded_chars_in_section(self):
"""Test encoded chars in section."""
self.assertContains(
'enwiki_help_editing', 'Talk_.28discussion.29_pages',
'As used in the TOC')
def test_underline_characters_in_section(self):
"""Test with underline chars in section."""
self.assertContains('enwiki_help_editing', 'Talk_(discussion)_pages',
'Understood by mediawiki')
def test_spaces_outside_section(self):
"""Test with spaces around section."""
self.assertContains('enwiki_help_editing', 'Naming and_moving')
self.assertContains('enwiki_help_editing', ' Naming and_moving ')
self.assertContains('enwiki_help_editing', ' Naming and_moving_')
def test_link_in_section(self):
"""Test with link inside section."""
# section is ==[[Wiki markup]]==
self.assertContains('enwiki_help_editing', '[[Wiki markup]]',
'Link as section header')
self.assertContains('enwiki_help_editing', '[[:Wiki markup]]',
'section header link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Wiki markup',
'section header must be a link')
# section is ===[[:Help]]ful tips===
self.assertContains('enwiki_help_editing', '[[Help]]ful tips',
'Containing link')
self.assertContains('enwiki_help_editing', '[[:Help]]ful tips',
'Containing link with leading colon')
self.assertNotContains('enwiki_help_editing', 'Helpful tips',
'section header must contain a link')
class TestFormatInterwiki(TestCase):
"""Test format functions."""
family = 'wikipedia'
code = 'en'
cached = True
def test_interwiki_format_Page(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Page(pywikibot.Link('de:German', self.site)),
'fr': pywikibot.Page(pywikibot.Link('fr:French', self.site))
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
def test_interwiki_format_Link(self):
"""Test formatting interwiki links using Page instances."""
interwikis = {
'de': pywikibot.Link('de:German', self.site),
'fr': pywikibot.Link('fr:French', self.site),
}
self.assertEqual('[[de:German]]\n[[fr:French]]\n',
textlib.interwikiFormat(interwikis, self.site))
class TestFormatCategory(DefaultDrySiteTestCase):
"""Test category formatting."""
catresult = '[[Category:Cat1]]\n[[Category:Cat2]]\n'
def test_category_format_raw(self):
"""Test formatting categories as strings formatted as links."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['[[Category:Cat1]]',
'[[Category:Cat2]]'],
self.site))
def test_category_format_bare(self):
"""Test formatting categories as strings."""
self.assertEqual(self.catresult,
textlib.categoryFormat(['Cat1', 'Cat2'], self.site))
def test_category_format_Category(self):
"""Test formatting categories as Category instances."""
data = [pywikibot.Category(self.site, 'Cat1'),
pywikibot.Category(self.site, 'Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
def test_category_format_Page(self):
"""Test formatting categories as Page instances."""
data = [pywikibot.Page(self.site, 'Category:Cat1'),
pywikibot.Page(self.site, 'Category:Cat2')]
self.assertEqual(self.catresult,
textlib.categoryFormat(data, self.site))
class TestCategoryRearrangement(DefaultDrySiteTestCase):
"""
Ensure that sorting keys are not being lost.
Tests .getCategoryLinks() and .replaceCategoryLinks(),
with both a newline and an empty string as separators.
"""
old = ('[[Category:Cat1]]\n[[Category:Cat2|]]\n'
'[[Category:Cat1| ]]\n[[Category:Cat2|key]]')
def test_standard_links(self):
"""Test getting and replacing categories."""
cats = textlib.getCategoryLinks(self.old, site=self.site)
new = textlib.replaceCategoryLinks(self.old, cats, site=self.site)
self.assertEqual(self.old, new)
def test_indentation(self):
"""Test indentation from previous block."""
# Block of text
old = 'Some text\n\n' + self.old
cats = textlib.getCategoryLinks(old, site=self.site)
new = textlib.replaceCategoryLinks(old, cats, site=self.site)
self.assertEqual(old, new)
# DEFAULTSORT
old_ds = '{{DEFAULTSORT:key}}\n' + self.old
cats_ds = textlib.getCategoryLinks(old_ds, site=self.site)
new_ds = textlib.replaceCategoryLinks(old_ds, cats_ds, site=self.site)
self.assertEqual(old_ds, new_ds)
def test_in_place_replace(self):
"""Test in-place category change is reversible."""
dummy = pywikibot.Category(self.site, 'foo')
dummy.sortKey = 'bah'
cats = textlib.getCategoryLinks(self.old, site=self.site)
# Sanity checking
temp = textlib.replaceCategoryInPlace(self.old, cats[0], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[0],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[1], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[1],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[2], dummy,
site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[2],
site=self.site)
self.assertEqual(self.old, new)
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(temp, self.old)
new = textlib.replaceCategoryInPlace(temp, dummy, cats[3],
site=self.site)
self.assertEqual(self.old, new)
# Testing removing categories
temp = textlib.replaceCategoryInPlace(self.old, cats[0],
None, site=self.site)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertNotIn(cats[0], temp_cats)
# First and third categories are the same
self.assertEqual([cats[1], cats[3]], temp_cats)
# Testing adding categories
temp = textlib.replaceCategoryInPlace(
self.old, cats[0], cats[1], site=self.site,
add_only=True)
self.assertNotEqual(temp, self.old)
temp_cats = textlib.getCategoryLinks(temp, site=self.site)
self.assertEqual([cats[0], cats[1], cats[1],
cats[2], cats[1], cats[3]], temp_cats)
new_cats = textlib.getCategoryLinks(new, site=self.site)
self.assertEqual(cats, new_cats)
def test_in_place_retain_sort(self):
"""Test in-place category change does not alter the sortkey."""
# sort key should be retained when the new cat sortKey is None
dummy = pywikibot.Category(self.site, 'foo')
self.assertIsNone(dummy.sortKey)
cats = textlib.getCategoryLinks(self.old, site=self.site)
self.assertEqual(cats[3].sortKey, 'key')
orig_sortkey = cats[3].sortKey
temp = textlib.replaceCategoryInPlace(self.old, cats[3],
dummy, site=self.site)
self.assertNotEqual(self.old, temp)
new_dummy = textlib.getCategoryLinks(temp, site=self.site)[3]
self.assertIsNotNone(new_dummy.sortKey)
self.assertEqual(orig_sortkey, new_dummy.sortKey)
class TestTemplatesInCategory(TestCase):
"""Tests to verify that templates in category links are handled."""
family = 'wikipedia'
code = 'en'
cached = True
def test_templates(self):
"""Test normal templates inside category links."""
self.site = self.get_site()
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|Foo}}|bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:{{P1|{{P2|L33t|Foo}}}}|bar]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}bar]][[Category:Wiki{{P2||pedia}}]]',
self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='bar'),
pywikibot.page.Category(self.site, 'Wikipedia')])
self.assertEqual(textlib.getCategoryLinks(
'[[Category:Foo{{!}}and{{!}}bar]]', self.site, expand_text=True),
[pywikibot.page.Category(self.site, 'Foo', sort_key='and|bar')])
with mock.patch.object(pywikibot, 'warning', autospec=True) as warn:
textlib.getCategoryLinks('[[Category:nasty{{{!}}]]', self.site)
warn.assert_called_once_with(
'Invalid category title extracted: nasty{{{!}}')
class TestTemplateParams(TestCase):
"""Test to verify that template params extraction works."""
net = False
def _common_results(self, func):
"""Common cases."""
self.assertEqual(func('{{a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a}}'), [('a', OrderedDict())])
self.assertEqual(func('{{a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{ a }}'), [('a', OrderedDict())])
self.assertEqual(func('{{a|b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b|c=d}}'),
[('a', OrderedDict((('1', 'b'), ('c', 'd'))))])
self.assertEqual(func('{{a|b=c|f=g|d=e|1=}}'),
[('a', OrderedDict((('b', 'c'), ('f', 'g'),
('d', 'e'), ('1', ''))))])
self.assertEqual(func('{{a|1=2|c=d}}'),
[('a', OrderedDict((('1', '2'), ('c', 'd'))))])
self.assertEqual(func('{{a|c=d|1=2}}'),
[('a', OrderedDict((('c', 'd'), ('1', '2'))))])
self.assertEqual(func('{{a|5=d|a=b}}'),
[('a', OrderedDict((('5', 'd'), ('a', 'b'))))])
self.assertEqual(func('{{a|=2}}'),
[('a', OrderedDict((('', '2'), )))])
self.assertEqual(func('{{a|}}'), [('a', OrderedDict((('1', ''), )))])
self.assertEqual(func('{{a|=|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a||}}'),
[('a', OrderedDict((('1', ''), ('2', ''))))])
self.assertEqual(func('{{a|b={{{1}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'), )))])
self.assertEqual(func('{{a|b=<noinclude>{{{1}}}</noinclude>}}'),
[('a', OrderedDict(
(('b', '<noinclude>{{{1}}}</noinclude>'), )))])
self.assertEqual(func('{{subst:a|b=c}}'),
[('subst:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{safesubst:a|b=c}}'),
[('safesubst:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{msgnw:a|b=c}}'),
[('msgnw:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{Template:a|b=c}}'),
[('Template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{template:a|b=c}}'),
[('template:a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{:a|b=c}}'),
[(':a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{subst::a|b=c}}'),
[('subst::a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b={{{1}}}|c={{{2}}}}}'),
[('a', OrderedDict((('b', '{{{1}}}'),
('c', '{{{2}}}'))))])
self.assertEqual(func('{{a|b=c}}{{d|e=f}}'),
[('a', OrderedDict((('b', 'c'), ))),
('d', OrderedDict((('e', 'f'), )))])
# initial '{' and '}' should be ignored as outer wikitext
self.assertEqual(func('{{{a|b}}X}'),
[('a', OrderedDict((('1', 'b'), )))])
# sf.net bug 1575: unclosed template
self.assertEqual(func('{{a'), [])
self.assertEqual(func('{{a}}{{foo|'), [('a', OrderedDict())])
def _unstripped(self, func):
"""Common cases of unstripped results."""
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', '<!--{{{1}}}-->'), )))])
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict(((' ', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict(((' b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b ', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', ' c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c '), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' bar '))))])
# The correct entry 'bar' is removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
# However whitespace prevents the correct item from being removed
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '), (' 2 ', ' bar '),
('2', ' baz '))))])
def _stripped(self, func):
"""Common cases of stripped results."""
self.assertEqual(func('{{a| }}'),
[('a', OrderedDict((('1', ' '), )))])
self.assertEqual(func('{{a| | }}'),
[('a', OrderedDict((('1', ' '), ('2', ' '))))])
self.assertEqual(func('{{a| =|}}'),
[('a', OrderedDict((('', ''), ('1', ''))))])
self.assertEqual(func('{{a| b=c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b =c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b= c}}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a|b=c }}'),
[('a', OrderedDict((('b', 'c'), )))])
self.assertEqual(func('{{a| foo |2= bar }}'),
[('a', OrderedDict((('1', ' foo '), ('2', 'bar'))))])
# 'bar' is always removed
self.assertEqual(func('{{a| foo |2= bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
self.assertEqual(func('{{a| foo | 2 = bar | baz }}'),
[('a', OrderedDict((('1', ' foo '),
('2', ' baz '))))])
def _etp_regex_differs(self, func):
"""Common cases not handled the same by ETP_REGEX."""
# inner {} should be treated as part of the value
self.assertEqual(func('{{a|b={} }}'),
[('a', OrderedDict((('b', '{} '), )))])
def _order_differs(self, func):
"""Common cases where the order of templates differs."""
self.assertCountEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), ))),
('c', OrderedDict())])
self.assertCountEqual(func('{{a|{{c|d}}}}'),
[('c', OrderedDict((('1', 'd'), ))),
('a', OrderedDict([('1', '{{c|d}}')]))])
# inner '}' after {{b|c}} should be treated as wikitext
self.assertCountEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b|c}}}'),
('2', 'd')])),
('b', OrderedDict([('1', 'c')]))])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh(self):
"""Test using mwparserfromhell."""
func = textlib.extract_templates_and_params_mwpfh
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self._etp_regex_differs(func)
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}'), ))),
('a', OrderedDict([('1', '{{c|{{d|}}}}')])),
('d', OrderedDict([('1', '')]))
])
@require_modules('mwparserfromhell')
def test_extract_templates_params_mwpfh_stripped(self):
"""Test using mwparserfromhell with stripping."""
func = functools.partial(textlib.extract_templates_and_params_mwpfh,
strip=True)
self._common_results(func)
self._order_differs(func)
self._stripped(func)
def test_extract_templates_params_regex(self):
"""Test using many complex regexes."""
func = functools.partial(textlib.extract_templates_and_params_regex,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._order_differs(func)
self._unstripped(func)
self.assertEqual(func('{{a|b={} }}'), []) # FIXME: {} is normal text
def test_extract_templates_params_regex_stripped(self):
"""Test using many complex regexes with stripping."""
func = textlib.extract_templates_and_params_regex
self._common_results(func)
self._order_differs(func)
self._stripped(func)
self.assertEqual(func('{{a|b=<!--{{{1}}}-->}}'),
[('a', OrderedDict((('b', ''), )))])
# Identical to mwpfh
self.assertCountEqual(func('{{a|{{c|{{d}}}}}}'),
[('c', OrderedDict((('1', '{{d}}'), ))),
('a', OrderedDict([('1', '{{c|{{d}}}}')])),
('d', OrderedDict())
])
# However fails to correctly handle three levels of balanced brackets
# with empty parameters
self.assertCountEqual(func('{{a|{{c|{{d|}}}}}}'),
[('c', OrderedDict((('1', '{{d|}}}'), ))),
('d', OrderedDict([('1', '}')]))
])
def test_extract_templates_params(self):
"""Test that the normal entry point works."""
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=False)
self._common_results(func)
self._unstripped(func)
func = functools.partial(textlib.extract_templates_and_params,
remove_disabled_parts=False, strip=True)
self._common_results(func)
self._stripped(func)
def test_template_simple_regex(self):
"""Test using simple regex."""
func = textlib.extract_templates_and_params_regex_simple
self._common_results(func)
self._etp_regex_differs(func)
# The simple regex copies the whitespace of mwpfh, but does
# not have additional entries for nested templates.
self.assertEqual(func('{{a| b={{c}}}}'),
[('a', OrderedDict(((' b', '{{c}}'), )))])
self.assertEqual(func('{{a|b={{c}}}}'),
[('a', OrderedDict((('b', '{{c}}'), )))])
self.assertEqual(func('{{a|b= {{c}}}}'),
[('a', OrderedDict((('b', ' {{c}}'), )))])
self.assertEqual(func('{{a|b={{c}} }}'),
[('a', OrderedDict((('b', '{{c}} '), )))])
# These three are from _order_differs, and while the first works
self.assertEqual(func('{{a|{{c}} }}'),
[('a', OrderedDict((('1', '{{c}} '), )))])
# an inner '|' causes extract_template_and_params_regex_simple to
# split arguments incorrectly in the next two cases.
self.assertEqual(func('{{a|{{c|d}} }}'),
[('a', OrderedDict([('1', '{{c'),
('2', 'd}} ')]))])
self.assertEqual(func('{{a|{{b|c}}}|d}}'),
[('a', OrderedDict([('1', '{{b'),
('2', 'c}}}'),
('3', 'd')]))])
# Safe fallback to handle arbitrary template levels
# by merging top level templates together.
# i.e. 'b' is not recognised as a template, and 'foo' is also
# consumed as part of 'a'.
self.assertEqual(func('{{a|{{c|{{d|{{e|}}}} }} }} foo {{b}}'),
[(None, OrderedDict())])
def test_regexes(self):
"""Test _ETP_REGEX, NESTED_TEMPLATE_REGEX and TEMP_REGEX."""
func = textlib._ETP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
func = textlib._ETP_REGEX.match
self.assertIsNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{#if:foo}}'))
self.assertIsNotNone(func('{{foo:}}'))
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{1}}'))
self.assertIsNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNone(func('{{a|b={{{1}}} }}'))
self.assertIsNone(func('{{a|b={{c}} }}'))
self.assertIsNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|b={} }}'))
self.assertIsNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
with suppress_warnings('textlib.TEMP_REGEX is deprecated'):
func = textlib.TEMP_REGEX.search
self.assertIsNotNone(func('{{{1}}}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
with suppress_warnings('textlib.TEMP_REGEX is deprecated'):
func = textlib.TEMP_REGEX.match
self.assertIsNotNone(func('{{#if:foo}}'))
self.assertIsNotNone(func('{{foo:}}'))
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNone(func('{{a|b={{c}} }}'))
self.assertIsNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNone(func('{{a|{{c}} }}'))
self.assertIsNone(func('{{a|{{c|d}} }}'))
func = textlib.NESTED_TEMPLATE_REGEX.search
# Numerically named templates are rejected
self.assertIsNone(func('{{1}}'))
self.assertIsNone(func('{{#if:foo}}'))
self.assertIsNone(func('{{{1}}}'))
self.assertIsNone(func('{{{1|}}}'))
self.assertIsNone(func('{{{15|a}}}'))
self.assertIsNone(func('{{{1|{{{2|a}}} }}}'))
self.assertIsNone(func('{{{1|{{2|a}} }}}'))
func = textlib.NESTED_TEMPLATE_REGEX.match
self.assertIsNotNone(func('{{CURRENTYEAR}}'))
self.assertIsNotNone(func('{{foo:bar}}'))
self.assertIsNone(func('{{1}}'))
self.assertIsNotNone(func('{{a|b={{CURRENTYEAR}} }}'))
self.assertIsNotNone(func('{{a|b={{{1}}} }}'))
self.assertIsNotNone(func('{{a|b={{c}} }}'))
self.assertIsNotNone(func('{{a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|b={} }}'))
self.assertIsNotNone(func('{{:a|b={{c|d=1}} }}'))
self.assertIsNotNone(func('{{a|{{c}} }}'))
self.assertIsNotNone(func('{{a|{{c|d}} }}'))
# All templates are captured when template depth is greater than 2
m = func('{{a|{{c|{{d|}} }} | foo = bar }} foo {{bar}} baz')
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
m = func('{{a|\n{{c|{{d|}} }}\n| foo = bar }} foo {{bar}} baz')
self.assertIsNotNone(m)
self.assertIsNotNone(m.group(0))
self.assertIsNone(m.group('name'))
self.assertIsNone(m.group(1))
self.assertIsNone(m.group('params'))
self.assertIsNone(m.group(2))
self.assertIsNotNone(m.group('unhandled_depth'))
self.assertTrue(m.group(0).endswith('foo {{bar}}'))
class TestGenericTemplateParams(PatchingTestCase):
"""Test whether the generic function forwards the call correctly."""
net = False
@PatchingTestCase.patched(textlib, 'extract_templates_and_params_mwpfh')
def extract_mwpfh(self, text, *args, **kwargs):
"""Patched call to extract_templates_and_params_mwpfh."""
self._text = text
self._args = args
self._mwpfh = True
@PatchingTestCase.patched(textlib, 'extract_templates_and_params_regex')
def extract_regex(self, text, *args, **kwargs):
"""Patched call to extract_templates_and_params_regex."""
self._text = text
self._args = args
self._mwpfh = False
def test_removing_disabled_parts_regex(self):
"""Test removing disabled parts when using the regex variant."""
self.patch(textlib, 'mwparserfromhell', Exception())
textlib.extract_templates_and_params('{{a<!-- -->}}', True)
self.assertEqual(self._text, '{{a}}')
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}', False)
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}')
self.assertEqual(self._text, '{{a}}')
self.assertFalse(self._mwpfh)
@require_modules('mwparserfromhell')
def test_removing_disabled_parts_mwpfh(self):
"""Test removing disabled parts when using the mwpfh variant."""
textlib.extract_templates_and_params('{{a<!-- -->}}', True)
self.assertEqual(self._text, '{{a}}')
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}', False)
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a<!-- -->}}')
self.assertEqual(self._text, '{{a<!-- -->}}')
self.assertTrue(self._mwpfh)
def test_strip_regex(self):
"""Test stripping values when using the regex variant."""
self.patch(textlib, 'mwparserfromhell', Exception())
textlib.extract_templates_and_params('{{a| foo }}', False, True)
self.assertEqual(self._args, (False, True))
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', False, False)
self.assertEqual(self._args, (False, False))
self.assertFalse(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', False)
self.assertEqual(self._args, (False, True))
self.assertFalse(self._mwpfh)
@require_modules('mwparserfromhell')
def test_strip_mwpfh(self):
"""Test stripping values when using the mwpfh variant."""
textlib.extract_templates_and_params('{{a| foo }}', None, True)
self.assertEqual(self._args, (True, ))
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}', None, False)
self.assertEqual(self._args, (False, ))
self.assertTrue(self._mwpfh)
textlib.extract_templates_and_params('{{a| foo }}')
self.assertEqual(self._args, (False, ))
self.assertTrue(self._mwpfh)
class TestReplaceLinks(TestCase):
"""Test the replace_links function in textlib."""
sites = {
'wt': {
'family': 'wiktionary',
'code': 'en',
},
'wp': {
'family': 'wikipedia',
'code': 'en',
}
}
dry = True
text = ('Hello [[World]], [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
@classmethod
def setUpClass(cls):
"""Create a fake interwiki cache."""
super(TestReplaceLinks, cls).setUpClass()
# make APISite.interwiki work and prevent it from doing requests
for site in cls.sites.values():
mapping = {}
for iw in cls.sites.values():
mapping[iw['family']] = _IWEntry(True, 'invalid')
mapping[iw['family']]._site = iw['site']
mapping['bug'] = _IWEntry(False, 'invalid')
mapping['bug']._site = UnknownSite('Not a wiki')
mapping['en'] = _IWEntry(True, 'invalid')
mapping['en']._site = site['site']
site['site']._interwikimap._map = mapping
site['site']._interwikimap._site = None # prevent it from loading
cls.wp_site = cls.get_site('wp')
def test_replacements_function(self):
"""Test a dynamic function as the replacements."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
return pywikibot.Link('Homeworld', link.site)
elif link.title.lower() == 'you':
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[Homeworld]], [[how|are]] you? Are you a [[bug:1337]]?')
def test_replacements_once(self):
"""Test dynamic replacement."""
def callback(link, text, groups, rng):
if link.title.lower() == 'you':
self._count += 1
if link.section:
return pywikibot.Link(
'{0}#{1}'
.format(self._count, link.section), link.site)
else:
return pywikibot.Link('{0}'
.format(self._count), link.site)
self._count = 0 # buffer number of found instances
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello [[World]], [[how|are]] [[1#section]]? Are [[2]] a '
'[[bug:1337]]?')
del self._count
def test_unlink_all(self):
"""Test unlinking."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
return False
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello World, are you? Are you a [[bug:1337]]?')
def test_unlink_some(self):
"""Test unlinking only some links."""
self.assertEqual(
textlib.replace_links(self.text, ('World', False), self.wp_site),
'Hello World, [[how|are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
self.assertEqual(
textlib.replace_links('[[User:Namespace|Label]]\n'
'[[User:Namespace#Section|Labelz]]\n'
'[[Nothing]]',
('User:Namespace', False),
self.wp_site),
'Label\nLabelz\n[[Nothing]]')
def test_replace_neighbour(self):
"""Test that it replaces two neighbouring links."""
self.assertEqual(
textlib.replace_links('[[A]][[A]][[C]]',
('A', 'B'),
self.wp_site),
'[[B|A]][[B|A]][[C]]')
def test_replacements_simplify(self):
"""Test a tuple as replacement removing the need for a piped link."""
self.assertEqual(
textlib.replace_links(self.text,
('how', 'are'),
self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_file(self):
"""Test that it respects the namespace."""
self.assertEqual(
textlib.replace_links(
'[[File:Meh.png|thumb|Description of [[fancy]]]] '
'[[Fancy]]...', ('File:Meh.png', 'File:Fancy.png'),
self.wp_site),
'[[File:Fancy.png|thumb|Description of [[fancy]]]] [[Fancy]]...')
def test_replace_strings(self):
"""Test if strings can be used."""
self.assertEqual(
textlib.replace_links(self.text, ('how', 'are'), self.wp_site),
'Hello [[World]], [[are]] [[you#section|you]]? Are [[you]] a '
'[[bug:1337]]?')
def test_replace_invalid_link_text(self):
"""Test that it doesn't pipe a link when it's an invalid link."""
self.assertEqual(
textlib.replace_links('[[Target|Foo:]]', ('Target', 'Foo'),
self.wp_site), '[[Foo|Foo:]]')
def test_replace_modes(self):
"""Test replacing with or without label and section."""
source_text = '[[Foo#bar|baz]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'), self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site, 'Bar')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar', self.wp_site)),
self.wp_site),
'[[Bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu'),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu',
self.wp_site)),
self.wp_site),
'[[Bar#snafu]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar|foo',
self.wp_site)),
self.wp_site),
'[[Bar|foo]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar#snafu|foo'),
self.wp_site), '[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Page(self.wp_site,
'Bar#snafu|foo')),
self.wp_site),
'[[Bar#bar|baz]]')
self.assertEqual(
textlib.replace_links(source_text,
('Foo', pywikibot.Link('Bar#snafu|foo',
self.wp_site)),
self.wp_site),
'[[Bar#snafu|foo]]')
def test_replace_different_case(self):
"""Test that it uses piped links when the case is different."""
source_text = '[[Foo|Bar]] and [[Foo|bar]]'
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wp')),
'[[Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'bar'),
self.get_site('wt')),
'[[bar|Bar]] and [[bar]]')
self.assertEqual(
textlib.replace_links(source_text, ('Foo', 'Bar'),
self.get_site('wt')),
'[[Bar]] and [[Bar|bar]]')
@unittest.expectedFailure
def test_label_diff_namespace(self):
"""Test that it uses the old label when the new doesn't match."""
# These tests require to get the actual part which is before the title
# (interwiki and namespace prefixes) which could be then compared
# case insensitive.
self.assertEqual(
textlib.replace_links('[[Image:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|Image:Foobar]]')
self.assertEqual(
textlib.replace_links('[[en:File:Foobar]]',
('File:Foobar', 'File:Foo'), self.wp_site),
'[[File:Foo|en:File:Foobar]]')
def test_linktrails(self):
"""Test that the linktrails are used or applied."""
self.assertEqual(
textlib.replace_links('[[Foobar]]', ('Foobar', 'Foo'),
self.wp_site),
'[[Foo]]bar')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Talk:Tests'), self.wp_site),
'[[Talk:tests]]')
self.assertEqual(
textlib.replace_links('[[Talk:test]]s',
('Talk:Test', 'Project:Tests'),
self.wp_site),
'[[Project:Tests|Talk:tests]]')
def test_unicode_callback(self):
"""Test returning unicode in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a unicode instance not bytes
return 'homewörlder'
self.assertEqual(
textlib.replace_links(self.text, callback, self.wp_site),
'Hello homewörlder, [[how|are]] [[you#section|you]]? '
'Are [[you]] a [[bug:1337]]?')
def test_bytes_callback(self):
"""Test returning bytes in the callback."""
def callback(link, text, groups, rng):
self.assertEqual(link.site, self.wp_site)
if link.title == 'World':
# This must be a bytes instance not unicode
return b'homeworlder'
self.assertRaisesRegex(
ValueError, r'unicode \(str.*bytes \(str',
textlib.replace_links, self.text, callback, self.wp_site)
def test_replace_interwiki_links(self):
"""Make sure interwiki links can not be replaced."""
link = '[[fr:how]]'
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('how', 'de:are'), self.wp_site),
link)
self.assertEqual(
textlib.replace_links(link, ('de:how', 'de:are'), self.wp_site),
link)
class TestReplaceLinksNonDry(TestCase):
"""Test the replace_links function in textlib non-dry."""
family = 'wikipedia'
code = 'en'
cached = True
def test_replace_interlanguage_links(self):
"""Test replacing interlanguage links."""
link = '[[:fr:how]]'
self.assertEqual(
textlib.replace_links(link, (':fr:how', ':de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('fr:how', 'de:are'),
self.site),
'[[:de:Are|fr:how]]')
self.assertEqual(
textlib.replace_links(link, ('how', ':de:are'),
self.site),
link)
self.assertEqual(
textlib.replace_links(link, (':de:how', ':de:are'),
self.site),
link)
class TestLocalDigits(TestCase):
"""Test to verify that local digits are correctly being handled."""
net = False
def test_to_local(self):
"""Test converting Latin digits to local digits."""
self.assertEqual(textlib.to_local_digits(299792458, 'en'), 299792458)
self.assertEqual(
textlib.to_local_digits(299792458, 'fa'), '۲۹۹۷۹۲۴۵۸')
self.assertEqual(
textlib.to_local_digits(
'299792458 flash', 'fa'), '۲۹۹۷۹۲۴۵۸ flash')
self.assertEqual(
textlib.to_local_digits(
'299792458', 'km'), '២៩៩៧៩២៤៥៨')
class TestReplaceExcept(DefaultDrySiteTestCase):
"""Test to verify the replacements with exceptions are done correctly."""
def test_no_replace(self):
"""Test replacing when the old text does not match."""
self.assertEqual(textlib.replaceExcept('12345678', 'x', 'y', [],
site=self.site),
'12345678')
def test_simple_replace(self):
"""Test replacing without regex."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxxB', 'x', 'y', [],
site=self.site),
'AyyB')
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
site=self.site),
'AyyyB')
def test_regex_replace(self):
"""Test replacing with a regex."""
self.assertEqual(textlib.replaceExcept('A123B', r'\d', r'x', [],
site=self.site),
'AxxxB')
self.assertEqual(textlib.replaceExcept('A123B', r'\d+', r'x', [],
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('A123B',
r'A(\d)2(\d)B', r'A\1x\2B', [],
site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('', r'(a?)', r'\1B', [], site=self.site),
'B')
self.assertEqual(
textlib.replaceExcept('abc', r'x*', r'-', [], site=self.site),
'-a-b-c-')
# This is different from re.sub() as re.sub() doesn't
# allow None groups
self.assertEqual(
textlib.replaceExcept('', r'(a)?', r'\1\1', [], site=self.site),
'')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(\d)2(\d)B', r'A\g<1>x\g<2>B',
[], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(?P<b>\d)B',
r'A\g<a>x\g<b>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\g<2>B', [], site=self.site),
'A1x3B')
self.assertEqual(
textlib.replaceExcept('A123B', r'A(?P<a>\d)2(\d)B',
r'A\g<a>x\2B', [], site=self.site),
'A1x3B')
# test regex with lookbehind.
self.assertEqual(
textlib.replaceExcept('A behindB C', r'(?<=behind)\w',
r'Z', [], site=self.site),
'A behindZ C')
# test regex with lookbehind and groups.
self.assertEqual(
textlib.replaceExcept('A behindB C D', r'(?<=behind)\w( )',
r'\g<1>Z', [], site=self.site),
'A behind ZC D')
# test regex with lookahead.
self.assertEqual(
textlib.replaceExcept('A Bahead C', r'\w(?=ahead)',
r'Z', [], site=self.site),
'A Zahead C')
# test regex with lookahead and groups.
self.assertEqual(
textlib.replaceExcept('A Bahead C D', r'( )\w(?=ahead)',
r'Z\g<1>', [], site=self.site),
'AZ ahead C D')
def test_case_sensitive(self):
"""Test replacing with different case sensitivity."""
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=False,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=False,
site=self.site),
'AxB')
self.assertEqual(textlib.replaceExcept('AxB', 'x', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
self.assertEqual(textlib.replaceExcept('AxB', 'X', 'y', [],
caseInsensitive=True,
site=self.site),
'AyB')
def test_replace_with_marker(self):
"""Test replacing with a marker."""
self.assertEqual(textlib.replaceExcept('AxyxB', 'x', 'y', [],
marker='.',
site=self.site),
'Ayyy.B')
self.assertEqual(textlib.replaceExcept('AxyxB', '1', 'y', [],
marker='.',
site=self.site),
'AxyxB.')
def test_overlapping_replace(self):
"""Test replacing with and without overlap."""
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=False,
site=self.site),
'2121')
self.assertEqual(textlib.replaceExcept('1111', '11', '21', [],
allowoverlap=True,
site=self.site),
'2221')
def test_replace_exception(self):
"""Test replacing not inside a specific regex."""
self.assertEqual(textlib.replaceExcept('123x123', '123', '000', [],
site=self.site),
'000x000')
self.assertEqual(textlib.replaceExcept('123x123', '123', '000',
[re.compile(r'\w123')],
site=self.site),
'000x123')
def test_replace_tags(self):
"""Test replacing not inside various tags."""
self.assertEqual(textlib.replaceExcept('A <!-- x --> B', 'x', 'y',
['comment'], site=self.site),
'A <!-- x --> B')
self.assertEqual(textlib.replaceExcept('\n==x==\n', 'x', 'y',
['header'], site=self.site),
'\n==x==\n')
self.assertEqual(textlib.replaceExcept('\n<!--'
'\ncomment-->==x==<!--comment'
'\n-->\n', 'x', 'y',
['header'], site=self.site),
'\n<!--\ncomment-->==x==<!--comment\n-->\n')
self.assertEqual(textlib.replaceExcept('<pre>x</pre>', 'x', 'y',
['pre'], site=self.site),
'<pre>x</pre>')
self.assertEqual(textlib.replaceExcept('<nowiki >x</nowiki >x',
'x', 'y', ['nowiki'],
site=self.site),
'<nowiki >x</nowiki >y') # T191559
self.assertEqual(textlib.replaceExcept('<source lang="xml">x</source>',
'x', 'y', ['source'],
site=self.site),
'<source lang="xml">x</source>')
self.assertEqual(textlib.replaceExcept('<source>x</source>',
'x', 'y', ['source'],
site=self.site),
'<source>x</source>')
self.assertEqual(textlib.replaceExcept(
'<syntaxhighlight lang="xml">x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight lang="xml">x</syntaxhighlight>')
self.assertEqual(
textlib.replaceExcept('<syntaxhighlight>x</syntaxhighlight>',
'x', 'y', ['source'], site=self.site),
'<syntaxhighlight>x</syntaxhighlight>')
self.assertEqual(textlib.replaceExcept('<includeonly>x</includeonly>',
'x', 'y', ['includeonly'],
site=self.site),
'<includeonly>x</includeonly>')
self.assertEqual(textlib.replaceExcept('<ref>x</ref>', 'x', 'y',
['ref'], site=self.site),
'<ref>x</ref>')
self.assertEqual(textlib.replaceExcept('<ref name="x">A</ref>',
'x', 'y',
['ref'], site=self.site),
'<ref name="x">A</ref>')
self.assertEqual(textlib.replaceExcept(' xA ', 'x', 'y',
['startspace'], site=self.site),
' xA ')
self.assertEqual(textlib.replaceExcept(':xA ', 'x', 'y',
['startcolon'], site=self.site),
':xA ')
self.assertEqual(textlib.replaceExcept('<table>x</table>', 'x', 'y',
['table'], site=self.site),
'<table>x</table>')
self.assertEqual(textlib.replaceExcept('x [http://www.sample.com x]',
'x', 'y', ['hyperlink'],
site=self.site),
'y [http://www.sample.com y]')
self.assertEqual(textlib.replaceExcept(
'x http://www.sample.com/x.html', 'x', 'y',
['hyperlink'], site=self.site), 'y http://www.sample.com/x.html')
self.assertEqual(textlib.replaceExcept('<gallery>x</gallery>',
'x', 'y', ['gallery'],
site=self.site),
'<gallery>x</gallery>')
self.assertEqual(textlib.replaceExcept('[[x]]', 'x', 'y', ['link'],
site=self.site),
'[[x]]')
self.assertEqual(textlib.replaceExcept('{{#property:p171}}', '1', '2',
['property'], site=self.site),
'{{#property:p171}}')
self.assertEqual(textlib.replaceExcept('{{#invoke:x}}', 'x', 'y',
['invoke'], site=self.site),
'{{#invoke:x}}')
self.assertEqual(
textlib.replaceExcept(
'<ref name=etwa /> not_in_ref <ref> in_ref </ref>',
'not_in_ref', 'text', ['ref'], site=self.site),
'<ref name=etwa /> text <ref> in_ref </ref>')
self.assertEqual(
textlib.replaceExcept(
'<ab> content </a>', 'content', 'text', ['a'], site=self.site),
'<ab> text </a>')
def test_replace_with_count(self):
"""Test replacing with count argument."""
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=5),
'y [[y]] y y')
self.assertEqual(textlib.replaceExcept('x [[x]] x x', 'x', 'y', [],
site=self.site, count=2),
'y [[y]] x x')
self.assertEqual(textlib.replaceExcept(
'x [[x]] x x', 'x', 'y', ['link'], site=self.site, count=2),
'y [[x]] y x')
def test_replace_tag_category(self):
"""Test replacing not inside category links."""
for ns_name in self.site.namespaces[14]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['category'],
site=self.site),
'[[{}:x]]'.format(ns_name))
def test_replace_tag_file(self):
"""Test replacing not inside file links."""
for ns_name in self.site.namespaces[6]:
self.assertEqual(textlib.replaceExcept('[[{}:x]]'.format(ns_name),
'x', 'y', ['file'],
site=self.site),
'[[{}:x]]'.format(ns_name))
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|foo|bar x]] x',
'x', 'y', ['file'], site=self.site),
'[[File:x|foo|bar x]] y')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|]][[File:x|foo]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|]][[File:x|foo]]')
self.assertEqual(
textlib.replaceExcept(
'[[NonFile:x]]',
'x', 'y', ['file'], site=self.site),
'[[NonFile:y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:]]',
'File:', 'NonFile:', ['file'], site=self.site),
'[[File:]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:x|[[foo]].]]',
'x', 'y', ['file'], site=self.site),
'[[File:x|[[foo]].]]')
# ensure only links inside file are captured
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]][[bar]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]][[bar]].x]][[y]]')
# Correctly handle single brackets in the text.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [bar].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [bar].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[bar] [[foo]] .x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[bar] [[foo]] .x]][[y]]')
def test_replace_tag_file_invalid(self):
"""Test replacing not inside file links with invalid titles."""
# Correctly handle [ and ] inside wikilinks inside file link
# even though these are an invalid title.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid ]].x]][[y]]')
@unittest.expectedFailure
def test_replace_tag_file_failure(self):
"""Test showing limits of the file link regex."""
# When the double brackets are unbalanced, the regex
# does not correctly detect the end of the file link.
self.assertEqual(
textlib.replaceExcept(
'[[File:a|[[foo]] [[bar [[invalid ]].x]][[x]]',
'x', 'y', ['file'], site=self.site),
'[[File:a|[[foo]] [[bar [invalid] ]].x]][[y]]')
def test_replace_tags_interwiki(self):
"""Test replacing not inside interwiki links."""
if ('es' not in self.site.family.langs
or 'ey' in self.site.family.langs):
raise unittest.SkipTest("family {} doesn't have languages"
.format(self.site))
self.assertEqual(textlib.replaceExcept('[[es:s]]', 's', 't',
['interwiki'], site=self.site),
'[[es:s]]') # "es" is a valid interwiki code
self.assertEqual(textlib.replaceExcept('[[ex:x]]', 'x', 'y',
['interwiki'], site=self.site),
'[[ey:y]]') # "ex" is not a valid interwiki code
def test_replace_template(self):
"""Test replacing not inside templates."""
template_sample = (r'a {{templatename '
r' | accessdate={{Fecha|1993}} '
r' |atitle=The [[real title]] }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{a}}2{{a}} '
r' | 2={{a}}1{{a}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
template_sample = (r'a {{templatename '
r' | 1={{{a}}}2{{{a}}} '
r' | 2={{{a}}}1{{{a}}} }}')
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
# sf.net bug 1575: unclosed template
template_sample = template_sample[:-2]
self.assertEqual(textlib.replaceExcept(template_sample, 'a', 'X',
['template'], site=self.site),
'X' + template_sample[1:])
def test_replace_source_reference(self):
"""Test replacing in text which contains back references."""
# Don't use a valid reference number in the original string,
# in case it tries to apply that as a reference.
self.assertEqual(textlib.replaceExcept(r'\42', r'^(.*)$', r'X\1X',
[], site=self.site),
r'X\42X')
self.assertEqual(textlib.replaceExcept(
r'\g<bar>', r'^(?P<foo>.*)$', r'X\g<foo>X', [], site=self.site),
r'X\g<bar>X')
class TestMultiTemplateMatchBuilder(DefaultDrySiteTestCase):
"""Test _MultiTemplateMatchBuilder."""
@classmethod
def setUpClass(cls):
"""Cache namespace 10 (Template) case sensitivity."""
super(TestMultiTemplateMatchBuilder, cls).setUpClass()
cls._template_not_case_sensitive = (
cls.get_site().namespaces.TEMPLATE.case != 'case-sensitive')
def test_no_match(self):
"""Test text without any desired templates."""
string = 'The quick brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNone(re.search(builder.pattern('quick'), string))
def test_match(self):
"""Test text with one match without parameters."""
string = 'The {{quick}} brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_with_params(self):
"""Test text with one match with parameters."""
string = 'The {{quick|brown}} fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_msg(self):
"""Test text with {{msg:..}}."""
string = 'The {{msg:quick}} brown fox'
builder = _MultiTemplateMatchBuilder(self.site)
self.assertIsNotNone(re.search(builder.pattern('quick'), string))
self.assertEqual(bool(re.search(builder.pattern('Quick'), string)),
self._template_not_case_sensitive)
def test_match_template_prefix(self):
"""Test pages with {{template:..}}."""
string = 'The {{%s:%s}} brown fox'
template = 'template'
builder = _MultiTemplateMatchBuilder(self.site)
if self._template_not_case_sensitive:
quick_list = ('quick', 'Quick')
else:
quick_list = ('quick', )
for t in (template.upper(), template.lower(), template.title()):
for q in quick_list:
self.assertIsNotNone(re.search(builder.pattern('quick'),
string % (t, q)))
self.assertEqual(bool(re.search(builder.pattern('Quick'),
string % (t, q))),
self._template_not_case_sensitive)
class TestGetLanguageLinks(SiteAttributeTestCase):
"""Test L{textlib.getLanguageLinks} function."""
sites = {
'enwp': {
'family': 'wikipedia',
'code': 'en',
},
'dewp': {
'family': 'wikipedia',
'code': 'de',
},
'commons': {
'family': 'commons',
'code': 'commons',
},
}
example_text = ('[[en:Site]] [[de:Site|Piped]] [[commons:Site]] '
'[[baden:Site]] [[fr:{{PAGENAME}}]]')
@classmethod
def setUpClass(cls):
"""Define set of valid targets for the example text."""
super(TestGetLanguageLinks, cls).setUpClass()
cls.sites_set = {cls.enwp, cls.dewp}
def test_getLanguageLinks(self, key):
"""Test if the function returns the correct titles and sites."""
with mock.patch('pywikibot.output') as m:
lang_links = textlib.getLanguageLinks(self.example_text,
self.site)
m.assert_called_once_with(
'[getLanguageLinks] Text contains invalid interwiki link '
'[[fr:{{PAGENAME}}]].')
self.assertEqual({page.title() for page in lang_links.values()},
{'Site'})
self.assertEqual(set(lang_links), self.sites_set - {self.site})
class TestUnescape(TestCase):
"""Test to verify that unescaping HTML chars are correctly done."""
net = False
def test_unescape(self):
"""Test unescaping HTML chars."""
self.assertEqual(textlib.unescape('!23<>'"&&'),
'!23<>\'"&&')
class TestStarList(TestCase):
"""Test starlist."""
net = False
def test_basic(self):
"""Test standardizing {{linkfa}} without parameters."""
self.assertEqual(
'foo\n{{linkfa}}\nbar\n\n',
textlib.standardize_stars('foo\n{{linkfa}}\nbar'))
def test_with_params(self):
"""Test standardizing text with {{linkfa|...}}."""
self.assertEqual(
'foo\nbar\n\n{{linkfa|...}}\n',
textlib.standardize_stars('foo\n{{linkfa|...}}\nbar'))
def test_with_sorting_params(self):
"""Test standardizing text with sorting parameters."""
self.assertEqual(
'foo\n\n{{linkfa|bar}}\n{{linkfa|de}}\n'
'{{linkfa|en}}\n{{linkfa|fr}}\n',
textlib.standardize_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}'))
def test_get_stars(self):
"""Test get_starts method."""
self.assertEqual(
['{{linkfa|en}}\n', '{{linkfa|de}}\n',
'{{linkfa|fr}}\n', '{{linkfa|bar}}'],
textlib.get_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}'))
def test_remove_stars(self):
"""Test remove_stars method."""
self.assertEqual(
'foo\n{{linkfa|en}}\n{{linkfa|fr}}\n{{linkfa|bar}}',
textlib.remove_stars(
'foo\n{{linkfa|en}}\n{{linkfa|de}}\n'
'{{linkfa|fr}}\n{{linkfa|bar}}', ['{{linkfa|de}}\n']))
def test_append_stars(self):
"""Test append_stars method."""
self.assertEqual(
'foo\n\n{{linkfa|bar}}\n{{linkfa|de}}\n'
'{{linkfa|en}}\n{{linkfa|fr}}\n',
textlib.append_stars(
'foo', ['{{linkfa|en}}\n', '{{linkfa|de}}\n',
'{{linkfa|fr}}\n', '{{linkfa|bar}}']))
class TestExtractSections(DefaultDrySiteTestCase):
"""Test the extract_sections function."""
def test_no_sections_no_footer(self):
"""Test for text having no sections or footer."""
self.assertEqual(
extract_sections('text', self.site),
('text', [], '')
)
def test_no_sections_with_footer(self):
"""Test for text having footer but no section."""
self.assertEqual(
extract_sections('text\n\n[[Category:A]]', self.site),
('text\n\n', [], '[[Category:A]]')
)
def test_with_section_no_footer(self):
"""Test for text having sections but no footer."""
self.assertEqual(
extract_sections(
'text\n\n'
'==title==\n'
'content',
self.site),
('text\n\n', [('==title==', '\ncontent')], '')
)
def test_with_section_with_footer(self):
"""Test for text having sections and footer."""
self.assertEqual(
extract_sections(
'text\n\n'
'==title==\n'
'content\n'
'[[Category:A]]\n',
self.site),
('text\n\n', [('==title==', '\ncontent\n')], '[[Category:A]]\n')
)
def test_with_h1_and_h2_sections(self):
"""Test for text having h1 and h2 sections."""
self.assertEqual(
extract_sections(
'text\n\n'
'=first level=\n'
'foo\n'
'==title==\n'
'bar',
self.site),
('text\n\n',
[('=first level=', '\nfoo\n'), ('==title==', '\nbar')],
'')
)
def test_with_h4_and_h2_sections(self):
"""Test for text having h4 and h2 sections."""
self.assertEqual(
extract_sections(
'text\n\n'
'====title====\n'
'==title 2==\n'
'content',
self.site),
('text\n\n',
[('====title====', '\n'), ('==title 2==', '\ncontent')],
'')
)
def test_long_comment(self):
r"""Test for text having a long expanse of white space.
This is to catch certain regex issues caused by patterns like
r'(\s+)*$' (as found in older versions of extract_section).
They may not halt.
c.f.
https://www.regular-expressions.info/catastrophic.html
"""
text = '<!-- -->'
self.assertEqual(
extract_sections(text, self.site),
(text, [], '')
)
if __name__ == '__main__': # pragma: no cover
try:
unittest.main()
except SystemExit:
pass
|
mit
| -4,886,598,638,372,700,000
| 41.743228
| 79
| 0.479054
| false
| 4.052705
| true
| false
| false
|
mwclient/mwclient
|
mwclient/errors.py
|
1
|
2408
|
class MwClientError(RuntimeError):
pass
class MediaWikiVersionError(MwClientError):
pass
class APIDisabledError(MwClientError):
pass
class MaximumRetriesExceeded(MwClientError):
pass
class APIError(MwClientError):
def __init__(self, code, info, kwargs):
self.code = code
self.info = info
super(APIError, self).__init__(code, info, kwargs)
class InsufficientPermission(MwClientError):
pass
class UserBlocked(InsufficientPermission):
pass
class EditError(MwClientError):
pass
class ProtectedPageError(EditError, InsufficientPermission):
def __init__(self, page, code=None, info=None):
self.page = page
self.code = code
self.info = info
def __str__(self):
if self.info is not None:
return self.info
return 'You do not have the "edit" right.'
class FileExists(EditError):
pass
class LoginError(MwClientError):
def __init__(self, site, code, info):
super(LoginError, self).__init__(
site,
            {'result': code, 'reason': info} # For backwards-compatibility
)
self.site = site
self.code = code
self.info = info
def __str__(self):
return self.info
class OAuthAuthorizationError(LoginError):
pass
class AssertUserFailedError(MwClientError):
def __init__(self):
super(AssertUserFailedError, self).__init__((
'By default, mwclient protects you from accidentally editing '
'without being logged in. If you actually want to edit without '
'logging in, you can set force_login on the Site object to False.'
))
def __str__(self):
return self.args[0]
class EmailError(MwClientError):
pass
class NoSpecifiedEmail(EmailError):
pass
class NoWriteApi(MwClientError):
pass
class InvalidResponse(MwClientError):
def __init__(self, response_text=None):
super(InvalidResponse, self).__init__((
'Did not get a valid JSON response from the server. Check that '
'you used the correct hostname. If you did, the server might '
'be wrongly configured or experiencing temporary problems.'),
response_text
)
self.response_text = response_text
def __str__(self):
return self.args[0]
class InvalidPageTitle(MwClientError):
pass
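# Usage sketch (illustrative, not part of the library API surface): callers
# typically wrap mwclient calls and catch the relevant subclass. The site and
# page names below are hypothetical.
#
#   import mwclient
#   site = mwclient.Site('en.wikipedia.org')
#   try:
#       site.pages['Sandbox'].save('text', summary='test edit')
#   except ProtectedPageError as err:
#       print('Cannot edit: {}'.format(err))
#   except APIError as err:
#       print('API error {}: {}'.format(err.code, err.info))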
|
mit
| 992,671,874,082,866,000
| 20.122807
| 78
| 0.634136
| false
| 4.060708
| false
| false
| false
|
demisto/content
|
Packs/Base/Scripts/CommonServerPython/CommonServerPython.py
|
1
|
291007
|
"""Common functions script
This script will be appended to each server script before being executed.
Please notice that to add custom common code, add it to the CommonServerUserPython script.
Note that adding code to CommonServerUserPython can override functions in CommonServerPython
"""
from __future__ import print_function
import base64
import json
import logging
import os
import re
import socket
import sys
import time
import traceback
from random import randint
import xml.etree.cElementTree as ET
from collections import OrderedDict
from datetime import datetime, timedelta
from abc import abstractmethod
from distutils.version import LooseVersion
from threading import Lock
import demistomock as demisto
import warnings
class WarningsHandler(object):
# Wrapper to handle warnings. We use a class to cleanup after execution
@staticmethod
def handle_warning(message, category, filename, lineno, file=None, line=None):
try:
msg = warnings.formatwarning(message, category, filename, lineno, line)
demisto.info("python warning: " + msg)
except Exception:
# ignore the warning if it can't be handled for some reason
pass
def __init__(self):
self.org_handler = warnings.showwarning
warnings.showwarning = WarningsHandler.handle_warning
def __del__(self):
warnings.showwarning = self.org_handler
_warnings_handler = WarningsHandler()
# ignore warnings from logging as a result of not being setup
logging.raiseExceptions = False
# imports something that can be missed from docker image
try:
import requests
from requests.adapters import HTTPAdapter
from urllib3.util import Retry
from typing import Optional, Dict, List, Any, Union, Set
import dateparser
from datetime import timezone # type: ignore
except Exception:
if sys.version_info[0] < 3:
# in python 2 an exception in the imports might still be raised even though it is caught.
# for more info see https://cosmicpercolator.com/2016/01/13/exception-leaks-in-python-2-and-3/
sys.exc_clear()
CONTENT_RELEASE_VERSION = '0.0.0'
CONTENT_BRANCH_NAME = 'master'
IS_PY3 = sys.version_info[0] == 3
STIX_PREFIX = "STIX "
# pylint: disable=undefined-variable
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
if IS_PY3:
STRING_TYPES = (str, bytes) # type: ignore
STRING_OBJ_TYPES = (str,)
else:
STRING_TYPES = (str, unicode) # type: ignore # noqa: F821
STRING_OBJ_TYPES = STRING_TYPES # type: ignore
# pylint: enable=undefined-variable
# DEPRECATED - use EntryType enum instead
entryTypes = {
'note': 1,
'downloadAgent': 2,
'file': 3,
'error': 4,
'pinned': 5,
'userManagement': 6,
'image': 7,
'playgroundError': 8,
'entryInfoFile': 9,
'warning': 11,
'map': 15,
'widget': 17
}
ENDPOINT_STATUS_OPTIONS = [
'Online',
'Offline'
]
ENDPOINT_ISISOLATED_OPTIONS = [
'Yes',
'No',
'Pending isolation',
'Pending unisolation'
]
class EntryType(object):
"""
Enum: contains all the entry types (e.g. NOTE, ERROR, WARNING, FILE, etc.)
:return: None
:rtype: ``None``
"""
NOTE = 1
DOWNLOAD_AGENT = 2
FILE = 3
ERROR = 4
PINNED = 5
USER_MANAGEMENT = 6
IMAGE = 7
PLAYGROUND_ERROR = 8
ENTRY_INFO_FILE = 9
WARNING = 11
MAP_ENTRY_TYPE = 15
WIDGET = 17
class IncidentStatus(object):
"""
Enum: contains all the incidents status types (e.g. pending, active, done, archive)
:return: None
:rtype: ``None``
"""
PENDING = 0
ACTIVE = 1
DONE = 2
ARCHIVE = 3
class IncidentSeverity(object):
"""
Enum: contains all the incident severity types
:return: None
:rtype: ``None``
"""
UNKNOWN = 0
INFO = 0.5
LOW = 1
MEDIUM = 2
HIGH = 3
CRITICAL = 4
# DEPRECATED - use EntryFormat enum instead
formats = {
'html': 'html',
'table': 'table',
'json': 'json',
'text': 'text',
'dbotResponse': 'dbotCommandResponse',
'markdown': 'markdown'
}
class EntryFormat(object):
"""
Enum: contains all the entry formats (e.g. HTML, TABLE, JSON, etc.)
"""
HTML = 'html'
TABLE = 'table'
JSON = 'json'
TEXT = 'text'
DBOT_RESPONSE = 'dbotCommandResponse'
MARKDOWN = 'markdown'
@classmethod
def is_valid_type(cls, _type):
# type: (str) -> bool
return _type in (
EntryFormat.HTML,
EntryFormat.TABLE,
EntryFormat.JSON,
EntryFormat.TEXT,
EntryFormat.MARKDOWN,
EntryFormat.DBOT_RESPONSE
)
brands = {
'xfe': 'xfe',
'vt': 'virustotal',
'wf': 'WildFire',
'cy': 'cylance',
'cs': 'crowdstrike-intel'
}
providers = {
'xfe': 'IBM X-Force Exchange',
'vt': 'VirusTotal',
'wf': 'WildFire',
'cy': 'Cylance',
'cs': 'CrowdStrike'
}
thresholds = {
'xfeScore': 4,
'vtPositives': 10,
'vtPositiveUrlsForIP': 30
}
class DBotScoreType(object):
"""
Enum: contains all the indicator types
DBotScoreType.IP
DBotScoreType.FILE
DBotScoreType.DOMAIN
DBotScoreType.URL
DBotScoreType.CVE
DBotScoreType.ACCOUNT
DBotScoreType.CRYPTOCURRENCY
DBotScoreType.EMAIL
:return: None
:rtype: ``None``
"""
IP = 'ip'
FILE = 'file'
DOMAIN = 'domain'
URL = 'url'
CVE = 'cve'
ACCOUNT = 'account'
    CIDR = 'cidr'
DOMAINGLOB = 'domainglob'
CERTIFICATE = 'certificate'
CRYPTOCURRENCY = 'cryptocurrency'
EMAIL = 'email'
def __init__(self):
# required to create __init__ for create_server_docs.py purpose
pass
@classmethod
def is_valid_type(cls, _type):
# type: (str) -> bool
return _type in (
DBotScoreType.IP,
DBotScoreType.FILE,
DBotScoreType.DOMAIN,
DBotScoreType.URL,
DBotScoreType.CVE,
DBotScoreType.ACCOUNT,
DBotScoreType.CIDR,
DBotScoreType.DOMAINGLOB,
DBotScoreType.CERTIFICATE,
DBotScoreType.CRYPTOCURRENCY,
DBotScoreType.EMAIL,
)
class DBotScoreReliability(object):
"""
Enum: Source reliability levels
Values are case sensitive
:return: None
:rtype: ``None``
"""
A_PLUS = 'A+ - 3rd party enrichment'
A = 'A - Completely reliable'
B = 'B - Usually reliable'
C = 'C - Fairly reliable'
D = 'D - Not usually reliable'
E = 'E - Unreliable'
F = 'F - Reliability cannot be judged'
def __init__(self):
# required to create __init__ for create_server_docs.py purpose
pass
@staticmethod
def is_valid_type(_type):
# type: (str) -> bool
return _type in (
DBotScoreReliability.A_PLUS,
DBotScoreReliability.A,
DBotScoreReliability.B,
DBotScoreReliability.C,
DBotScoreReliability.D,
DBotScoreReliability.E,
DBotScoreReliability.F,
)
@staticmethod
def get_dbot_score_reliability_from_str(reliability_str):
if reliability_str == DBotScoreReliability.A_PLUS:
return DBotScoreReliability.A_PLUS
elif reliability_str == DBotScoreReliability.A:
return DBotScoreReliability.A
elif reliability_str == DBotScoreReliability.B:
return DBotScoreReliability.B
elif reliability_str == DBotScoreReliability.C:
return DBotScoreReliability.C
elif reliability_str == DBotScoreReliability.D:
return DBotScoreReliability.D
elif reliability_str == DBotScoreReliability.E:
return DBotScoreReliability.E
elif reliability_str == DBotScoreReliability.F:
return DBotScoreReliability.F
raise Exception("Please use supported reliability only.")
INDICATOR_TYPE_TO_CONTEXT_KEY = {
'ip': 'Address',
'email': 'Address',
'url': 'Data',
'domain': 'Name',
'cve': 'ID',
'md5': 'file',
'sha1': 'file',
'sha256': 'file',
'crc32': 'file',
'sha512': 'file',
'ctph': 'file',
'ssdeep': 'file'
}
class FeedIndicatorType(object):
"""Type of Indicator (Reputations), used in TIP integrations"""
Account = "Account"
CVE = "CVE"
Domain = "Domain"
DomainGlob = "DomainGlob"
Email = "Email"
File = "File"
FQDN = "Domain"
Host = "Host"
IP = "IP"
CIDR = "CIDR"
IPv6 = "IPv6"
IPv6CIDR = "IPv6CIDR"
Registry = "Registry Key"
SSDeep = "ssdeep"
URL = "URL"
@staticmethod
def is_valid_type(_type):
return _type in (
FeedIndicatorType.Account,
FeedIndicatorType.CVE,
FeedIndicatorType.Domain,
FeedIndicatorType.DomainGlob,
FeedIndicatorType.Email,
FeedIndicatorType.File,
FeedIndicatorType.Host,
FeedIndicatorType.IP,
FeedIndicatorType.CIDR,
FeedIndicatorType.IPv6,
FeedIndicatorType.IPv6CIDR,
FeedIndicatorType.Registry,
FeedIndicatorType.SSDeep,
FeedIndicatorType.URL
)
@staticmethod
def list_all_supported_indicators():
indicator_types = []
for key, val in vars(FeedIndicatorType).items():
if not key.startswith('__') and type(val) == str:
indicator_types.append(val)
return indicator_types
@staticmethod
def ip_to_indicator_type(ip):
"""Returns the indicator type of the input IP.
:type ip: ``str``
:param ip: IP address to get it's indicator type.
:rtype: ``str``
        :return: Indicator type from FeedIndicatorType, or None if invalid IP address.
"""
if re.match(ipv4cidrRegex, ip):
return FeedIndicatorType.CIDR
elif re.match(ipv4Regex, ip):
return FeedIndicatorType.IP
elif re.match(ipv6cidrRegex, ip):
return FeedIndicatorType.IPv6CIDR
elif re.match(ipv6Regex, ip):
return FeedIndicatorType.IPv6
else:
return None
@staticmethod
def indicator_type_by_server_version(indicator_type):
"""Returns the indicator type of the input by the server version.
If the server version is 6.2 and greater, remove the STIX prefix of the type
:type indicator_type: ``str``
:param indicator_type: Type of an indicator.
:rtype: ``str``
        :return: Indicator type.
"""
if is_demisto_version_ge("6.2.0") and indicator_type.startswith(STIX_PREFIX):
return indicator_type[len(STIX_PREFIX):]
return indicator_type
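# Illustrative results for FeedIndicatorType.ip_to_indicator_type (a sketch;
# the exact matches depend on the ip regexes defined elsewhere in this file):
#   FeedIndicatorType.ip_to_indicator_type('10.0.0.0/8')  -> 'CIDR'
#   FeedIndicatorType.ip_to_indicator_type('10.0.0.1')    -> 'IP'
#   FeedIndicatorType.ip_to_indicator_type('::1')         -> 'IPv6'
#   FeedIndicatorType.ip_to_indicator_type('not-an-ip')   -> None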
# -------------------------------- Threat Intel Objects ----------------------------------- #
class ThreatIntel:
"""
XSOAR Threat Intel Objects
:return: None
:rtype: ``None``
"""
class ObjectsNames(object):
"""
Enum: Threat Intel Objects names.
:return: None
:rtype: ``None``
"""
CAMPAIGN = 'Campaign'
ATTACK_PATTERN = 'Attack Pattern'
REPORT = 'Report'
MALWARE = 'Malware'
COURSE_OF_ACTION = 'Course of Action'
INTRUSION_SET = 'Intrusion Set'
TOOL = 'Tool'
class ObjectsScore(object):
"""
Enum: Threat Intel Objects Score.
:return: None
:rtype: ``None``
"""
CAMPAIGN = 3
ATTACK_PATTERN = 2
REPORT = 3
MALWARE = 3
COURSE_OF_ACTION = 0
INTRUSION_SET = 3
TOOL = 2
class KillChainPhases(object):
"""
Enum: Kill Chain Phases names.
:return: None
:rtype: ``None``
"""
BUILD_CAPABILITIES = "Build Capabilities"
PRIVILEGE_ESCALATION = "Privilege Escalation"
ADVERSARY_OPSEC = "Adversary Opsec"
CREDENTIAL_ACCESS = "Credential Access"
EXFILTRATION = "Exfiltration"
LATERAL_MOVEMENT = "Lateral Movement"
DEFENSE_EVASION = "Defense Evasion"
PERSISTENCE = "Persistence"
COLLECTION = "Collection"
IMPACT = "Impact"
INITIAL_ACCESS = "Initial Access"
DISCOVERY = "Discovery"
EXECUTION = "Execution"
INSTALLATION = "Installation"
DELIVERY = "Delivery"
WEAPONIZATION = "Weaponization"
ACT_ON_OBJECTIVES = "Actions on Objectives"
COMMAND_AND_CONTROL = "Command \u0026 Control"
def is_debug_mode():
"""Return if this script/command was passed debug-mode=true option
:return: true if debug-mode is enabled
:rtype: ``bool``
"""
# use `hasattr(demisto, 'is_debug')` to ensure compatibility with server version <= 4.5
return hasattr(demisto, 'is_debug') and demisto.is_debug
def get_schedule_metadata(context):
"""
Get the entry schedule metadata if available
:type context: ``dict``
:param context: Context in which the command was executed.
:return: Dict with metadata of scheduled entry
:rtype: ``dict``
"""
schedule_metadata = {}
parent_entry = context.get('ParentEntry', {})
if parent_entry:
schedule_metadata = assign_params(
is_polling=True if parent_entry.get('polling') else False,
polling_command=parent_entry.get('pollingCommand'),
polling_args=parent_entry.get('pollingArgs'),
times_ran=int(parent_entry.get('timesRan', 0)) + 1,
start_date=parent_entry.get('startDate'),
end_date=parent_entry.get('endingDate')
)
return schedule_metadata
def auto_detect_indicator_type(indicator_value):
"""
Infer the type of the indicator.
:type indicator_value: ``str``
:param indicator_value: The indicator whose type we want to check. (required)
:return: The type of the indicator.
:rtype: ``str``
"""
try:
import tldextract
except Exception:
raise Exception("Missing tldextract module, In order to use the auto detect function please use a docker"
" image with it installed such as: demisto/jmespath")
if re.match(ipv4cidrRegex, indicator_value):
return FeedIndicatorType.CIDR
if re.match(ipv6cidrRegex, indicator_value):
return FeedIndicatorType.IPv6CIDR
if re.match(ipv4Regex, indicator_value):
return FeedIndicatorType.IP
if re.match(ipv6Regex, indicator_value):
return FeedIndicatorType.IPv6
if re.match(sha256Regex, indicator_value):
return FeedIndicatorType.File
if re.match(urlRegex, indicator_value):
return FeedIndicatorType.URL
if re.match(md5Regex, indicator_value):
return FeedIndicatorType.File
if re.match(sha1Regex, indicator_value):
return FeedIndicatorType.File
if re.match(emailRegex, indicator_value):
return FeedIndicatorType.Email
if re.match(cveRegex, indicator_value):
return FeedIndicatorType.CVE
if re.match(sha512Regex, indicator_value):
return FeedIndicatorType.File
try:
tldextract_version = tldextract.__version__
if LooseVersion(tldextract_version) < '3.0.0':
no_cache_extract = tldextract.TLDExtract(cache_file=False, suffix_list_urls=None)
else:
no_cache_extract = tldextract.TLDExtract(cache_dir=False, suffix_list_urls=None)
if no_cache_extract(indicator_value).suffix:
if '*' in indicator_value:
return FeedIndicatorType.DomainGlob
return FeedIndicatorType.Domain
except Exception:
demisto.debug('tldextract failed to detect indicator type. indicator value: {}'.format(indicator_value))
demisto.debug('Failed to detect indicator type. Indicator value: {}'.format(indicator_value))
return None
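# Illustrative results (a sketch; exact matches depend on the regexes above and
# on the tldextract version available in the docker image):
#   auto_detect_indicator_type('8.8.8.8')         -> FeedIndicatorType.IP
#   auto_detect_indicator_type('CVE-2021-44228')  -> FeedIndicatorType.CVE
#   auto_detect_indicator_type('example.com')     -> FeedIndicatorType.Domain
#   auto_detect_indicator_type('no match here')   -> None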
def handle_proxy(proxy_param_name='proxy', checkbox_default_value=False, handle_insecure=True,
insecure_param_name=None):
"""
Handle logic for routing traffic through the system proxy.
Should usually be called at the beginning of the integration, depending on proxy checkbox state.
    Additionally will unset env variables REQUESTS_CA_BUNDLE and CURL_CA_BUNDLE if handle_insecure is specified (default).
    This is needed as when these variables are set and a requests.Session object is used, requests will ignore the
    Session.verify setting. See: https://github.com/psf/requests/blob/master/requests/sessions.py#L703
:type proxy_param_name: ``string``
:param proxy_param_name: name of the "use system proxy" integration parameter
:type checkbox_default_value: ``bool``
:param checkbox_default_value: Default value of the proxy param checkbox
:type handle_insecure: ``bool``
:param handle_insecure: Whether to check the insecure param and unset env variables
:type insecure_param_name: ``string``
:param insecure_param_name: Name of insecure param. If None will search insecure and unsecure
:rtype: ``dict``
:return: proxies dict for the 'proxies' parameter of 'requests' functions
"""
proxies = {} # type: dict
if demisto.params().get(proxy_param_name, checkbox_default_value):
proxies = {
'http': os.environ.get('HTTP_PROXY') or os.environ.get('http_proxy', ''),
'https': os.environ.get('HTTPS_PROXY') or os.environ.get('https_proxy', '')
}
else:
skip_proxy()
if handle_insecure:
if insecure_param_name is None:
param_names = ('insecure', 'unsecure')
else:
param_names = (insecure_param_name,) # type: ignore[assignment]
for p in param_names:
if demisto.params().get(p, False):
skip_cert_verification()
return proxies
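# Usage sketch: typically called once at integration startup. The parameter
# names 'proxy' and 'insecure' are the common defaults, not guaranteed for
# every integration:
#   proxies = handle_proxy()
#   # requests.get(url, proxies=proxies,
#   #              verify=not demisto.params().get('insecure', False))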
def skip_proxy():
"""
    The function deletes the proxy environment vars so that HTTP requests skip routing through the proxy
:return: None
:rtype: ``None``
"""
for k in ('HTTP_PROXY', 'HTTPS_PROXY', 'http_proxy', 'https_proxy'):
if k in os.environ:
del os.environ[k]
def skip_cert_verification():
"""
    The function deletes the self signed certificate env vars so that HTTP requests skip certificate validation.
:return: None
:rtype: ``None``
"""
for k in ('REQUESTS_CA_BUNDLE', 'CURL_CA_BUNDLE'):
if k in os.environ:
del os.environ[k]
def urljoin(url, suffix=""):
"""
Will join url and its suffix
Example:
"https://google.com/", "/" => "https://google.com/"
"https://google.com", "/" => "https://google.com/"
"https://google.com", "api" => "https://google.com/api"
"https://google.com", "/api" => "https://google.com/api"
"https://google.com/", "api" => "https://google.com/api"
"https://google.com/", "/api" => "https://google.com/api"
:type url: ``string``
:param url: URL string (required)
:type suffix: ``string``
:param suffix: the second part of the url
:rtype: ``string``
:return: Full joined url
"""
if url[-1:] != "/":
url = url + "/"
if suffix.startswith("/"):
suffix = suffix[1:]
return url + suffix
return url + suffix
def positiveUrl(entry):
"""
Checks if the given entry from a URL reputation query is positive (known bad) (deprecated)
:type entry: ``dict``
:param entry: URL entry (required)
:return: True if bad, false otherwise
:rtype: ``bool``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['xfe']:
return demisto.get(entry, 'Contents.url.result.score') > thresholds['xfeScore']
if entry['Brand'] == brands['vt']:
return demisto.get(entry, 'Contents.positives') > thresholds['vtPositives']
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
return demisto.get(c, 'indicator') and demisto.get(c, 'malicious_confidence') in ['high', 'medium']
return False
def positiveFile(entry):
"""
Checks if the given entry from a file reputation query is positive (known bad) (deprecated)
:type entry: ``dict``
:param entry: File entry (required)
:return: True if bad, false otherwise
:rtype: ``bool``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['xfe'] and (demisto.get(entry, 'Contents.malware.family')
or demisto.gets(entry, 'Contents.malware.origins.external.family')):
return True
if entry['Brand'] == brands['vt']:
return demisto.get(entry, 'Contents.positives') > thresholds['vtPositives']
if entry['Brand'] == brands['wf']:
return demisto.get(entry, 'Contents.wildfire.file_info.malware') == 'yes'
if entry['Brand'] == brands['cy'] and demisto.get(entry, 'Contents'):
contents = demisto.get(entry, 'Contents')
k = contents.keys()
if k and len(k) > 0:
v = contents[k[0]]
if v and demisto.get(v, 'generalscore'):
return v['generalscore'] < -0.5
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
return demisto.get(c, 'indicator') and demisto.get(c, 'malicious_confidence') in ['high', 'medium']
return False
def vtCountPositives(entry):
"""
Counts the number of detected URLs in the entry
:type entry: ``dict``
:param entry: Demisto entry (required)
:return: The number of detected URLs
:rtype: ``int``
"""
positives = 0
if demisto.get(entry, 'Contents.detected_urls'):
for detected in demisto.get(entry, 'Contents.detected_urls'):
if demisto.get(detected, 'positives') > thresholds['vtPositives']:
positives += 1
return positives
def positiveIp(entry):
"""
Checks if the given entry from a file reputation query is positive (known bad) (deprecated)
:type entry: ``dict``
:param entry: IP entry (required)
:return: True if bad, false otherwise
:rtype: ``bool``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['xfe']:
return demisto.get(entry, 'Contents.reputation.score') > thresholds['xfeScore']
if entry['Brand'] == brands['vt'] and demisto.get(entry, 'Contents.detected_urls'):
return vtCountPositives(entry) > thresholds['vtPositiveUrlsForIP']
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
return demisto.get(c, 'indicator') and demisto.get(c, 'malicious_confidence') in ['high', 'medium']
return False
def formatEpochDate(t):
"""
Convert a time expressed in seconds since the epoch to a string representing local time
:type t: ``int``
:param t: Time represented in seconds (required)
:return: A string representing local time
:rtype: ``str``
"""
if t:
return time.ctime(t)
return ''
def shortCrowdStrike(entry):
"""
Display CrowdStrike Intel results in Markdown (deprecated)
:type entry: ``dict``
:param entry: CrowdStrike result entry (required)
:return: A Demisto entry containing the shortened CrowdStrike info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
c = demisto.get(entry, 'Contents')[0]
csRes = '## CrowdStrike Falcon Intelligence'
csRes += '\n\n### Indicator - ' + demisto.gets(c, 'indicator')
labels = demisto.get(c, 'labels')
if labels:
csRes += '\n### Labels'
csRes += '\nName|Created|Last Valid'
csRes += '\n----|-------|----------'
for label in labels:
csRes += '\n' + demisto.gets(label, 'name') + '|' + \
formatEpochDate(demisto.get(label, 'created_on')) + '|' + \
formatEpochDate(demisto.get(label, 'last_valid_on'))
relations = demisto.get(c, 'relations')
if relations:
csRes += '\n### Relations'
csRes += '\nIndicator|Type|Created|Last Valid'
csRes += '\n---------|----|-------|----------'
for r in relations:
                csRes += '\n' + demisto.gets(r, 'indicator') + '|' + demisto.gets(r, 'type') + '|' + \
                         formatEpochDate(demisto.get(r, 'created_date')) + '|' + \
                         formatEpochDate(demisto.get(r, 'last_valid_date'))
return {'ContentsFormat': formats['markdown'], 'Type': entryTypes['note'], 'Contents': csRes}
return entry
def shortUrl(entry):
"""
Formats a URL reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: URL result entry (required)
:return: A Demisto entry containing the shortened URL info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
c = entry['Contents']
if entry['Brand'] == brands['xfe']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'Country': c['country'], 'MalwareCount': demisto.get(c, 'malware.count'),
'A': demisto.gets(c, 'resolution.A'), 'AAAA': demisto.gets(c, 'resolution.AAAA'),
'Score': demisto.get(c, 'url.result.score'), 'Categories': demisto.gets(c, 'url.result.cats'),
'URL': demisto.get(c, 'url.result.url'), 'Provider': providers['xfe'],
'ProviderLink': 'https://exchange.xforce.ibmcloud.com/url/' + demisto.get(c, 'url.result.url')}}
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'ScanDate': c['scan_date'], 'Positives': c['positives'], 'Total': c['total'],
'URL': c['url'], 'Provider': providers['vt'], 'ProviderLink': c['permalink']}}
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
return shortCrowdStrike(entry)
    return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
            'Contents': 'Unknown provider for result: ' + entry['Brand']}
def shortFile(entry):
"""
Formats a file reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: File result entry (required)
:return: A Demisto entry containing the shortened file info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
c = entry['Contents']
if entry['Brand'] == brands['xfe']:
cm = c['malware']
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'Family': cm['family'], 'MIMEType': cm['mimetype'], 'MD5': cm['md5'][2:] if 'md5' in cm else '',
'CnCServers': demisto.get(cm, 'origins.CncServers.count'),
'DownloadServers': demisto.get(cm, 'origins.downloadServers.count'),
'Emails': demisto.get(cm, 'origins.emails.count'),
'ExternalFamily': demisto.gets(cm, 'origins.external.family'),
'ExternalCoverage': demisto.get(cm, 'origins.external.detectionCoverage'),
'Provider': providers['xfe'],
'ProviderLink': 'https://exchange.xforce.ibmcloud.com/malware/' + cm['md5'].replace('0x', '')}}
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'Resource': c['resource'], 'ScanDate': c['scan_date'], 'Positives': c['positives'],
'Total': c['total'], 'SHA1': c['sha1'], 'SHA256': c['sha256'], 'Provider': providers['vt'],
'ProviderLink': c['permalink']}}
if entry['Brand'] == brands['wf']:
c = demisto.get(entry, 'Contents.wildfire.file_info')
if c:
return {'Contents': {'Type': c['filetype'], 'Malware': c['malware'], 'MD5': c['md5'],
'SHA256': c['sha256'], 'Size': c['size'], 'Provider': providers['wf']},
'ContentsFormat': formats['table'], 'Type': entryTypes['note']}
if entry['Brand'] == brands['cy'] and demisto.get(entry, 'Contents'):
contents = demisto.get(entry, 'Contents')
k = contents.keys()
if k and len(k) > 0:
v = contents[k[0]]
if v and demisto.get(v, 'generalscore'):
return {'Contents': {'Status': v['status'], 'Code': v['statuscode'], 'Score': v['generalscore'],
'Classifiers': str(v['classifiers']), 'ConfirmCode': v['confirmcode'],
'Error': v['error'], 'Provider': providers['cy']},
'ContentsFormat': formats['table'], 'Type': entryTypes['note']}
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
return shortCrowdStrike(entry)
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
'Contents': 'Unknown provider for result: ' + entry['Brand']}
def shortIp(entry):
"""
Formats an ip reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: IP result entry (required)
:return: A Demisto entry containing the shortened IP info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
c = entry['Contents']
if entry['Brand'] == brands['xfe']:
cr = c['reputation']
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'], 'Contents': {
'IP': cr['ip'], 'Score': cr['score'], 'Geo': str(cr['geo']), 'Categories': str(cr['cats']),
'Provider': providers['xfe']}}
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': {'Positive URLs': vtCountPositives(entry), 'Provider': providers['vt']}}
if entry['Brand'] == brands['cs'] and demisto.get(entry, 'Contents'):
return shortCrowdStrike(entry)
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
'Contents': 'Unknown provider for result: ' + entry['Brand']}
def shortDomain(entry):
"""
Formats a domain reputation entry into a short table (deprecated)
:type entry: ``dict``
:param entry: Domain result entry (required)
:return: A Demisto entry containing the shortened domain info
:rtype: ``dict``
"""
if entry['Type'] != entryTypes['error'] and entry['ContentsFormat'] == formats['json']:
if entry['Brand'] == brands['vt']:
return {'ContentsFormat': formats['table'], 'Type': entryTypes['note'],
'Contents': {'Positive URLs': vtCountPositives(entry), 'Provider': providers['vt']}}
return {'ContentsFormat': formats['text'], 'Type': entryTypes['error'],
'Contents': 'Unknown provider for result: ' + entry['Brand']}
def get_error(execute_command_result):
"""
execute_command_result must contain error entry - check the result first with is_error function
if there is no error entry in the result then it will raise an Exception
:type execute_command_result: ``dict`` or ``list``
:param execute_command_result: result of demisto.executeCommand()
:return: Error message extracted from the demisto.executeCommand() result
:rtype: ``string``
"""
if not is_error(execute_command_result):
raise ValueError("execute_command_result has no error entry. before using get_error use is_error")
if isinstance(execute_command_result, dict):
return execute_command_result['Contents']
error_messages = []
for entry in execute_command_result:
is_error_entry = type(entry) == dict and entry['Type'] == entryTypes['error']
if is_error_entry:
error_messages.append(entry['Contents'])
return '\n'.join(error_messages)
def is_error(execute_command_result):
"""
Check if the given execute_command_result has an error entry
:type execute_command_result: ``dict`` or ``list``
:param execute_command_result: Demisto entry (required) or result of demisto.executeCommand()
:return: True if the execute_command_result has an error entry, false otherwise
:rtype: ``bool``
"""
if execute_command_result is None:
return False
if isinstance(execute_command_result, list):
if len(execute_command_result) > 0:
for entry in execute_command_result:
if type(entry) == dict and entry['Type'] == entryTypes['error']:
return True
return type(execute_command_result) == dict and execute_command_result['Type'] == entryTypes['error']
isError = is_error
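# Usage sketch: the common pattern around demisto.executeCommand() results
# ('some-command' is hypothetical; return_error is defined later in this file):
#   res = demisto.executeCommand('some-command', {'arg': 'value'})
#   if is_error(res):
#       return_error(get_error(res))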
def FormatADTimestamp(ts):
"""
Formats an Active Directory timestamp into human readable time representation
:type ts: ``int``
:param ts: The timestamp to be formatted (required)
    :return: A string representing the time
:rtype: ``str``
"""
return (datetime(year=1601, month=1, day=1) + timedelta(seconds=int(ts) / 10 ** 7)).ctime()
def PrettifyCompactedTimestamp(x):
"""
Formats a compacted timestamp string into human readable time representation
:type x: ``str``
:param x: The timestamp to be formatted (required)
    :return: A string representing the time
:rtype: ``str``
"""
return '%s-%s-%sT%s:%s:%s' % (x[:4], x[4:6], x[6:8], x[8:10], x[10:12], x[12:])
def NormalizeRegistryPath(strRegistryPath):
"""
Normalizes a registry path string
:type strRegistryPath: ``str``
:param strRegistryPath: The registry path (required)
:return: The normalized string
:rtype: ``str``
"""
dSub = {
'HKCR': 'HKEY_CLASSES_ROOT',
'HKCU': 'HKEY_CURRENT_USER',
'HKLM': 'HKEY_LOCAL_MACHINE',
'HKU': 'HKEY_USERS',
'HKCC': 'HKEY_CURRENT_CONFIG',
'HKPD': 'HKEY_PERFORMANCE_DATA'
}
for k in dSub:
if strRegistryPath[:len(k)] == k:
return dSub[k] + strRegistryPath[len(k):]
return strRegistryPath
def scoreToReputation(score):
"""
Converts score (in number format) to human readable reputation format
:type score: ``int``
:param score: The score to be formatted (required)
:return: The formatted score
:rtype: ``str``
"""
to_str = {
4: 'Critical',
3: 'Bad',
2: 'Suspicious',
1: 'Good',
0.5: 'Informational',
0: 'Unknown'
}
return to_str.get(score, 'None')
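# Illustrative mapping: scoreToReputation(3) -> 'Bad', scoreToReputation(0.5)
# -> 'Informational'; an unmapped score such as 42 falls back to 'None'.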
def b64_encode(text):
"""
Base64 encode a string. Wrapper function around base64.b64encode which will accept a string
In py3 will encode the string to binary using utf-8 encoding and return a string result decoded using utf-8
:param text: string to encode
:type text: str
:return: encoded string
:rtype: str
"""
if not text:
return ''
elif isinstance(text, bytes):
to_encode = text
else:
to_encode = text.encode('utf-8', 'ignore')
res = base64.b64encode(to_encode)
if IS_PY3:
res = res.decode('utf-8') # type: ignore
return res
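# Illustrative: b64_encode('user:pass') and b64_encode(b'user:pass') both
# return 'dXNlcjpwYXNz' (a str under py3).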
def encode_string_results(text):
"""
Encode string as utf-8, if any unicode character exists.
:param text: string to encode
:type text: str
:return: encoded string
:rtype: str
"""
if not isinstance(text, STRING_OBJ_TYPES):
return text
try:
return str(text)
except UnicodeEncodeError:
return text.encode("utf8", "replace")
def safe_load_json(json_object):
"""
Safely loads a JSON object from an argument. Allows the argument to accept either a JSON in string form,
or an entry ID corresponding to a JSON file.
:param json_object: Entry ID or JSON string.
:type json_object: str
:return: Dictionary object from a parsed JSON file or string.
:rtype: dict
"""
safe_json = None
if isinstance(json_object, dict) or isinstance(json_object, list):
return json_object
if (json_object.startswith('{') and json_object.endswith('}')) or (
json_object.startswith('[') and json_object.endswith(']')):
try:
safe_json = json.loads(json_object)
except ValueError as e:
return_error(
'Unable to parse JSON string. Please verify the JSON is valid. - ' + str(e))
else:
try:
path = demisto.getFilePath(json_object)
with open(path['path'], 'rb') as data:
try:
safe_json = json.load(data)
except Exception: # lgtm [py/catch-base-exception]
safe_json = json.loads(data.read())
except Exception as e:
return_error('Unable to parse JSON file. Please verify the JSON is valid or the Entry'
'ID is correct. - ' + str(e))
return safe_json
def datetime_to_string(datetime_obj):
"""
Converts a datetime object into a string. When used with `json.dumps()` for the `default` parameter,
e.g. `json.dumps(response, default=datetime_to_string)` datetime_to_string allows entire JSON objects
to be safely added to context without causing any datetime marshalling errors.
:param datetime_obj: Datetime object.
:type datetime_obj: datetime.datetime
:return: String representation of a datetime object.
:rtype: str
"""
if isinstance(datetime_obj, datetime): # type: ignore
return datetime_obj.__str__()
def remove_empty_elements(d):
"""
Recursively remove empty lists, empty dicts, or None elements from a dictionary.
:param d: Input dictionary.
:type d: dict
:return: Dictionary with all empty lists, and empty dictionaries removed.
:rtype: dict
"""
def empty(x):
return x is None or x == {} or x == []
if not isinstance(d, (dict, list)):
return d
elif isinstance(d, list):
return [v for v in (remove_empty_elements(v) for v in d) if not empty(v)]
else:
return {k: v for k, v in ((k, remove_empty_elements(v)) for k, v in d.items()) if not empty(v)}
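# Illustrative:
#   remove_empty_elements({'a': 1, 'b': None, 'c': {}, 'd': [{}, {'e': 2}]})
#   -> {'a': 1, 'd': [{'e': 2}]}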
class SmartGetDict(dict):
"""A dict that when called with get(key, default) will return the default passed
value, even if there is a value of "None" in the place of the key. Example with built-in dict:
```
>>> d = {}
>>> d['test'] = None
>>> d.get('test', 1)
>>> print(d.get('test', 1))
None
```
Example with SmartGetDict:
```
>>> d = SmartGetDict()
>>> d['test'] = None
>>> d.get('test', 1)
>>> print(d.get('test', 1))
1
```
:return: SmartGetDict
:rtype: ``SmartGetDict``
"""
def get(self, key, default=None):
res = dict.get(self, key)
if res is not None:
return res
return default
if (not os.getenv('COMMON_SERVER_NO_AUTO_PARAMS_REMOVE_NULLS')) and hasattr(demisto, 'params') and demisto.params():
demisto.callingContext['params'] = SmartGetDict(demisto.params())
def aws_table_to_markdown(response, table_header):
"""
Converts a raw response from AWS into a markdown formatted table. This function checks to see if
there is only one nested dict in the top level of the dictionary and will use the nested data.
:param response: Raw response from AWS
:type response: dict
:param table_header: The header string to use for the table.
:type table_header: str
:return: Markdown formatted table as a string.
:rtype: str
"""
if isinstance(response, dict):
if len(response) == 1:
if isinstance(response[list(response.keys())[0]], dict) or isinstance(
response[list(response.keys())[0]], list):
if isinstance(response[list(response.keys())[0]], list):
list_response = response[list(response.keys())[0]]
if not list_response:
human_readable = tableToMarkdown(table_header, list_response)
elif isinstance(list_response[0], str):
human_readable = tableToMarkdown(
table_header, response)
else:
human_readable = tableToMarkdown(
table_header, response[list(response.keys())[0]])
else:
human_readable = tableToMarkdown(
table_header, response[list(response.keys())[0]])
else:
human_readable = tableToMarkdown(table_header, response)
else:
human_readable = tableToMarkdown(table_header, response)
else:
human_readable = tableToMarkdown(table_header, response)
return human_readable
def stringEscape(st):
"""
Escape newline chars in the given string.
:type st: ``str``
:param st: The string to be modified (required).
:return: A modified string.
:rtype: ``str``
"""
return st.replace('\r', '\\r').replace('\n', '\\n').replace('\t', '\\t')
def stringUnEscape(st):
"""
Unescape newline chars in the given string.
:type st: ``str``
:param st: The string to be modified (required).
:return: A modified string.
:rtype: ``str``
"""
return st.replace('\\r', '\r').replace('\\n', '\n').replace('\\t', '\t')
class IntegrationLogger(object):
"""
a logger for python integrations:
use LOG(<message>) to add a record to the logger (message can be any object with __str__)
use LOG.print_log(verbose=True/False) to display all records in War-Room (if verbose) and server log.
use add_replace_strs to add sensitive strings that should be replaced before going to the log.
:type message: ``str``
:param message: The message to be logged
:return: No data returned
:rtype: ``None``
"""
def __init__(self, debug_logging=False):
self.messages = [] # type: list
self.write_buf = [] # type: list
self.replace_strs = [] # type: list
self.curl = [] # type: list
self.buffering = True
self.debug_logging = debug_logging
# if for some reason you don't want to auto add credentials.password to replace strings
# set the os env COMMON_SERVER_NO_AUTO_REPLACE_STRS. Either in CommonServerUserPython, or docker env
if (not os.getenv('COMMON_SERVER_NO_AUTO_REPLACE_STRS') and hasattr(demisto, 'getParam')):
# add common params
sensitive_params = ('key', 'private', 'password', 'secret', 'token', 'credentials', 'service_account')
if demisto.params():
                self._iter_sensitive_dict_obj(demisto.params(), sensitive_params)
    def _iter_sensitive_dict_obj(self, dict_obj, sensitive_params):
        for (k, v) in dict_obj.items():
            if isinstance(v, dict): # credentials object case. recurse into the object
                self._iter_sensitive_dict_obj(v, sensitive_params)
if v.get('identifier') and v.get('password'): # also add basic auth case
basic_auth = '{}:{}'.format(v.get('identifier'), v.get('password'))
self.add_replace_strs(b64_encode(basic_auth))
elif isinstance(v, STRING_OBJ_TYPES):
k_lower = k.lower()
for p in sensitive_params:
if p in k_lower:
self.add_replace_strs(v, b64_encode(v))
def encode(self, message):
try:
res = str(message)
except UnicodeEncodeError as exception:
# could not decode the message
# if message is an Exception, try encode the exception's message
if isinstance(message, Exception) and message.args and isinstance(message.args[0], STRING_OBJ_TYPES):
res = message.args[0].encode('utf-8', 'replace') # type: ignore
elif isinstance(message, STRING_OBJ_TYPES):
# try encode the message itself
res = message.encode('utf-8', 'replace') # type: ignore
else:
res = "Failed encoding message with error: {}".format(exception)
for s in self.replace_strs:
res = res.replace(s, '<XX_REPLACED>')
return res
def __call__(self, message):
text = self.encode(message)
if self.buffering:
self.messages.append(text)
if self.debug_logging:
demisto.debug(text)
else:
demisto.info(text)
return text
def add_replace_strs(self, *args):
'''
Add strings which will be replaced when logging.
Meant for avoiding passwords and so forth in the log.
'''
to_add = []
for a in args:
if a:
a = self.encode(a)
to_add.append(stringEscape(a))
to_add.append(stringUnEscape(a))
self.replace_strs.extend(to_add)
def set_buffering(self, state):
"""
        set whether the logger buffers messages or writes straight to the demisto log
:param state: True/False
:type state: boolean
"""
self.buffering = state
def print_log(self, verbose=False):
if self.write_buf:
self.messages.append("".join(self.write_buf))
if self.messages:
text = 'Full Integration Log:\n' + '\n'.join(self.messages)
if verbose:
demisto.log(text)
            if not self.debug_logging: # we don't print out if in debug_logging as all messages were already printed
demisto.info(text)
self.messages = []
def build_curl(self, text):
"""
Parses the HTTP client "send" log messages and generates cURL queries out of them.
:type text: ``str``
:param text: The HTTP client log message.
:return: No data returned
:rtype: ``None``
"""
http_methods = ['GET', 'POST', 'PUT', 'DELETE', 'PATCH']
data = text.split("send: b'")[1]
if data and data[0] in {'{', '<'}:
# it is the request url query params/post body - will always come after we already have the url and headers
# `<` is for xml body
self.curl[-1] += "-d '{}".format(data)
elif any(http_method in data for http_method in http_methods):
method = ''
url = ''
headers = []
headers_to_skip = ['Content-Length', 'User-Agent', 'Accept-Encoding', 'Connection']
request_parts = repr(data).split('\\\\r\\\\n') # splitting lines on repr since data is a bytes-string
for line, part in enumerate(request_parts):
if line == 0:
method, url, _ = part[1:].split() # ignoring " at first char
elif line != len(request_parts) - 1: # ignoring the last line which is empty
if part.startswith('Host:'):
_, host = part.split('Host: ')
url = 'https://{}{}'.format(host, url)
else:
if any(header_to_skip in part for header_to_skip in headers_to_skip):
continue
headers.append(part)
curl_headers = ''
for header in headers:
if header:
curl_headers += '-H "{}" '.format(header)
curl = 'curl -X {} {} {}'.format(method, url, curl_headers)
if demisto.params().get('proxy'):
proxy_address = os.environ.get('https_proxy')
if proxy_address:
curl += '--proxy {} '.format(proxy_address)
else:
curl += '--noproxy "*" '
if demisto.params().get('insecure'):
curl += '-k '
self.curl.append(curl)
def write(self, msg):
# same as __call__ but allows IntegrationLogger to act as a File like object.
msg = self.encode(msg)
has_newline = False
if '\n' in msg:
has_newline = True
# if new line is last char we trim it out
if msg[-1] == '\n':
msg = msg[:-1]
self.write_buf.append(msg)
if has_newline:
text = "".join(self.write_buf)
if self.buffering:
self.messages.append(text)
else:
demisto.info(text)
if is_debug_mode() and text.startswith('send:'):
try:
self.build_curl(text)
except Exception as e: # should fail silently
demisto.debug('Failed generating curl - {}'.format(str(e)))
self.write_buf = []
def print_override(self, *args, **kwargs):
# print function that can be used to override print usage of internal modules
# will print to the log if the print target is stdout/stderr
try:
import __builtin__ # type: ignore
except ImportError:
# Python 3
import builtins as __builtin__ # type: ignore
file_ = kwargs.get('file')
if (not file_) or file_ == sys.stdout or file_ == sys.stderr:
kwargs['file'] = self
__builtin__.print(*args, **kwargs)
"""
a logger for python integrations:
use LOG(<message>) to add a record to the logger (message can be any object with __str__)
use LOG.print_log() to display all records in War-Room and server log.
"""
LOG = IntegrationLogger(debug_logging=is_debug_mode())
def formatAllArgs(args, kwds):
"""
makes a nice string representation of all the arguments
:type args: ``list``
:param args: function arguments (required)
:type kwds: ``dict``
:param kwds: function keyword arguments (required)
:return: string representation of all the arguments
:rtype: ``string``
"""
formattedArgs = ','.join([repr(a) for a in args]) + ',' + str(kwds).replace(':', "=").replace(" ", "")[1:-1]
return formattedArgs
def logger(func):
"""
decorator function to log the function call using LOG
:type func: ``function``
:param func: function to call (required)
:return: returns the func return value.
:rtype: ``any``
"""
def func_wrapper(*args, **kwargs):
LOG('calling {}({})'.format(func.__name__, formatAllArgs(args, kwargs)))
ret_val = func(*args, **kwargs)
if is_debug_mode():
LOG('Return value [{}]: {}'.format(func.__name__, str(ret_val)))
return ret_val
return func_wrapper
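# Illustrative usage sketch (not part of the original module; `resolve` is a
# hypothetical function): each call is recorded in LOG, and in debug mode the
# return value is recorded as well.
#
#     @logger
#     def resolve(hostname):
#         return '10.0.0.1'
#
#     resolve('example.com')  # LOG records: calling resolve('example.com',)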
def formatCell(data, is_pretty=True):
"""
Convert a given object to markdown while descending multiple levels
:type data: ``str`` or ``list``
:param data: The cell content (required)
:type is_pretty: ``bool``
:param is_pretty: Should cell content be prettified (default is True)
:return: The formatted cell content as a string
:rtype: ``str``
"""
if isinstance(data, STRING_TYPES):
return data
elif isinstance(data, dict):
return '\n'.join([u'{}: {}'.format(k, flattenCell(v, is_pretty)) for k, v in data.items()])
else:
return flattenCell(data, is_pretty)
def flattenCell(data, is_pretty=True):
"""
Flattens a markdown table cell content into a single string
:type data: ``str`` or ``list``
:param data: The cell content (required)
:type is_pretty: ``bool``
:param is_pretty: Should cell content be prettified (default is True)
:return: A string representation of the cell content
:rtype: ``str``
"""
indent = 4 if is_pretty else None
if isinstance(data, STRING_TYPES):
return data
elif isinstance(data, list):
string_list = []
for d in data:
try:
if IS_PY3 and isinstance(d, bytes):
string_list.append(d.decode('utf-8'))
else:
string_list.append(str(d))
except UnicodeEncodeError:
string_list.append(d.encode('utf-8'))
return ',\n'.join(string_list)
else:
return json.dumps(data, indent=indent, ensure_ascii=False)
def FormatIso8601(t):
"""
Convert a datetime object to an ISO 8601 time format string
:type t: ``datetime.datetime``
:param t: The datetime object to convert (required)
:return: An ISO 8601 time format string
:rtype: ``str``
"""
return t.strftime("%Y-%m-%dT%H:%M:%S")
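# Illustrative sketch (assumes `t` is a datetime object, as the strftime call requires):
#
#     from datetime import datetime
#     FormatIso8601(datetime(2021, 5, 1, 12, 30, 0))  # -> '2021-05-01T12:30:00'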
def argToList(arg, separator=','):
"""
Converts a string representation of args to a Python list
:type arg: ``str`` or ``list``
:param arg: Args to be converted (required)
:type separator: ``str``
:param separator: A string separator to separate the strings, the default is a comma.
:return: A python list of args
:rtype: ``list``
"""
if not arg:
return []
if isinstance(arg, list):
return arg
if isinstance(arg, STRING_TYPES):
if arg[0] == '[' and arg[-1] == ']':
return json.loads(arg)
return [s.strip() for s in arg.split(separator)]
return [arg]
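# Illustrative examples of the conversions above (hypothetical inputs):
#
#     argToList('a, b ,c')       # -> ['a', 'b', 'c']   (split on the separator, stripped)
#     argToList('["a", "b"]')    # -> ['a', 'b']        (JSON-style list strings are parsed)
#     argToList(['a', 'b'])      # -> ['a', 'b']        (lists pass through unchanged)
#     argToList(None)            # -> []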
def argToBoolean(value):
"""
Boolean-ish arguments that are passed through demisto.args() could be type bool or type string.
This command removes the guesswork and returns a value of type bool, regardless of the input value's type.
It will also return True for 'yes' and False for 'no'.
:param value: the value to evaluate
:type value: ``string|bool``
:return: a boolean representation of 'value'
:rtype: ``bool``
"""
if isinstance(value, bool):
return value
if isinstance(value, STRING_OBJ_TYPES):
if value.lower() in ['true', 'yes']:
return True
elif value.lower() in ['false', 'no']:
return False
else:
raise ValueError('Argument does not contain a valid boolean-like value')
else:
raise ValueError('Argument is neither a string nor a boolean')
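# Illustrative examples (hypothetical inputs):
#
#     argToBoolean('yes')    # -> True
#     argToBoolean('False')  # -> False
#     argToBoolean(True)     # -> True
#     argToBoolean('maybe')  # raises ValueError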
def appendContext(key, data, dedup=False):
"""
Append data to the investigation context
:type key: ``str``
:param key: The context path (required)
:type data: ``any``
:param data: Data to be added to the context (required)
:type dedup: ``bool``
:param dedup: True if de-duplication is required. Default is False.
:return: No data returned
:rtype: ``None``
"""
if data is None:
return
existing = demisto.get(demisto.context(), key)
if existing:
if isinstance(existing, STRING_TYPES):
if isinstance(data, STRING_TYPES):
new_val = data + ',' + existing
else:
new_val = data + existing  # will raise a self-explanatory TypeError
elif isinstance(existing, dict):
if isinstance(data, dict):
new_val = [existing, data] # type: ignore[assignment]
else:
new_val = data + existing  # will raise a self-explanatory TypeError
elif isinstance(existing, list):
if isinstance(data, list):
existing.extend(data)
else:
existing.append(data)
new_val = existing # type: ignore[assignment]
else:
new_val = [existing, data] # type: ignore[assignment]
if dedup and isinstance(new_val, list):
new_val = list(set(new_val))
demisto.setContext(key, new_val)
else:
demisto.setContext(key, data)
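# Illustrative usage sketch (the 'IPs' context path and values are hypothetical):
#
#     appendContext('IPs', ['1.1.1.1', '2.2.2.2'], dedup=True)
#     # merges the new values into any existing list at 'IPs' and drops duplicates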
def url_to_clickable_markdown(data, url_keys):
"""
Turn the given urls fields in to clickable url, used for the markdown table.
:type data: ``[Union[str, List[Any], Dict[str, Any]]]``
:param data: a dictionary or a list containing data with some values that are urls
:type url_keys: ``List[str]``
:param url_keys: the keys of the url's wished to turn clickable
:return: markdown format for clickable url
:rtype: ``[Union[str, List[Any], Dict[str, Any]]]``
"""
if isinstance(data, list):
data = [url_to_clickable_markdown(item, url_keys) for item in data]
elif isinstance(data, dict):
data = {key: create_clickable_url(value) if key in url_keys else url_to_clickable_markdown(data[key], url_keys)
for key, value in data.items()}
return data
def create_clickable_url(url):
"""
Make the given url clickable when in markdown format by concatenating itself, with the proper brackets
:type url: ``Union[List[str], str]``
:param url: the url of interest or a list of urls
:return: markdown format for clickable url
:rtype: ``str``
"""
if not url:
return None
elif isinstance(url, list):
return ['[{}]({})'.format(item, item) for item in url]
return '[{}]({})'.format(url, url)
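# Illustrative examples (hypothetical inputs):
#
#     create_clickable_url('https://example.com')
#     # -> '[https://example.com](https://example.com)'
#     url_to_clickable_markdown([{'name': 'a', 'link': 'https://example.com'}], url_keys=['link'])
#     # -> [{'name': 'a', 'link': '[https://example.com](https://example.com)'}]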
def tableToMarkdown(name, t, headers=None, headerTransform=None, removeNull=False, metadata=None, url_keys=None):
"""
Converts a demisto table in JSON form to a Markdown table
:type name: ``str``
:param name: The name of the table (required)
:type t: ``dict`` or ``list``
:param t: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:type headers: ``list`` or ``string``
:param headers: A list of headers to be presented in the output table (by order). If string will be passed
then table will have single header. Default will include all available headers.
:type headerTransform: ``function``
:param headerTransform: A function that formats the original data headers (optional)
:type removeNull: ``bool``
:param removeNull: Remove empty columns from the table. Default is False
:type metadata: ``str``
:param metadata: Metadata about the table contents
:type url_keys: ``list``
:param url_keys: a list of keys in the given JSON table that should be turned in to clickable
:return: A string representation of the markdown table
:rtype: ``str``
"""
# Turning the urls in the table to clickable
if url_keys:
t = url_to_clickable_markdown(t, url_keys)
mdResult = ''
if name:
mdResult = '### ' + name + '\n'
if metadata:
mdResult += metadata + '\n'
if not t or len(t) == 0:
mdResult += '**No entries.**\n'
return mdResult
if not isinstance(t, list):
t = [t]
if headers and isinstance(headers, STRING_TYPES):
headers = [headers]
if not isinstance(t[0], dict):
# the table contains only simple objects (strings, numbers)
# should be only one header
if headers and len(headers) > 0:
header = headers[0]
t = map(lambda item: dict((h, item) for h in [header]), t)
else:
raise Exception("Missing headers param for tableToMarkdown. Example: headers=['Some Header']")
# in case of headers was not provided (backward compatibility)
if not headers:
headers = list(t[0].keys())
headers.sort()
if removeNull:
headers_aux = headers[:]
for header in headers:
if all(obj.get(header) in ('', None, [], {}) for obj in t):
headers_aux.remove(header)
headers = headers_aux
if t and len(headers) > 0:
newHeaders = []
if headerTransform is None: # noqa
def headerTransform(s): return stringEscapeMD(s, True, True) # noqa
for header in headers:
newHeaders.append(headerTransform(header))
mdResult += '|'
if len(newHeaders) == 1:
mdResult += newHeaders[0]
else:
mdResult += '|'.join(newHeaders)
mdResult += '|\n'
sep = '---'
mdResult += '|' + '|'.join([sep] * len(headers)) + '|\n'
for entry in t:
vals = [stringEscapeMD((formatCell(entry.get(h, ''), False) if entry.get(h) is not None else ''),
True, True) for h in headers]
# this pipe is optional
mdResult += '| '
try:
mdResult += ' | '.join(vals)
except UnicodeDecodeError:
vals = [str(v) for v in vals]
mdResult += ' | '.join(vals)
mdResult += ' |\n'
else:
mdResult += '**No entries.**\n'
return mdResult
tblToMd = tableToMarkdown
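# Illustrative sketch (hypothetical table data):
#
#     tableToMarkdown('Hosts', [{'ip': '1.1.1.1', 'name': 'srv1'}], headers=['name', 'ip'])
#
# produces:
#
#     ### Hosts
#     |name|ip|
#     |---|---|
#     | srv1 | 1.1.1.1 |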
def createContextSingle(obj, id=None, keyTransform=None, removeNull=False):
"""Receives a dict with flattened key values, and converts them into nested dicts
:type obj: ``dict`` or ``list``
:param obj: The data to be added to the context (required)
:type id: ``str``
:param id: The ID of the context entry
:type keyTransform: ``function``
:param keyTransform: A formatting function for the markdown table headers
:type removeNull: ``bool``
:param removeNull: True if empty columns should be removed, false otherwise
:return: The converted context list
:rtype: ``list``
"""
res = {} # type: dict
if keyTransform is None:
def keyTransform(s): return s # noqa
keys = obj.keys()
for key in keys:
if removeNull and obj[key] in ('', None, [], {}):
continue
values = key.split('.')
current = res
for v in values[:-1]:
current.setdefault(v, {})
current = current[v]
current[keyTransform(values[-1])] = obj[key]
if id is not None:
res.setdefault('ID', id)
return res
def createContext(data, id=None, keyTransform=None, removeNull=False):
"""Receives a dict with flattened key values, and converts them into nested dicts
:type data: ``dict`` or ``list``
:param data: The data to be added to the context (required)
:type id: ``str``
:param id: The ID of the context entry
:type keyTransform: ``function``
:param keyTransform: A formatting function for the markdown table headers
:type removeNull: ``bool``
:param removeNull: True if empty columns should be removed, false otherwise
:return: The converted context list
:rtype: ``list``
"""
if isinstance(data, (list, tuple)):
return [createContextSingle(d, id, keyTransform, removeNull) for d in data]
else:
return createContextSingle(data, id, keyTransform, removeNull)
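# Illustrative example (hypothetical data): dotted keys become nested dicts and
# empty values are dropped when removeNull=True.
#
#     createContext({'a.b': 1, 'a.c': 2, 'd': None}, removeNull=True)
#     # -> {'a': {'b': 1, 'c': 2}}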
def sectionsToMarkdown(root):
"""
Converts a list of Demisto JSON tables to markdown string of tables
:type root: ``dict`` or ``list``
:param root: The JSON table - List of dictionaries with the same keys or a single dictionary (required)
:return: A string representation of the markdown table
:rtype: ``str``
"""
mdResult = ''
if isinstance(root, dict):
for section in root:
data = root[section]
if isinstance(data, dict):
data = [data]
data = [{k: formatCell(row[k]) for k in row} for row in data]
mdResult += tblToMd(section, data)
return mdResult
def fileResult(filename, data, file_type=None):
"""
Creates a file from the given data
:type filename: ``str``
:param filename: The name of the file to be created (required)
:type data: ``str`` or ``bytes``
:param data: The file data (required)
:type file_type: ``str``
:param file_type: one of the entryTypes file or entryInfoFile (optional)
:return: A Demisto war room entry
:rtype: ``dict``
"""
if file_type is None:
file_type = entryTypes['file']
temp = demisto.uniqueFile()
# pylint: disable=undefined-variable
if (IS_PY3 and isinstance(data, str)) or (not IS_PY3 and isinstance(data, unicode)): # type: ignore # noqa: F821
data = data.encode('utf-8')
# pylint: enable=undefined-variable
with open(demisto.investigation()['id'] + '_' + temp, 'wb') as f:
f.write(data)
return {'Contents': '', 'ContentsFormat': formats['text'], 'Type': file_type, 'File': filename, 'FileID': temp}
def hash_djb2(s, seed=5381):
"""
Hash string with djb2 hash function
:type s: ``str``
:param s: The input string to hash
:type seed: ``int``
:param seed: The seed for the hash function (default is 5381)
:return: The hashed value
:rtype: ``int``
"""
hash_name = seed
for x in s:
hash_name = ((hash_name << 5) + hash_name) + ord(x)
return hash_name & 0xFFFFFFFF
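# Illustrative example (value computed from the definition above):
#
#     hash_djb2('abc')  # -> 193485963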
def file_result_existing_file(filename, saveFilename=None):
"""
Rename an existing file
:type filename: ``str``
:param filename: The name of the file to be modified (required)
:type saveFilename: ``str``
:param saveFilename: The new file name
:return: A Demisto war room entry
:rtype: ``dict``
"""
temp = demisto.uniqueFile()
os.rename(filename, demisto.investigation()['id'] + '_' + temp)
return {'Contents': '', 'ContentsFormat': formats['text'], 'Type': entryTypes['file'],
'File': saveFilename if saveFilename else filename, 'FileID': temp}
def flattenRow(rowDict):
"""
Flatten each element in the given rowDict
:type rowDict: ``dict``
:param rowDict: The dict to be flattened (required)
:return: A flattened dict
:rtype: ``dict``
"""
return {k: formatCell(rowDict[k]) for k in rowDict}
def flattenTable(tableDict):
"""
Flatten each row in the given tableDict
:type tableDict: ``dict``
:param tableDict: The table to be flattened (required)
:return: A flattened table
:rtype: ``dict``
"""
return [flattenRow(row) for row in tableDict]
MARKDOWN_CHARS = r"\`*_{}[]()#+-!|"
def stringEscapeMD(st, minimal_escaping=False, escape_multiline=False):
"""
Escape any chars that might break a markdown string
:type st: ``str``
:param st: The string to be modified (required)
:type minimal_escaping: ``bool``
:param minimal_escaping: Whether replace all special characters or table format only (optional)
:type escape_multiline: ``bool``
:param escape_multiline: Whether convert line-ending characters (optional)
:return: A modified string
:rtype: ``str``
"""
if escape_multiline:
st = st.replace('\r\n', '<br>') # Windows
st = st.replace('\r', '<br>') # old Mac
st = st.replace('\n', '<br>') # Unix
if minimal_escaping:
for c in '|':
st = st.replace(c, '\\' + c)
else:
st = "".join(["\\" + str(c) if c in MARKDOWN_CHARS else str(c) for c in st])
return st
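# Illustrative examples (hypothetical inputs; results shown as Python literals):
#
#     stringEscapeMD('a|b', minimal_escaping=True)       # -> 'a\\|b'
#     stringEscapeMD('*bold*')                           # -> '\\*bold\\*'
#     stringEscapeMD('one\ntwo', escape_multiline=True)  # -> 'one<br>two'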
def raiseTable(root, key):
newInternal = {}
if key in root and isinstance(root[key], dict):
for sub in root[key]:
if sub not in root:
root[sub] = root[key][sub]
else:
newInternal[sub] = root[key][sub]
if newInternal:
root[key] = newInternal
else:
del root[key]
def zoomField(item, fieldName):
if isinstance(item, dict) and fieldName in item:
return item[fieldName]
else:
return item
def isCommandAvailable(cmd):
"""
Check the list of available modules to see whether a command is currently available to be run.
:type cmd: ``str``
:param cmd: The command to check (required)
:return: True if command is available, False otherwise
:rtype: ``bool``
"""
modules = demisto.getAllSupportedCommands()
for m in modules:
if modules[m] and isinstance(modules[m], list):
for c in modules[m]:
if c['name'] == cmd:
return True
return False
def epochToTimestamp(epoch):
return datetime.utcfromtimestamp(epoch / 1000.0).strftime("%Y-%m-%d %H:%M:%S")
def formatTimeColumns(data, timeColumnNames):
for row in data:
for k in timeColumnNames:
row[k] = epochToTimestamp(row[k])
def strip_tag(tag):
split_array = tag.split('}')
if len(split_array) > 1:
strip_ns_tag = split_array[1]
tag = strip_ns_tag
return tag
def elem_to_internal(elem, strip_ns=1, strip=1):
"""Convert an Element into an internal dictionary (not JSON!)."""
d = OrderedDict() # type: dict
elem_tag = elem.tag
if strip_ns:
elem_tag = strip_tag(elem.tag)
for key, value in list(elem.attrib.items()):
d['@' + key] = value
# loop over subelements to merge them
for subelem in elem:
v = elem_to_internal(subelem, strip_ns=strip_ns, strip=strip)
tag = subelem.tag
if strip_ns:
tag = strip_tag(subelem.tag)
value = v[tag]
try:
# add to existing list for this tag
d[tag].append(value)
except AttributeError:
# turn existing entry into a list
d[tag] = [d[tag], value]
except KeyError:
# add a new non-list entry
d[tag] = value
text = elem.text
tail = elem.tail
if strip:
# ignore leading and trailing whitespace
if text:
text = text.strip()
if tail:
tail = tail.strip()
if tail:
d['#tail'] = tail
if d:
# use #text element if other attributes exist
if text:
d["#text"] = text
else:
# text is the value if no attributes
d = text or None # type: ignore
return {elem_tag: d}
def internal_to_elem(pfsh, factory=ET.Element):
"""Convert an internal dictionary (not JSON!) into an Element.
Whatever Element implementation we could import will be
used by default; if you want to use something else, pass the
Element class as the factory parameter.
"""
attribs = OrderedDict() # type: dict
text = None
tail = None
sublist = []
tag = list(pfsh.keys())
if len(tag) != 1:
raise ValueError("Illegal structure with multiple tags: %s" % tag)
tag = tag[0]
value = pfsh[tag]
if isinstance(value, dict):
for k, v in list(value.items()):
if k[:1] == "@":
attribs[k[1:]] = v
elif k == "#text":
text = v
elif k == "#tail":
tail = v
elif isinstance(v, list):
for v2 in v:
sublist.append(internal_to_elem({k: v2}, factory=factory))
else:
sublist.append(internal_to_elem({k: v}, factory=factory))
else:
text = value
e = factory(tag, attribs)
for sub in sublist:
e.append(sub)
e.text = text
e.tail = tail
return e
def elem2json(elem, options, strip_ns=1, strip=1):
"""Convert an ElementTree or Element into a JSON string."""
if hasattr(elem, 'getroot'):
elem = elem.getroot()
if 'pretty' in options:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip), indent=4, separators=(',', ': '))
else:
return json.dumps(elem_to_internal(elem, strip_ns=strip_ns, strip=strip))
def json2elem(json_data, factory=ET.Element):
"""Convert a JSON string into an Element.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
return internal_to_elem(json.loads(json_data), factory)
def xml2json(xmlstring, options={}, strip_ns=1, strip=1):
"""
Convert an XML string into a JSON string.
:type xmlstring: ``str``
:param xmlstring: The string to be converted (required)
:return: The converted JSON
:rtype: ``dict`` or ``list``
"""
elem = ET.fromstring(xmlstring)
return elem2json(elem, options, strip_ns=strip_ns, strip=strip)
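# Illustrative example (hypothetical XML): attributes are prefixed with '@' and
# text-only elements collapse to their text.
#
#     xml2json('<root a="1"><b>text</b></root>')
#     # -> '{"root": {"@a": "1", "b": "text"}}'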
def json2xml(json_data, factory=ET.Element):
"""Convert a JSON string into an XML string.
Whatever Element implementation we could import will be used by
default; if you want to use something else, pass the Element class
as the factory parameter.
"""
if not isinstance(json_data, dict):
json_data = json.loads(json_data)
elem = internal_to_elem(json_data, factory)
return ET.tostring(elem, encoding='utf-8')
def get_hash_type(hash_file):
"""
Checks the type of the given hash. Returns 'md5', 'sha1', 'sha256' or 'Unknown'.
:type hash_file: ``str``
:param hash_file: The hash to be checked (required)
:return: The hash type
:rtype: ``str``
"""
hash_len = len(hash_file)
if (hash_len == 32):
return 'md5'
elif (hash_len == 40):
return 'sha1'
elif (hash_len == 64):
return 'sha256'
elif (hash_len == 128):
return 'sha512'
else:
return 'Unknown'
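# Illustrative examples (the hash length decides the type):
#
#     get_hash_type('d41d8cd98f00b204e9800998ecf8427e')  # -> 'md5'    (32 hex chars)
#     get_hash_type('a' * 64)                            # -> 'sha256' (64 chars)
#     get_hash_type('not-a-hash')                        # -> 'Unknown'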
def is_mac_address(mac):
"""
Test for valid mac address
:type mac: ``str``
:param mac: MAC address in the form of AA:BB:CC:00:11:22
:return: True/False
:rtype: ``bool``
"""
if re.search(r'([0-9A-F]{2}[:]){5}([0-9A-F]){2}', mac.upper()) is not None:
return True
else:
return False
def is_ipv6_valid(address):
"""
Checks if the given string represents a valid IPv6 address.
:type address: ``str``
:param address: The string to check.
:return: True if the given string represents a valid IPv6 address.
:rtype: ``bool``
"""
try:
socket.inet_pton(socket.AF_INET6, address)
except socket.error: # not a valid address
return False
return True
def is_ip_valid(s, accept_v6_ips=False):
"""
Checks if the given string represents a valid IP address.
By default, will only return 'True' for IPv4 addresses.
:type s: ``str``
:param s: The string to be checked (required)
:type accept_v6_ips: ``bool``
:param accept_v6_ips: A boolean determining whether the
function should accept IPv6 addresses
:return: True if the given string represents a valid IP address, False otherwise
:rtype: ``bool``
"""
a = s.split('.')
if accept_v6_ips and is_ipv6_valid(s):
return True
elif len(a) != 4:
return False
else:
for x in a:
if not x.isdigit():
return False
i = int(x)
if i < 0 or i > 255:
return False
return True
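# Illustrative examples:
#
#     is_ip_valid('8.8.8.8')                          # -> True
#     is_ip_valid('300.1.1.1')                        # -> False (octet out of range)
#     is_ip_valid('2001:db8::1')                      # -> False (IPv6 rejected by default)
#     is_ip_valid('2001:db8::1', accept_v6_ips=True)  # -> True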
def get_integration_name():
"""
Getting calling integration's name
:return: Calling integration's name
:rtype: ``str``
"""
return demisto.callingContext.get('IntegrationBrand')
class Common(object):
class Indicator(object):
"""
interface class
"""
@abstractmethod
def to_context(self):
pass
class DBotScore(object):
"""
DBotScore class
:type indicator: ``str``
:param indicator: indicator value, ip, hash, domain, url, etc
:type indicator_type: ``DBotScoreType``
:param indicator_type: use DBotScoreType class
:type integration_name: ``str``
:param integration_name: integration name
:type score: ``DBotScore``
:param score: DBotScore.NONE, DBotScore.GOOD, DBotScore.SUSPICIOUS, DBotScore.BAD
:type malicious_description: ``str``
:param malicious_description: if the indicator is malicious and there is an explanation for it, set it in this field
:type reliability: ``DBotScoreReliability``
:param reliability: use DBotScoreReliability class
:return: None
:rtype: ``None``
"""
NONE = 0
GOOD = 1
SUSPICIOUS = 2
BAD = 3
CONTEXT_PATH = 'DBotScore(val.Indicator && val.Indicator == obj.Indicator && val.Vendor == obj.Vendor ' \
'&& val.Type == obj.Type)'
CONTEXT_PATH_PRIOR_V5_5 = 'DBotScore'
def __init__(self, indicator, indicator_type, integration_name, score, malicious_description=None,
reliability=None):
if not DBotScoreType.is_valid_type(indicator_type):
raise TypeError('indicator_type must be of type DBotScoreType enum')
if not Common.DBotScore.is_valid_score(score):
raise TypeError('score must be of type DBotScore enum')
if reliability and not DBotScoreReliability.is_valid_type(reliability):
raise TypeError('reliability must be of type DBotScoreReliability enum')
self.indicator = indicator
self.indicator_type = indicator_type
self.integration_name = integration_name or get_integration_name()
self.score = score
self.malicious_description = malicious_description
self.reliability = reliability
@staticmethod
def is_valid_score(score):
return score in (
Common.DBotScore.NONE,
Common.DBotScore.GOOD,
Common.DBotScore.SUSPICIOUS,
Common.DBotScore.BAD
)
@staticmethod
def get_context_path():
if is_demisto_version_ge('5.5.0'):
return Common.DBotScore.CONTEXT_PATH
else:
return Common.DBotScore.CONTEXT_PATH_PRIOR_V5_5
def to_context(self):
dbot_context = {
'Indicator': self.indicator,
'Type': self.indicator_type,
'Vendor': self.integration_name,
'Score': self.score
}
if self.reliability:
dbot_context['Reliability'] = self.reliability
ret_value = {
Common.DBotScore.get_context_path(): dbot_context
}
return ret_value
class IP(Indicator):
"""
IP indicator class - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#ip
:type ip: ``str``
:param ip: IP address
:type asn: ``str``
:param asn: The autonomous system name for the IP address, for example: "AS8948".
:type as_owner: ``str``
:param as_owner: The autonomous system owner of the IP.
:type region: ``str``
:param region: The region in which the IP is located.
:type port: ``str``
:param port: Ports that are associated with the IP.
:type internal: ``bool``
:param internal: Whether the IP is internal or external.
:type updated_date: ``date``
:param updated_date: The date that the IP was last updated.
:type registrar_abuse_name: ``str``
:param registrar_abuse_name: The name of the contact for reporting abuse.
:type registrar_abuse_address: ``str``
:param registrar_abuse_address: The address of the contact for reporting abuse.
:type registrar_abuse_country: ``str``
:param registrar_abuse_country: The country of the contact for reporting abuse.
:type registrar_abuse_network: ``str``
:param registrar_abuse_network: The network of the contact for reporting abuse.
:type registrar_abuse_phone: ``str``
:param registrar_abuse_phone: The phone number of the contact for reporting abuse.
:type registrar_abuse_email: ``str``
:param registrar_abuse_email: The email address of the contact for reporting abuse.
:type campaign: ``str``
:param campaign: The campaign associated with the IP.
:type traffic_light_protocol: ``str``
:param traffic_light_protocol: The Traffic Light Protocol (TLP) color that is suitable for the IP.
:type community_notes: ``CommunityNotes``
:param community_notes: Notes on the IP that were given by the community.
:type publications: ``Publications``
:param publications: Publications that were published about the IP.
:type threat_types: ``ThreatTypes``
:param threat_types: Threat types that are associated with the IP.
:type hostname: ``str``
:param hostname: The hostname that is mapped to this IP address.
:type geo_latitude: ``str``
:param geo_latitude: The geolocation where the IP address is located, in the format: latitude
:type geo_longitude: ``str``
:param geo_longitude: The geolocation where the IP address is located, in the format: longitude.
:type geo_country: ``str``
:param geo_country: The country in which the IP address is located.
:type geo_description: ``str``
:param geo_description: Additional information about the location.
:type detection_engines: ``int``
:param detection_engines: The total number of engines that checked the indicator.
:type positive_engines: ``int``
:param positive_engines: The number of engines that positively detected the indicator as malicious.
:type organization_name: ``str``
:param organization_name: The organization of the IP
:type organization_type: ``str``
:param organization_type: The organization type of the IP.
:type tags: ``str``
:param tags: Tags of the IP.
:type malware_family: ``str``
:param malware_family: The malware family associated with the IP.
:type feed_related_indicators: ``FeedRelatedIndicators``
:param feed_related_indicators: List of indicators that are associated with the IP.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type dbot_score: ``DBotScore``
:param dbot_score: If IP has a score then create and set a DBotScore object.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'IP(val.Address && val.Address == obj.Address)'
def __init__(self, ip, dbot_score, asn=None, as_owner=None, region=None, port=None, internal=None,
updated_date=None, registrar_abuse_name=None, registrar_abuse_address=None,
registrar_abuse_country=None, registrar_abuse_network=None, registrar_abuse_phone=None,
registrar_abuse_email=None, campaign=None, traffic_light_protocol=None,
community_notes=None, publications=None, threat_types=None,
hostname=None, geo_latitude=None, geo_longitude=None,
geo_country=None, geo_description=None, detection_engines=None, positive_engines=None,
organization_name=None, organization_type=None, feed_related_indicators=None, tags=None,
malware_family=None, relationships=None):
self.ip = ip
self.asn = asn
self.as_owner = as_owner
self.region = region
self.port = port
self.internal = internal
self.updated_date = updated_date
self.registrar_abuse_name = registrar_abuse_name
self.registrar_abuse_address = registrar_abuse_address
self.registrar_abuse_country = registrar_abuse_country
self.registrar_abuse_network = registrar_abuse_network
self.registrar_abuse_phone = registrar_abuse_phone
self.registrar_abuse_email = registrar_abuse_email
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.community_notes = community_notes
self.publications = publications
self.threat_types = threat_types
self.hostname = hostname
self.geo_latitude = geo_latitude
self.geo_longitude = geo_longitude
self.geo_country = geo_country
self.geo_description = geo_description
self.detection_engines = detection_engines
self.positive_engines = positive_engines
self.organization_name = organization_name
self.organization_type = organization_type
self.feed_related_indicators = feed_related_indicators
self.tags = tags
self.malware_family = malware_family
self.relationships = relationships
if not isinstance(dbot_score, Common.DBotScore):
raise ValueError('dbot_score must be of type DBotScore')
self.dbot_score = dbot_score
def to_context(self):
ip_context = {
'Address': self.ip
}
if self.asn:
ip_context['ASN'] = self.asn
if self.as_owner:
ip_context['ASOwner'] = self.as_owner
if self.region:
ip_context['Region'] = self.region
if self.port:
ip_context['Port'] = self.port
if self.internal:
ip_context['Internal'] = self.internal
if self.updated_date:
ip_context['UpdatedDate'] = self.updated_date
if self.registrar_abuse_name or self.registrar_abuse_address or self.registrar_abuse_country or \
self.registrar_abuse_network or self.registrar_abuse_phone or self.registrar_abuse_email:
ip_context['Registrar'] = {'Abuse': {}}
if self.registrar_abuse_name:
ip_context['Registrar']['Abuse']['Name'] = self.registrar_abuse_name
if self.registrar_abuse_address:
ip_context['Registrar']['Abuse']['Address'] = self.registrar_abuse_address
if self.registrar_abuse_country:
ip_context['Registrar']['Abuse']['Country'] = self.registrar_abuse_country
if self.registrar_abuse_network:
ip_context['Registrar']['Abuse']['Network'] = self.registrar_abuse_network
if self.registrar_abuse_phone:
ip_context['Registrar']['Abuse']['Phone'] = self.registrar_abuse_phone
if self.registrar_abuse_email:
ip_context['Registrar']['Abuse']['Email'] = self.registrar_abuse_email
if self.campaign:
ip_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
ip_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
ip_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
ip_context['Publications'] = publications
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
ip_context['ThreatTypes'] = threat_types
if self.hostname:
ip_context['Hostname'] = self.hostname
if self.geo_latitude or self.geo_country or self.geo_description:
ip_context['Geo'] = {}
if self.geo_latitude and self.geo_longitude:
ip_context['Geo']['Location'] = '{}:{}'.format(self.geo_latitude, self.geo_longitude)
if self.geo_country:
ip_context['Geo']['Country'] = self.geo_country
if self.geo_description:
ip_context['Geo']['Description'] = self.geo_description
if self.organization_name or self.organization_type:
ip_context['Organization'] = {}
if self.organization_name:
ip_context['Organization']['Name'] = self.organization_name
if self.organization_type:
ip_context['Organization']['Type'] = self.organization_type
if self.detection_engines is not None:
ip_context['DetectionEngines'] = self.detection_engines
if self.positive_engines is not None:
ip_context['PositiveDetections'] = self.positive_engines
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
ip_context['FeedRelatedIndicators'] = feed_related_indicators
if self.tags:
ip_context['Tags'] = self.tags
if self.malware_family:
ip_context['MalwareFamily'] = self.malware_family
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
ip_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
ip_context['Relationships'] = relationships_context
ret_value = {
Common.IP.CONTEXT_PATH: ip_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
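# Illustrative sketch (values and the 'MyIntegration' brand are hypothetical):
# building an IP indicator together with its DBotScore and rendering the
# standard context entry.
#
#     dbot = Common.DBotScore(
#         indicator='8.8.8.8',
#         indicator_type=DBotScoreType.IP,
#         integration_name='MyIntegration',
#         score=Common.DBotScore.GOOD,
#     )
#     ip = Common.IP(ip='8.8.8.8', dbot_score=dbot, asn='AS15169')
#     context = ip.to_context()  # keys: Common.IP.CONTEXT_PATH and the DBotScore path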
class FileSignature(object):
"""
FileSignature class
:type authentihash: ``str``
:param authentihash: The authentication hash.
:type copyright: ``str``
:param copyright: Copyright information.
:type description: ``str``
:param description: A description of the signature.
:type file_version: ``str``
:param file_version: The file version.
:type internal_name: ``str``
:param internal_name: The internal name of the file.
:type original_name: ``str``
:param original_name: The original name of the file.
:return: None
:rtype: ``None``
"""
def __init__(self, authentihash, copyright, description, file_version, internal_name, original_name):
self.authentihash = authentihash
self.copyright = copyright
self.description = description
self.file_version = file_version
self.internal_name = internal_name
self.original_name = original_name
def to_context(self):
return {
'Authentihash': self.authentihash,
'Copyright': self.copyright,
'Description': self.description,
'FileVersion': self.file_version,
'InternalName': self.internal_name,
'OriginalName': self.original_name,
}
class FeedRelatedIndicators(object):
"""
FeedRelatedIndicators class
Implements indicators that are associated with another indicator
:type value: ``str``
:param value: Indicators that are associated with the indicator.
:type indicator_type: ``str``
:param indicator_type: The type of the indicators that are associated with the indicator.
:type description: ``str``
:param description: The description of the indicators that are associated with the indicator.
:return: None
:rtype: ``None``
"""
def __init__(self, value=None, indicator_type=None, description=None):
self.value = value
self.indicator_type = indicator_type
self.description = description
def to_context(self):
return {
'value': self.value,
'type': self.indicator_type,
'description': self.description
}
class CommunityNotes(object):
"""
CommunityNotes class
Implements the community notes of an indicator
:type note: ``str``
:param note: Notes on the indicator that were given by the community.
:type timestamp: ``Timestamp``
:param timestamp: The time in which the note was published.
:return: None
:rtype: ``None``
"""
def __init__(self, note=None, timestamp=None):
self.note = note
self.timestamp = timestamp
def to_context(self):
return {
'note': self.note,
'timestamp': self.timestamp,
}
class Publications(object):
"""
Publications class
Implements the publications of an indicator
:type source: ``str``
:param source: The source in which the article was published.
:type title: ``str``
:param title: The name of the article.
:type link: ``str``
:param link: A link to the original article.
:type timestamp: ``Timestamp``
:param timestamp: The time in which the article was published.
:return: None
:rtype: ``None``
"""
def __init__(self, source=None, title=None, link=None, timestamp=None):
self.source = source
self.title = title
self.link = link
self.timestamp = timestamp
def to_context(self):
return {
'source': self.source,
'title': self.title,
'link': self.link,
'timestamp': self.timestamp,
}
class Behaviors(object):
"""
Behaviors class
Implements the behaviors of an indicator
:type details: ``str``
:param details:
:type action: ``str``
:param action:
:return: None
:rtype: ``None``
"""
def __init__(self, details=None, action=None):
self.details = details
self.action = action
def to_context(self):
return {
'details': self.details,
'title': self.action,
}
class ThreatTypes(object):
"""
ThreatTypes class
Implements the threat types of an indicator
:type threat_category: ``str``
:param threat_category: The threat category associated to this indicator by the source vendor. For example,
Phishing, Control, TOR, etc.
:type threat_category_confidence: ``str``
:param threat_category_confidence: The confidence level provided by the vendor for the threat type category.
For example, a confidence of 90 for the threat type category "malware" means the vendor is 90% confident
that this is malware.
:return: None
:rtype: ``None``
"""
def __init__(self, threat_category=None, threat_category_confidence=None):
self.threat_category = threat_category
self.threat_category_confidence = threat_category_confidence
def to_context(self):
return {
'threatcategory': self.threat_category,
'threatcategoryconfidence': self.threat_category_confidence,
}
class File(Indicator):
"""
File indicator class - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#file
:type name: ``str``
:param name: The full file name (including file extension).
:type entry_id: ``str``
:param entry_id: The ID for locating the file in the War Room.
:type size: ``int``
:param size: The size of the file in bytes.
:type md5: ``str``
:param md5: The MD5 hash of the file.
:type sha1: ``str``
:param sha1: The SHA1 hash of the file.
:type sha256: ``str``
:param sha256: The SHA256 hash of the file.
:type sha512: ``str``
:param sha512: The SHA512 hash of the file.
:type ssdeep: ``str``
:param ssdeep: The ssdeep hash of the file (same as displayed in file entries).
:type extension: ``str``
:param extension: The file extension, for example: "xls".
:type file_type: ``str``
:param file_type: The file type, as determined by libmagic (same as displayed in file entries).
:type hostname: ``str``
:param hostname: The name of the host where the file was found. Should match Path.
:type path: ``str``
:param path: The path where the file is located.
:type company: ``str``
:param company: The name of the company that released a binary.
:type product_name: ``str``
:param product_name: The name of the product to which this file belongs.
:type digital_signature__publisher: ``str``
:param digital_signature__publisher: The publisher of the digital signature for the file.
:type signature: ``FileSignature``
:param signature: File signature class
:type actor: ``str``
:param actor: The actor reference.
:type tags: ``str``
:param tags: Tags of the file.
:type feed_related_indicators: ``FeedRelatedIndicators``
:param feed_related_indicators: List of indicators that are associated with the file.
:type malware_family: ``str``
:param malware_family: The malware family associated with the File.
:type campaign: ``str``
:param campaign:
:type traffic_light_protocol: ``str``
:param traffic_light_protocol:
:type community_notes: ``CommunityNotes``
:param community_notes: Notes on the file that were given by the community.
:type publications: ``Publications``
:param publications: Publications that were published about the file.
:type threat_types: ``ThreatTypes``
:param threat_types: Threat types that are associated with the file.
:type imphash: ``str``
:param imphash: The Imphash hash of the file.
:type quarantined: ``bool``
:param quarantined: Whether the file is quarantined.
:type organization: ``str``
:param organization: The organization of the file.
:type associated_file_names: ``str``
:param associated_file_names: File names that are known as associated to the file.
:type behaviors: ``Behaviors``
:param behaviors: list of behaviors associated with the file.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type dbot_score: ``DBotScore``
:param dbot_score: If file has a score then create and set a DBotScore object
:rtype: ``None``
:return: None
"""
CONTEXT_PATH = 'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || ' \
'val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512 || ' \
'val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTPH || ' \
'val.SSDeep && val.SSDeep == obj.SSDeep)'
def __init__(self, dbot_score, name=None, entry_id=None, size=None, md5=None, sha1=None, sha256=None,
sha512=None, ssdeep=None, extension=None, file_type=None, hostname=None, path=None, company=None,
product_name=None, digital_signature__publisher=None, signature=None, actor=None, tags=None,
feed_related_indicators=None, malware_family=None, imphash=None, quarantined=None, campaign=None,
associated_file_names=None, traffic_light_protocol=None, organization=None, community_notes=None,
publications=None, threat_types=None, behaviors=None, relationships=None):
self.name = name
self.entry_id = entry_id
self.size = size
self.md5 = md5
self.sha1 = sha1
self.sha256 = sha256
self.sha512 = sha512
self.ssdeep = ssdeep
self.extension = extension
self.file_type = file_type
self.hostname = hostname
self.path = path
self.company = company
self.product_name = product_name
self.digital_signature__publisher = digital_signature__publisher
self.signature = signature
self.actor = actor
self.tags = tags
self.feed_related_indicators = feed_related_indicators
self.malware_family = malware_family
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.community_notes = community_notes
self.publications = publications
self.threat_types = threat_types
self.imphash = imphash
self.quarantined = quarantined
self.organization = organization
self.associated_file_names = associated_file_names
self.behaviors = behaviors
self.relationships = relationships
self.dbot_score = dbot_score
def to_context(self):
file_context = {}
if self.name:
file_context['Name'] = self.name
if self.entry_id:
file_context['EntryID'] = self.entry_id
if self.size:
file_context['Size'] = self.size
if self.md5:
file_context['MD5'] = self.md5
if self.sha1:
file_context['SHA1'] = self.sha1
if self.sha256:
file_context['SHA256'] = self.sha256
if self.sha512:
file_context['SHA512'] = self.sha512
if self.ssdeep:
file_context['SSDeep'] = self.ssdeep
if self.extension:
file_context['Extension'] = self.extension
if self.file_type:
file_context['Type'] = self.file_type
if self.hostname:
file_context['Hostname'] = self.hostname
if self.path:
file_context['Path'] = self.path
if self.company:
file_context['Company'] = self.company
if self.product_name:
file_context['ProductName'] = self.product_name
if self.digital_signature__publisher:
file_context['DigitalSignature'] = {
'Published': self.digital_signature__publisher
}
if self.signature:
file_context['Signature'] = self.signature.to_context()
if self.actor:
file_context['Actor'] = self.actor
if self.tags:
file_context['Tags'] = self.tags
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
file_context['FeedRelatedIndicators'] = feed_related_indicators
if self.malware_family:
file_context['MalwareFamily'] = self.malware_family
if self.campaign:
file_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
file_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
file_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
file_context['Publications'] = publications
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
file_context['ThreatTypes'] = threat_types
if self.imphash:
file_context['Imphash'] = self.imphash
if self.quarantined:
file_context['Quarantined'] = self.quarantined
if self.organization:
file_context['Organization'] = self.organization
if self.associated_file_names:
file_context['AssociatedFileNames'] = self.associated_file_names
if self.behaviors:
behaviors = []
for behavior in self.behaviors:
behaviors.append(behavior.to_context())
file_context['Behavior'] = behaviors
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
file_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
file_context['Relationships'] = relationships_context
ret_value = {
Common.File.CONTEXT_PATH: file_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class CVE(Indicator):
"""
CVE indicator class - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#cve
:type id: ``str``
:param id: The ID of the CVE, for example: "CVE-2015-1653".
:type cvss: ``str``
:param cvss: The CVSS of the CVE, for example: "10.0".
:type published: ``str``
:param published: The timestamp of when the CVE was published.
:type modified: ``str``
:param modified: The timestamp of when the CVE was last modified.
:type description: ``str``
:param description: A description of the CVE.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'CVE(val.ID && val.ID == obj.ID)'
def __init__(self, id, cvss, published, modified, description, relationships=None):
# type: (str, str, str, str, str, list) -> None
self.id = id
self.cvss = cvss
self.published = published
self.modified = modified
self.description = description
self.dbot_score = Common.DBotScore(
indicator=id,
indicator_type=DBotScoreType.CVE,
integration_name=None,
score=Common.DBotScore.NONE
)
self.relationships = relationships
def to_context(self):
cve_context = {
'ID': self.id
}
if self.cvss:
cve_context['CVSS'] = self.cvss
if self.published:
cve_context['Published'] = self.published
if self.modified:
cve_context['Modified'] = self.modified
if self.description:
cve_context['Description'] = self.description
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
cve_context['Relationships'] = relationships_context
ret_value = {
Common.CVE.CONTEXT_PATH: cve_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class EMAIL(Indicator):
"""
EMAIL indicator class
:type address: ``str``
:param address: The email's address.
:type domain: ``str``
:param domain: The domain of the Email.
:type blocked: ``bool``
:param blocked: Whether the email address is blocked.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Email(val.Address && val.Address == obj.Address)'
def __init__(self, address, dbot_score, domain=None, blocked=None, relationships=None):
# type: (str, Common.DBotScore, str, bool, list) -> None
self.address = address
self.domain = domain
self.blocked = blocked
self.dbot_score = dbot_score
self.relationships = relationships
def to_context(self):
email_context = {
'Address': self.address
}
if self.domain:
email_context['Domain'] = self.domain
if self.blocked:
email_context['Blocked'] = self.blocked
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
email_context['Relationships'] = relationships_context
ret_value = {
Common.EMAIL.CONTEXT_PATH: email_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class URL(Indicator):
"""
URL indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#url
:type url: ``str``
:param url: The URL
:type detection_engines: ``int``
:param detection_engines: The total number of engines that checked the indicator.
:type positive_detections: ``int``
:param positive_detections: The number of engines that positively detected the indicator as malicious.
:type category: ``str``
:param category: The category associated with the indicator.
:type feed_related_indicators: ``FeedRelatedIndicators``
:param feed_related_indicators: List of indicators that are associated with the URL.
:type malware_family: ``str``
:param malware_family: The malware family associated with the URL.
:type tags: ``str``
:param tags: Tags of the URL.
:type port: ``str``
:param port: Ports that are associated with the URL.
:type internal: ``bool``
:param internal: Whether the URL is internal or external.
:type campaign: ``str``
:param campaign: The campaign associated with the URL.
:type traffic_light_protocol: ``str``
:param traffic_light_protocol: The Traffic Light Protocol (TLP) color that is suitable for the URL.
:type threat_types: ``ThreatTypes``
:param threat_types: Threat types that are associated with the URL.
:type asn: ``str``
:param asn: The autonomous system name for the URL, for example: 'AS8948'.
:type as_owner: ``str``
:param as_owner: The autonomous system owner of the URL.
:type geo_country: ``str``
:param geo_country: The country in which the URL is located.
:type organization: ``str``
:param organization: The organization of the URL.
:type community_notes: ``CommunityNotes``
:param community_notes: List of notes on the URL that were given by the community.
:type publications: ``Publications``
:param publications: List of publications that were published about the URL.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type dbot_score: ``DBotScore``
:param dbot_score: If URL has reputation then create DBotScore object
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'URL(val.Data && val.Data == obj.Data)'
def __init__(self, url, dbot_score, detection_engines=None, positive_detections=None, category=None,
feed_related_indicators=None, tags=None, malware_family=None, port=None, internal=None,
campaign=None, traffic_light_protocol=None, threat_types=None, asn=None, as_owner=None,
geo_country=None, organization=None, community_notes=None, publications=None, relationships=None):
self.url = url
self.detection_engines = detection_engines
self.positive_detections = positive_detections
self.category = category
self.feed_related_indicators = feed_related_indicators
self.tags = tags
self.malware_family = malware_family
self.port = port
self.internal = internal
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.threat_types = threat_types
self.asn = asn
self.as_owner = as_owner
self.geo_country = geo_country
self.organization = organization
self.community_notes = community_notes
self.publications = publications
self.relationships = relationships
self.dbot_score = dbot_score
def to_context(self):
url_context = {
'Data': self.url
}
if self.detection_engines is not None:
url_context['DetectionEngines'] = self.detection_engines
if self.positive_detections is not None:
url_context['PositiveDetections'] = self.positive_detections
if self.category:
url_context['Category'] = self.category
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
url_context['FeedRelatedIndicators'] = feed_related_indicators
if self.tags:
url_context['Tags'] = self.tags
if self.malware_family:
url_context['MalwareFamily'] = self.malware_family
if self.port:
url_context['Port'] = self.port
if self.internal:
url_context['Internal'] = self.internal
if self.campaign:
url_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
url_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
url_context['ThreatTypes'] = threat_types
if self.asn:
url_context['ASN'] = self.asn
if self.as_owner:
url_context['ASOwner'] = self.as_owner
if self.geo_country:
url_context['Geo'] = {'Country': self.geo_country}
if self.organization:
url_context['Organization'] = self.organization
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
url_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
url_context['Publications'] = publications
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
url_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
url_context['Relationships'] = relationships_context
ret_value = {
Common.URL.CONTEXT_PATH: url_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
class Domain(Indicator):
""" ignore docstring
Domain indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#domain
"""
CONTEXT_PATH = 'Domain(val.Name && val.Name == obj.Name)'
def __init__(self, domain, dbot_score, dns=None, detection_engines=None, positive_detections=None,
organization=None, sub_domains=None, creation_date=None, updated_date=None, expiration_date=None,
domain_status=None, name_servers=None, feed_related_indicators=None, malware_family=None,
registrar_name=None, registrar_abuse_email=None, registrar_abuse_phone=None,
registrant_name=None, registrant_email=None, registrant_phone=None, registrant_country=None,
admin_name=None, admin_email=None, admin_phone=None, admin_country=None, tags=None,
domain_idn_name=None, port=None,
internal=None, category=None, campaign=None, traffic_light_protocol=None, threat_types=None,
community_notes=None, publications=None, geo_location=None, geo_country=None,
geo_description=None, tech_country=None, tech_name=None, tech_email=None, tech_organization=None,
billing=None, relationships=None):
self.domain = domain
self.dns = dns
self.detection_engines = detection_engines
self.positive_detections = positive_detections
self.organization = organization
self.sub_domains = sub_domains
self.creation_date = creation_date
self.updated_date = updated_date
self.expiration_date = expiration_date
self.registrar_name = registrar_name
self.registrar_abuse_email = registrar_abuse_email
self.registrar_abuse_phone = registrar_abuse_phone
self.registrant_name = registrant_name
self.registrant_email = registrant_email
self.registrant_phone = registrant_phone
self.registrant_country = registrant_country
self.admin_name = admin_name
self.admin_email = admin_email
self.admin_phone = admin_phone
self.admin_country = admin_country
self.tags = tags
self.domain_status = domain_status
self.name_servers = name_servers
self.feed_related_indicators = feed_related_indicators
self.malware_family = malware_family
self.domain_idn_name = domain_idn_name
self.port = port
self.internal = internal
self.category = category
self.campaign = campaign
self.traffic_light_protocol = traffic_light_protocol
self.threat_types = threat_types
self.community_notes = community_notes
self.publications = publications
self.geo_location = geo_location
self.geo_country = geo_country
self.geo_description = geo_description
self.tech_country = tech_country
self.tech_name = tech_name
self.tech_organization = tech_organization
self.tech_email = tech_email
self.billing = billing
self.relationships = relationships
self.dbot_score = dbot_score
def to_context(self):
domain_context = {
'Name': self.domain
}
whois_context = {}
if self.dns:
domain_context['DNS'] = self.dns
if self.detection_engines is not None:
domain_context['DetectionEngines'] = self.detection_engines
if self.positive_detections is not None:
domain_context['PositiveDetections'] = self.positive_detections
if self.registrar_name or self.registrar_abuse_email or self.registrar_abuse_phone:
domain_context['Registrar'] = {
'Name': self.registrar_name,
'AbuseEmail': self.registrar_abuse_email,
'AbusePhone': self.registrar_abuse_phone
}
whois_context['Registrar'] = domain_context['Registrar']
if self.registrant_name or self.registrant_phone or self.registrant_email or self.registrant_country:
domain_context['Registrant'] = {
'Name': self.registrant_name,
'Email': self.registrant_email,
'Phone': self.registrant_phone,
'Country': self.registrant_country
}
whois_context['Registrant'] = domain_context['Registrant']
if self.admin_name or self.admin_email or self.admin_phone or self.admin_country:
domain_context['Admin'] = {
'Name': self.admin_name,
'Email': self.admin_email,
'Phone': self.admin_phone,
'Country': self.admin_country
}
whois_context['Admin'] = domain_context['Admin']
if self.organization:
domain_context['Organization'] = self.organization
if self.sub_domains:
domain_context['Subdomains'] = self.sub_domains
if self.domain_status:
domain_context['DomainStatus'] = self.domain_status
whois_context['DomainStatus'] = domain_context['DomainStatus']
if self.creation_date:
domain_context['CreationDate'] = self.creation_date
whois_context['CreationDate'] = domain_context['CreationDate']
if self.updated_date:
domain_context['UpdatedDate'] = self.updated_date
whois_context['UpdatedDate'] = domain_context['UpdatedDate']
if self.expiration_date:
domain_context['ExpirationDate'] = self.expiration_date
whois_context['ExpirationDate'] = domain_context['ExpirationDate']
if self.name_servers:
domain_context['NameServers'] = self.name_servers
whois_context['NameServers'] = domain_context['NameServers']
if self.tags:
domain_context['Tags'] = self.tags
if self.feed_related_indicators:
feed_related_indicators = []
for feed_related_indicator in self.feed_related_indicators:
feed_related_indicators.append(feed_related_indicator.to_context())
domain_context['FeedRelatedIndicators'] = feed_related_indicators
if self.malware_family:
domain_context['MalwareFamily'] = self.malware_family
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
domain_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.domain_idn_name:
domain_context['DomainIDNName'] = self.domain_idn_name
if self.port:
domain_context['Port'] = self.port
if self.internal:
domain_context['Internal'] = self.internal
if self.category:
domain_context['Category'] = self.category
if self.campaign:
domain_context['Campaign'] = self.campaign
if self.traffic_light_protocol:
domain_context['TrafficLightProtocol'] = self.traffic_light_protocol
if self.threat_types:
threat_types = []
for threat_type in self.threat_types:
threat_types.append(threat_type.to_context())
domain_context['ThreatTypes'] = threat_types
if self.community_notes:
community_notes = []
for community_note in self.community_notes:
community_notes.append(community_note.to_context())
domain_context['CommunityNotes'] = community_notes
if self.publications:
publications = []
for publication in self.publications:
publications.append(publication.to_context())
domain_context['Publications'] = publications
if self.geo_location or self.geo_country or self.geo_description:
domain_context['Geo'] = {}
if self.geo_location:
domain_context['Geo']['Location'] = self.geo_location
if self.geo_country:
domain_context['Geo']['Country'] = self.geo_country
if self.geo_description:
domain_context['Geo']['Description'] = self.geo_description
if self.tech_country or self.tech_name or self.tech_organization or self.tech_email:
domain_context['Tech'] = {}
if self.tech_country:
domain_context['Tech']['Country'] = self.tech_country
if self.tech_name:
domain_context['Tech']['Name'] = self.tech_name
if self.tech_organization:
domain_context['Tech']['Organization'] = self.tech_organization
if self.tech_email:
domain_context['Tech']['Email'] = self.tech_email
if self.billing:
domain_context['Billing'] = self.billing
if whois_context:
domain_context['WHOIS'] = whois_context
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
domain_context['Relationships'] = relationships_context
ret_value = {
Common.Domain.CONTEXT_PATH: domain_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
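    # Illustrative sketch (not part of the original module): building a minimal
    # Common.Domain indicator and converting it to context. The DBotScore
    # constructor arguments assume the DBotScore class defined earlier in this
    # module; the integration name and values below are hypothetical.
    #
    #   dbot_score = Common.DBotScore(
    #       indicator='example.com',
    #       indicator_type=DBotScoreType.DOMAIN,
    #       integration_name='HypotheticalFeed',
    #       score=Common.DBotScore.GOOD
    #   )
    #   domain = Common.Domain(domain='example.com', dns='93.184.216.34',
    #                          dbot_score=dbot_score)
    #   context = domain.to_context()
    #   # context holds both the Domain entry (under Common.Domain.CONTEXT_PATH)
    #   # and the DBotScore entry merged into one dict.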
class Endpoint(Indicator):
""" ignore docstring
Endpoint indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#endpoint
"""
CONTEXT_PATH = 'Endpoint(val.ID && val.ID == obj.ID)'
def __init__(self, id, hostname=None, ip_address=None, domain=None, mac_address=None,
os=None, os_version=None, dhcp_server=None, bios_version=None, model=None,
memory=None, processors=None, processor=None, relationships=None, vendor=None, status=None,
is_isolated=None):
self.id = id
self.hostname = hostname
self.ip_address = ip_address
self.domain = domain
self.mac_address = mac_address
self.os = os
self.os_version = os_version
self.dhcp_server = dhcp_server
self.bios_version = bios_version
self.model = model
self.memory = memory
self.processors = processors
self.processor = processor
self.vendor = vendor
self.status = status
self.is_isolated = is_isolated
self.relationships = relationships
def to_context(self):
endpoint_context = {
'ID': self.id
}
if self.hostname:
endpoint_context['Hostname'] = self.hostname
if self.ip_address:
endpoint_context['IPAddress'] = self.ip_address
if self.domain:
endpoint_context['Domain'] = self.domain
if self.mac_address:
endpoint_context['MACAddress'] = self.mac_address
if self.os:
endpoint_context['OS'] = self.os
if self.os_version:
endpoint_context['OSVersion'] = self.os_version
if self.dhcp_server:
endpoint_context['DHCPServer'] = self.dhcp_server
if self.bios_version:
endpoint_context['BIOSVersion'] = self.bios_version
if self.model:
endpoint_context['Model'] = self.model
if self.memory:
endpoint_context['Memory'] = self.memory
if self.processors:
endpoint_context['Processors'] = self.processors
if self.processor:
endpoint_context['Processor'] = self.processor
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
endpoint_context['Relationships'] = relationships_context
if self.vendor:
endpoint_context['Vendor'] = self.vendor
if self.status:
if self.status not in ENDPOINT_STATUS_OPTIONS:
raise ValueError('Status does not have a valid value such as: Online or Offline')
endpoint_context['Status'] = self.status
if self.is_isolated:
if self.is_isolated not in ENDPOINT_ISISOLATED_OPTIONS:
raise ValueError('Is Isolated does not have a valid value such as: Yes, No, Pending'
' isolation or Pending unisolation')
endpoint_context['IsIsolated'] = self.is_isolated
ret_value = {
Common.Endpoint.CONTEXT_PATH: endpoint_context
}
return ret_value
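    # Illustrative sketch (hypothetical values): Status and IsIsolated are
    # validated against fixed option lists, so an invalid value raises at
    # to_context() time instead of silently polluting the context.
    #
    #   endpoint = Common.Endpoint(id='ep-42', hostname='host01',
    #                              ip_address='10.0.0.7', status='Online')
    #   endpoint.to_context()   # ok
    #   Common.Endpoint(id='ep-43', status='Rebooting').to_context()  # raises ValueError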
class Account(Indicator):
"""
Account indicator - https://xsoar.pan.dev/docs/integrations/context-standards-recommended#account
:type dbot_score: ``DBotScore``
:param dbot_score: If account has reputation then create DBotScore object
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Account(val.id && val.id == obj.id)'
def __init__(self, id, type=None, username=None, display_name=None, groups=None,
domain=None, email_address=None, telephone_number=None, office=None, job_title=None,
department=None, country=None, state=None, city=None, street=None, is_enabled=None,
dbot_score=None, relationships=None):
self.id = id
self.type = type
self.username = username
self.display_name = display_name
self.groups = groups
self.domain = domain
self.email_address = email_address
self.telephone_number = telephone_number
self.office = office
self.job_title = job_title
self.department = department
self.country = country
self.state = state
self.city = city
self.street = street
self.is_enabled = is_enabled
self.relationships = relationships
if not isinstance(dbot_score, Common.DBotScore):
raise ValueError('dbot_score must be of type DBotScore')
self.dbot_score = dbot_score
def to_context(self):
account_context = {
'Id': self.id
}
if self.type:
account_context['Type'] = self.type
            irrelevant = ['CONTEXT_PATH', 'to_context', 'dbot_score', 'Id']
            details = [detail for detail in dir(self) if not detail.startswith('__') and detail not in irrelevant]
for detail in details:
if self.__getattribute__(detail):
if detail == 'email_address':
account_context['Email'] = {
'Address': self.email_address
}
else:
Detail = camelize_string(detail, '_')
account_context[Detail] = self.__getattribute__(detail)
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
account_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
if self.relationships:
relationships_context = [relationship.to_context() for relationship in self.relationships if
relationship.to_context()]
account_context['Relationships'] = relationships_context
ret_value = {
Common.Account.CONTEXT_PATH: account_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
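    # Note on the Account context generation above: instead of listing every
    # field explicitly, to_context() introspects the instance with dir(self)
    # and camelizes each set snake_case attribute into its context key, e.g.
    # (hypothetical values, assuming a valid DBotScore instance):
    #
    #   acct = Common.Account(id='u-123', display_name='J. Doe',
    #                         email_address='jdoe@example.com', dbot_score=dbot_score)
    #   acct.to_context()
    #   # -> display_name becomes 'DisplayName'; email_address is special-cased
    #   #    into a nested {'Email': {'Address': ...}} entry.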
class Cryptocurrency(Indicator):
"""
Cryptocurrency indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#cryptocurrency
:type address: ``str``
:param address: The Cryptocurrency address
:type address_type: ``str``
:param address_type: The Cryptocurrency type - e.g. `bitcoin`.
:type dbot_score: ``DBotScore``
:param dbot_score: If the address has reputation then create DBotScore object.
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Cryptocurrency(val.Address && val.Address == obj.Address)'
def __init__(self, address, address_type, dbot_score):
self.address = address
self.address_type = address_type
self.dbot_score = dbot_score
def to_context(self):
crypto_context = {
'Address': self.address,
'AddressType': self.address_type
}
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
crypto_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
ret_value = {
Common.Cryptocurrency.CONTEXT_PATH: crypto_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
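    # Illustrative sketch (hypothetical address): Cryptocurrency is one of the
    # simplest indicators - only the address, its type and a DBotScore.
    #
    #   crypto = Common.Cryptocurrency(
    #       address='1BoatSLRHtKNngkdXEeobR76b53LETtpyT',
    #       address_type='bitcoin',
    #       dbot_score=dbot_score)
    #   crypto.to_context()['Cryptocurrency(val.Address && val.Address == obj.Address)']
    #   # -> {'Address': '1Boat...', 'AddressType': 'bitcoin'}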
class CertificatePublicKey(object):
"""
CertificatePublicKey class
Defines an X509 PublicKey used in Common.Certificate
:type algorithm: ``str``
:param algorithm: The encryption algorithm: DSA, RSA, EC or UNKNOWN (Common.CertificatePublicKey.Algorithm enum)
:type length: ``int``
:param length: The length of the public key
:type publickey: ``Optional[str]``
        :param publickey: the raw public key value
:type p: ``Optional[str]``
:param p: P parameter used in DSA algorithm
:type q: ``Optional[str]``
:param q: Q parameter used in DSA algorithm
:type g: ``Optional[str]``
:param g: G parameter used in DSA algorithm
:type modulus: ``Optional[str]``
:param modulus: modulus parameter used in RSA algorithm
        :type exponent: ``Optional[int]``
        :param exponent: exponent parameter used in RSA algorithm
:type x: ``Optional[str]``
:param x: X parameter used in EC algorithm
:type y: ``Optional[str]``
:param y: Y parameter used in EC algorithm
:type curve: ``Optional[str]``
:param curve: curve parameter used in EC algorithm
:return: None
:rtype: ``None``
"""
class Algorithm(object):
"""
Algorithm class to enumerate available algorithms
:return: None
:rtype: ``None``
"""
DSA = "DSA"
RSA = "RSA"
EC = "EC"
UNKNOWN = "Unknown Algorithm"
@staticmethod
def is_valid_type(_type):
return _type in (
Common.CertificatePublicKey.Algorithm.DSA,
Common.CertificatePublicKey.Algorithm.RSA,
Common.CertificatePublicKey.Algorithm.EC,
Common.CertificatePublicKey.Algorithm.UNKNOWN
)
def __init__(
self,
algorithm, # type: str
length, # type: int
            publickey=None,  # type: Optional[str]
            p=None,  # type: Optional[str]
            q=None,  # type: Optional[str]
            g=None,  # type: Optional[str]
            modulus=None,  # type: Optional[str]
            exponent=None,  # type: Optional[int]
            x=None,  # type: Optional[str]
            y=None,  # type: Optional[str]
            curve=None  # type: Optional[str]
):
if not Common.CertificatePublicKey.Algorithm.is_valid_type(algorithm):
raise TypeError('algorithm must be of type Common.CertificatePublicKey.Algorithm enum')
self.algorithm = algorithm
self.length = length
self.publickey = publickey
self.p = p
self.q = q
self.g = g
self.modulus = modulus
self.exponent = exponent
self.x = x
self.y = y
self.curve = curve
def to_context(self):
publickey_context = {
'Algorithm': self.algorithm,
'Length': self.length
}
if self.publickey:
publickey_context['PublicKey'] = self.publickey
if self.algorithm == Common.CertificatePublicKey.Algorithm.DSA:
if self.p:
publickey_context['P'] = self.p
if self.q:
publickey_context['Q'] = self.q
if self.g:
publickey_context['G'] = self.g
elif self.algorithm == Common.CertificatePublicKey.Algorithm.RSA:
if self.modulus:
publickey_context['Modulus'] = self.modulus
if self.exponent:
publickey_context['Exponent'] = self.exponent
elif self.algorithm == Common.CertificatePublicKey.Algorithm.EC:
if self.x:
publickey_context['X'] = self.x
if self.y:
publickey_context['Y'] = self.y
if self.curve:
publickey_context['Curve'] = self.curve
elif self.algorithm == Common.CertificatePublicKey.Algorithm.UNKNOWN:
pass
return publickey_context
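    # Illustrative sketch (hypothetical key material): only the parameters that
    # match the chosen algorithm end up in the context, so an RSA key keeps
    # Modulus/Exponent and drops the DSA/EC fields.
    #
    #   pubkey = Common.CertificatePublicKey(
    #       algorithm=Common.CertificatePublicKey.Algorithm.RSA,
    #       length=2048,
    #       modulus='00:a1:b2:...',
    #       exponent=65537)
    #   pubkey.to_context()
    #   # -> {'Algorithm': 'RSA', 'Length': 2048, 'Modulus': '00:a1:b2:...', 'Exponent': 65537}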
class GeneralName(object):
"""
GeneralName class
Implements GeneralName interface from rfc5280
Enumerates the available General Name Types
:type gn_type: ``str``
:param gn_type: General Name Type
:type gn_value: ``str``
:param gn_value: General Name Value
:return: None
:rtype: ``None``
"""
OTHERNAME = 'otherName'
RFC822NAME = 'rfc822Name'
DNSNAME = 'dNSName'
DIRECTORYNAME = 'directoryName'
UNIFORMRESOURCEIDENTIFIER = 'uniformResourceIdentifier'
IPADDRESS = 'iPAddress'
REGISTEREDID = 'registeredID'
@staticmethod
def is_valid_type(_type):
return _type in (
Common.GeneralName.OTHERNAME,
Common.GeneralName.RFC822NAME,
Common.GeneralName.DNSNAME,
Common.GeneralName.DIRECTORYNAME,
Common.GeneralName.UNIFORMRESOURCEIDENTIFIER,
Common.GeneralName.IPADDRESS,
Common.GeneralName.REGISTEREDID
)
def __init__(
self,
gn_value, # type: str
gn_type # type: str
):
if not Common.GeneralName.is_valid_type(gn_type):
raise TypeError(
'gn_type must be of type Common.GeneralName enum'
)
self.gn_type = gn_type
self.gn_value = gn_value
def to_context(self):
return {
'Type': self.gn_type,
'Value': self.gn_value
}
def get_value(self):
return self.gn_value
class CertificateExtension(object):
"""
CertificateExtension class
Defines an X509 Certificate Extensions used in Common.Certificate
:type extension_type: ``str``
        :param extension_type: The type of Extension (from Common.CertificateExtension.ExtensionType enum, or "Other")
:type critical: ``bool``
:param critical: Whether the extension is marked as critical
:type extension_name: ``Optional[str]``
:param extension_name: Name of the extension
:type oid: ``Optional[str]``
:param oid: OID of the extension
:type subject_alternative_names: ``Optional[List[Common.CertificateExtension.SubjectAlternativeName]]``
:param subject_alternative_names: Subject Alternative Names
:type authority_key_identifier: ``Optional[Common.CertificateExtension.AuthorityKeyIdentifier]``
:param authority_key_identifier: Authority Key Identifier
:type digest: ``Optional[str]``
:param digest: digest for Subject Key Identifier extension
:type digital_signature: ``Optional[bool]``
:param digital_signature: Digital Signature usage for Key Usage extension
:type content_commitment: ``Optional[bool]``
:param content_commitment: Content Commitment usage for Key Usage extension
:type key_encipherment: ``Optional[bool]``
:param key_encipherment: Key Encipherment usage for Key Usage extension
:type data_encipherment: ``Optional[bool]``
:param data_encipherment: Data Encipherment usage for Key Usage extension
:type key_agreement: ``Optional[bool]``
:param key_agreement: Key Agreement usage for Key Usage extension
:type key_cert_sign: ``Optional[bool]``
:param key_cert_sign: Key Cert Sign usage for Key Usage extension
:type usages: ``Optional[List[str]]``
:param usages: Usages for Extended Key Usage extension
:type distribution_points: ``Optional[List[Common.CertificateExtension.DistributionPoint]]``
:param distribution_points: Distribution Points
:type certificate_policies: ``Optional[List[Common.CertificateExtension.CertificatePolicy]]``
:param certificate_policies: Certificate Policies
:type authority_information_access: ``Optional[List[Common.CertificateExtension.AuthorityInformationAccess]]``
:param authority_information_access: Authority Information Access
:type basic_constraints: ``Optional[Common.CertificateExtension.BasicConstraints]``
:param basic_constraints: Basic Constraints
:type signed_certificate_timestamps: ``Optional[List[Common.CertificateExtension.SignedCertificateTimestamp]]``
:param signed_certificate_timestamps: (PreCertificate)Signed Certificate Timestamps
:type value: ``Optional[Union[str, List[Any], Dict[str, Any]]]``
:param value: Raw value of the Extension (used for "Other" type)
:return: None
:rtype: ``None``
"""
class SubjectAlternativeName(object):
"""
SubjectAlternativeName class
Implements Subject Alternative Name extension interface
:type gn: ``Optional[Common.GeneralName]``
:param gn: General Name Type provided as Common.GeneralName
:type gn_type: ``Optional[str]``
:param gn_type: General Name Type provided as string
:type gn_value: ``Optional[str]``
:param gn_value: General Name Value provided as string
:return: None
:rtype: ``None``
"""
def __init__(
self,
gn=None, # type: Optional[Common.GeneralName]
gn_type=None, # type: Optional[str]
gn_value=None # type: Optional[str]
):
if gn:
self.gn = gn
elif gn_type and gn_value:
self.gn = Common.GeneralName(
gn_value=gn_value,
gn_type=gn_type
)
else:
                    raise ValueError('either GeneralName or gn_type/gn_value required to initialize SubjectAlternativeName')
def to_context(self):
return self.gn.to_context()
def get_value(self):
return self.gn.get_value()
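        # Illustrative sketch (hypothetical values): a SubjectAlternativeName can
        # wrap an existing Common.GeneralName or be built from a type/value pair.
        #
        #   san = Common.CertificateExtension.SubjectAlternativeName(
        #       gn_type=Common.GeneralName.DNSNAME,
        #       gn_value='example.com')
        #   san.to_context()  # -> {'Type': 'dNSName', 'Value': 'example.com'}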
class AuthorityKeyIdentifier(object):
"""
AuthorityKeyIdentifier class
Implements Authority Key Identifier extension interface
:type issuer: ``Optional[List[Common.GeneralName]]``
:param issuer: Issuer list
:type serial_number: ``Optional[str]``
:param serial_number: Serial Number
:type key_identifier: ``Optional[str]``
:param key_identifier: Key Identifier
:return: None
:rtype: ``None``
"""
def __init__(
self,
issuer=None, # type: Optional[List[Common.GeneralName]]
serial_number=None, # type: Optional[str]
key_identifier=None # type: Optional[str]
):
self.issuer = issuer
self.serial_number = serial_number
self.key_identifier = key_identifier
def to_context(self):
authority_key_identifier_context = {} # type: Dict[str, Any]
if self.issuer:
                    authority_key_identifier_context['Issuer'] = self.issuer
if self.serial_number:
authority_key_identifier_context["SerialNumber"] = self.serial_number
if self.key_identifier:
authority_key_identifier_context["KeyIdentifier"] = self.key_identifier
return authority_key_identifier_context
class DistributionPoint(object):
"""
DistributionPoint class
Implements Distribution Point extension interface
:type full_name: ``Optional[List[Common.GeneralName]]``
:param full_name: Full Name list
:type relative_name: ``Optional[str]``
:param relative_name: Relative Name
:type crl_issuer: ``Optional[List[Common.GeneralName]]``
:param crl_issuer: CRL Issuer
:type reasons: ``Optional[List[str]]``
:param reasons: Reason list
:return: None
:rtype: ``None``
"""
def __init__(
self,
full_name=None, # type: Optional[List[Common.GeneralName]]
relative_name=None, # type: Optional[str]
crl_issuer=None, # type: Optional[List[Common.GeneralName]]
reasons=None # type: Optional[List[str]]
):
self.full_name = full_name
self.relative_name = relative_name
self.crl_issuer = crl_issuer
self.reasons = reasons
def to_context(self):
distribution_point_context = {} # type: Dict[str, Union[List, str]]
if self.full_name:
distribution_point_context["FullName"] = [fn.to_context() for fn in self.full_name]
if self.relative_name:
distribution_point_context["RelativeName"] = self.relative_name
if self.crl_issuer:
distribution_point_context["CRLIssuer"] = [ci.to_context() for ci in self.crl_issuer]
if self.reasons:
distribution_point_context["Reasons"] = self.reasons
return distribution_point_context
class CertificatePolicy(object):
"""
CertificatePolicy class
Implements Certificate Policy extension interface
:type policy_identifier: ``str``
:param policy_identifier: Policy Identifier
:type policy_qualifiers: ``Optional[List[str]]``
:param policy_qualifiers: Policy Qualifier list
:return: None
:rtype: ``None``
"""
def __init__(
self,
policy_identifier, # type: str
policy_qualifiers=None # type: Optional[List[str]]
):
self.policy_identifier = policy_identifier
self.policy_qualifiers = policy_qualifiers
def to_context(self):
certificate_policies_context = {
"PolicyIdentifier": self.policy_identifier
} # type: Dict[str, Union[List, str]]
if self.policy_qualifiers:
certificate_policies_context["PolicyQualifiers"] = self.policy_qualifiers
return certificate_policies_context
class AuthorityInformationAccess(object):
"""
AuthorityInformationAccess class
Implements Authority Information Access extension interface
:type access_method: ``str``
:param access_method: Access Method
:type access_location: ``Common.GeneralName``
:param access_location: Access Location
:return: None
:rtype: ``None``
"""
def __init__(
self,
access_method, # type: str
access_location # type: Common.GeneralName
):
self.access_method = access_method
self.access_location = access_location
def to_context(self):
return {
"AccessMethod": self.access_method,
"AccessLocation": self.access_location.to_context()
}
class BasicConstraints(object):
"""
BasicConstraints class
Implements Basic Constraints extension interface
:type ca: ``bool``
:param ca: Certificate Authority
:type path_length: ``int``
:param path_length: Path Length
:return: None
:rtype: ``None``
"""
def __init__(
self,
ca, # type: bool
path_length=None # type: int
):
self.ca = ca
self.path_length = path_length
def to_context(self):
basic_constraints_context = {
"CA": self.ca
} # type: Dict[str, Union[str, int]]
if self.path_length:
basic_constraints_context["PathLength"] = self.path_length
return basic_constraints_context
class SignedCertificateTimestamp(object):
"""
SignedCertificateTimestamp class
            Implements interface for "SignedCertificateTimestamp" extensions
:type entry_type: ``str``
:param entry_type: Entry Type (from Common.CertificateExtension.SignedCertificateTimestamp.EntryType enum)
            :type version: ``int``
            :param version: Version
:type log_id: ``str``
:param log_id: Log ID
:type timestamp: ``str``
:param timestamp: Timestamp (ISO8601 string representation in UTC)
:return: None
:rtype: ``None``
"""
class EntryType(object):
"""
EntryType class
Enumerates Entry Types for SignedCertificateTimestamp class
:return: None
:rtype: ``None``
"""
PRECERTIFICATE = "PreCertificate"
X509CERTIFICATE = "X509Certificate"
@staticmethod
def is_valid_type(_type):
return _type in (
Common.CertificateExtension.SignedCertificateTimestamp.EntryType.PRECERTIFICATE,
Common.CertificateExtension.SignedCertificateTimestamp.EntryType.X509CERTIFICATE
)
def __init__(
self,
entry_type, # type: str
version, # type: int
log_id, # type: str
timestamp # type: str
):
if not Common.CertificateExtension.SignedCertificateTimestamp.EntryType.is_valid_type(entry_type):
raise TypeError(
'entry_type must be of type Common.CertificateExtension.SignedCertificateTimestamp.EntryType enum'
)
self.entry_type = entry_type
self.version = version
self.log_id = log_id
self.timestamp = timestamp
def to_context(self):
timestamps_context = {} # type: Dict[str, Any]
timestamps_context['Version'] = self.version
timestamps_context["LogId"] = self.log_id
timestamps_context["Timestamp"] = self.timestamp
timestamps_context["EntryType"] = self.entry_type
return timestamps_context
class ExtensionType(object):
"""
ExtensionType class
            Enumerates Extension Types for Common.CertificateExtension class
:return: None
:rtype: ``None``
"""
SUBJECTALTERNATIVENAME = "SubjectAlternativeName"
AUTHORITYKEYIDENTIFIER = "AuthorityKeyIdentifier"
SUBJECTKEYIDENTIFIER = "SubjectKeyIdentifier"
KEYUSAGE = "KeyUsage"
EXTENDEDKEYUSAGE = "ExtendedKeyUsage"
CRLDISTRIBUTIONPOINTS = "CRLDistributionPoints"
CERTIFICATEPOLICIES = "CertificatePolicies"
AUTHORITYINFORMATIONACCESS = "AuthorityInformationAccess"
BASICCONSTRAINTS = "BasicConstraints"
SIGNEDCERTIFICATETIMESTAMPS = "SignedCertificateTimestamps"
PRESIGNEDCERTIFICATETIMESTAMPS = "PreCertSignedCertificateTimestamps"
OTHER = "Other"
@staticmethod
def is_valid_type(_type):
return _type in (
Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME,
Common.CertificateExtension.ExtensionType.AUTHORITYKEYIDENTIFIER,
Common.CertificateExtension.ExtensionType.SUBJECTKEYIDENTIFIER,
Common.CertificateExtension.ExtensionType.KEYUSAGE,
Common.CertificateExtension.ExtensionType.EXTENDEDKEYUSAGE,
Common.CertificateExtension.ExtensionType.CRLDISTRIBUTIONPOINTS,
Common.CertificateExtension.ExtensionType.CERTIFICATEPOLICIES,
Common.CertificateExtension.ExtensionType.AUTHORITYINFORMATIONACCESS,
Common.CertificateExtension.ExtensionType.BASICCONSTRAINTS,
Common.CertificateExtension.ExtensionType.SIGNEDCERTIFICATETIMESTAMPS,
Common.CertificateExtension.ExtensionType.PRESIGNEDCERTIFICATETIMESTAMPS,
Common.CertificateExtension.ExtensionType.OTHER # for extensions that are not handled explicitly
)
def __init__(
self,
extension_type, # type: str
critical, # type: bool
oid=None, # type: Optional[str]
extension_name=None, # type: Optional[str]
subject_alternative_names=None, # type: Optional[List[Common.CertificateExtension.SubjectAlternativeName]]
authority_key_identifier=None, # type: Optional[Common.CertificateExtension.AuthorityKeyIdentifier]
            digest=None,  # type: Optional[str]
digital_signature=None, # type: Optional[bool]
content_commitment=None, # type: Optional[bool]
key_encipherment=None, # type: Optional[bool]
data_encipherment=None, # type: Optional[bool]
key_agreement=None, # type: Optional[bool]
key_cert_sign=None, # type: Optional[bool]
crl_sign=None, # type: Optional[bool]
usages=None, # type: Optional[List[str]]
distribution_points=None, # type: Optional[List[Common.CertificateExtension.DistributionPoint]]
certificate_policies=None, # type: Optional[List[Common.CertificateExtension.CertificatePolicy]]
authority_information_access=None, # type: Optional[List[Common.CertificateExtension.AuthorityInformationAccess]]
basic_constraints=None, # type: Optional[Common.CertificateExtension.BasicConstraints]
signed_certificate_timestamps=None, # type: Optional[List[Common.CertificateExtension.SignedCertificateTimestamp]]
value=None # type: Optional[Union[str, List[Any], Dict[str, Any]]]
):
if not Common.CertificateExtension.ExtensionType.is_valid_type(extension_type):
raise TypeError('algorithm must be of type Common.CertificateExtension.ExtensionType enum')
self.extension_type = extension_type
self.critical = critical
if self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME:
self.subject_alternative_names = subject_alternative_names
self.oid = "2.5.29.17"
self.extension_name = "subjectAltName"
elif self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTKEYIDENTIFIER:
if not digest:
raise ValueError('digest is mandatory for SubjectKeyIdentifier extension')
self.digest = digest
self.oid = "2.5.29.14"
self.extension_name = "subjectKeyIdentifier"
elif self.extension_type == Common.CertificateExtension.ExtensionType.KEYUSAGE:
self.digital_signature = digital_signature
self.content_commitment = content_commitment
self.key_encipherment = key_encipherment
self.data_encipherment = data_encipherment
self.key_agreement = key_agreement
self.key_cert_sign = key_cert_sign
self.crl_sign = crl_sign
self.oid = "2.5.29.15"
self.extension_name = "keyUsage"
elif self.extension_type == Common.CertificateExtension.ExtensionType.EXTENDEDKEYUSAGE:
if not usages:
raise ValueError('usages is mandatory for ExtendedKeyUsage extension')
self.usages = usages
self.oid = "2.5.29.37"
self.extension_name = "extendedKeyUsage"
elif self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYKEYIDENTIFIER:
self.authority_key_identifier = authority_key_identifier
self.oid = "2.5.29.35"
self.extension_name = "authorityKeyIdentifier"
elif self.extension_type == Common.CertificateExtension.ExtensionType.CRLDISTRIBUTIONPOINTS:
self.distribution_points = distribution_points
self.oid = "2.5.29.31"
self.extension_name = "cRLDistributionPoints"
elif self.extension_type == Common.CertificateExtension.ExtensionType.CERTIFICATEPOLICIES:
self.certificate_policies = certificate_policies
self.oid = "2.5.29.32"
self.extension_name = "certificatePolicies"
elif self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYINFORMATIONACCESS:
self.authority_information_access = authority_information_access
self.oid = "1.3.6.1.5.5.7.1.1"
self.extension_name = "authorityInfoAccess"
elif self.extension_type == Common.CertificateExtension.ExtensionType.BASICCONSTRAINTS:
self.basic_constraints = basic_constraints
self.oid = "2.5.29.19"
self.extension_name = "basicConstraints"
elif self.extension_type == Common.CertificateExtension.ExtensionType.PRESIGNEDCERTIFICATETIMESTAMPS:
self.signed_certificate_timestamps = signed_certificate_timestamps
self.oid = "1.3.6.1.4.1.11129.2.4.2"
self.extension_name = "signedCertificateTimestampList"
elif self.extension_type == Common.CertificateExtension.ExtensionType.SIGNEDCERTIFICATETIMESTAMPS:
self.signed_certificate_timestamps = signed_certificate_timestamps
self.oid = "1.3.6.1.4.1.11129.2.4.5"
self.extension_name = "signedCertificateTimestampList"
elif self.extension_type == Common.CertificateExtension.ExtensionType.OTHER:
self.value = value
# override oid, extension_name if provided as inputs
if oid:
self.oid = oid
if extension_name:
self.extension_name = extension_name
def to_context(self):
extension_context = {
"OID": self.oid,
"Name": self.extension_name,
"Critical": self.critical
} # type: Dict[str, Any]
if (
self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME
and self.subject_alternative_names is not None
):
extension_context["Value"] = [san.to_context() for san in self.subject_alternative_names]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYKEYIDENTIFIER
and self.authority_key_identifier is not None
):
extension_context["Value"] = self.authority_key_identifier.to_context()
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTKEYIDENTIFIER
and self.digest is not None
):
extension_context["Value"] = {
"Digest": self.digest
}
elif self.extension_type == Common.CertificateExtension.ExtensionType.KEYUSAGE:
key_usage = {} # type: Dict[str, bool]
if self.digital_signature:
key_usage["DigitalSignature"] = self.digital_signature
if self.content_commitment:
key_usage["ContentCommitment"] = self.content_commitment
if self.key_encipherment:
key_usage["KeyEncipherment"] = self.key_encipherment
if self.data_encipherment:
key_usage["DataEncipherment"] = self.data_encipherment
if self.key_agreement:
key_usage["KeyAgreement"] = self.key_agreement
if self.key_cert_sign:
key_usage["KeyCertSign"] = self.key_cert_sign
if self.crl_sign:
key_usage["CrlSign"] = self.crl_sign
if key_usage:
extension_context["Value"] = key_usage
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.EXTENDEDKEYUSAGE
and self.usages is not None
):
extension_context["Value"] = {
"Usages": [u for u in self.usages]
}
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.CRLDISTRIBUTIONPOINTS
and self.distribution_points is not None
):
extension_context["Value"] = [dp.to_context() for dp in self.distribution_points]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.CERTIFICATEPOLICIES
and self.certificate_policies is not None
):
extension_context["Value"] = [cp.to_context() for cp in self.certificate_policies]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.AUTHORITYINFORMATIONACCESS
and self.authority_information_access is not None
):
extension_context["Value"] = [aia.to_context() for aia in self.authority_information_access]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.BASICCONSTRAINTS
and self.basic_constraints is not None
):
extension_context["Value"] = self.basic_constraints.to_context()
elif (
self.extension_type in [
Common.CertificateExtension.ExtensionType.SIGNEDCERTIFICATETIMESTAMPS,
Common.CertificateExtension.ExtensionType.PRESIGNEDCERTIFICATETIMESTAMPS
]
and self.signed_certificate_timestamps is not None
):
extension_context["Value"] = [sct.to_context() for sct in self.signed_certificate_timestamps]
elif (
self.extension_type == Common.CertificateExtension.ExtensionType.OTHER
and self.value is not None
):
extension_context["Value"] = self.value
return extension_context
class Certificate(Indicator):
"""
Implements the X509 Certificate interface
Certificate indicator - https://xsoar.pan.dev/docs/integrations/context-standards-mandatory#certificate
:type subject_dn: ``str``
:param subject_dn: Subject Distinguished Name
:type dbot_score: ``DBotScore``
:param dbot_score: If Certificate has a score then create and set a DBotScore object.
:type name: ``Optional[Union[str, List[str]]]``
:param name: Name (if not provided output is calculated from SubjectDN and SAN)
:type issuer_dn: ``Optional[str]``
:param issuer_dn: Issuer Distinguished Name
:type serial_number: ``Optional[str]``
:param serial_number: Serial Number
:type validity_not_after: ``Optional[str]``
:param validity_not_after: Certificate Expiration Timestamp (ISO8601 string representation)
:type validity_not_before: ``Optional[str]``
:param validity_not_before: Initial Certificate Validity Timestamp (ISO8601 string representation)
:type sha512: ``Optional[str]``
:param sha512: The SHA-512 hash of the certificate in binary encoded format (DER)
:type sha256: ``Optional[str]``
:param sha256: The SHA-256 hash of the certificate in binary encoded format (DER)
:type sha1: ``Optional[str]``
:param sha1: The SHA-1 hash of the certificate in binary encoded format (DER)
:type md5: ``Optional[str]``
:param md5: The MD5 hash of the certificate in binary encoded format (DER)
:type publickey: ``Optional[Common.CertificatePublicKey]``
:param publickey: Certificate Public Key
:type spki_sha256: ``Optional[str]``
        :param spki_sha256: The SHA-256 hash of the SPKI (Subject Public Key Info)
:type signature_algorithm: ``Optional[str]``
:param signature_algorithm: Signature Algorithm
:type signature: ``Optional[str]``
:param signature: Certificate Signature
:type subject_alternative_name: \
``Optional[List[Union[str,Dict[str, str],Common.CertificateExtension.SubjectAlternativeName]]]``
:param subject_alternative_name: Subject Alternative Name list
        :type extensions: ``Optional[List[Common.CertificateExtension]]``
:param extensions: Certificate Extension List
:type pem: ``Optional[str]``
:param pem: PEM encoded certificate
:return: None
:rtype: ``None``
"""
CONTEXT_PATH = 'Certificate(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || ' \
'val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512)'
def __init__(
self,
subject_dn, # type: str
dbot_score=None, # type: Optional[Common.DBotScore]
name=None, # type: Optional[Union[str, List[str]]]
issuer_dn=None, # type: Optional[str]
serial_number=None, # type: Optional[str]
validity_not_after=None, # type: Optional[str]
validity_not_before=None, # type: Optional[str]
sha512=None, # type: Optional[str]
sha256=None, # type: Optional[str]
sha1=None, # type: Optional[str]
md5=None, # type: Optional[str]
publickey=None, # type: Optional[Common.CertificatePublicKey]
spki_sha256=None, # type: Optional[str]
signature_algorithm=None, # type: Optional[str]
signature=None, # type: Optional[str]
subject_alternative_name=None, \
# type: Optional[List[Union[str,Dict[str, str],Common.CertificateExtension.SubjectAlternativeName]]]
extensions=None, # type: Optional[List[Common.CertificateExtension]]
pem=None # type: Optional[str]
):
self.subject_dn = subject_dn
self.dbot_score = dbot_score
self.name = None
if name:
if isinstance(name, str):
self.name = [name]
elif isinstance(name, list):
self.name = name
else:
raise TypeError('certificate name must be of type str or List[str]')
self.issuer_dn = issuer_dn
self.serial_number = serial_number
self.validity_not_after = validity_not_after
self.validity_not_before = validity_not_before
self.sha512 = sha512
self.sha256 = sha256
self.sha1 = sha1
self.md5 = md5
if publickey and not isinstance(publickey, Common.CertificatePublicKey):
raise TypeError('publickey must be of type Common.CertificatePublicKey')
self.publickey = publickey
self.spki_sha256 = spki_sha256
self.signature_algorithm = signature_algorithm
self.signature = signature
# if subject_alternative_name is set and is a list
# make sure it is a list of strings, dicts of strings or SAN Extensions
if (
subject_alternative_name
and isinstance(subject_alternative_name, list)
and not all(
isinstance(san, str)
or isinstance(san, dict)
or isinstance(san, Common.CertificateExtension.SubjectAlternativeName)
for san in subject_alternative_name)
):
raise TypeError(
'subject_alternative_name must be list of str or Common.CertificateExtension.SubjectAlternativeName'
)
self.subject_alternative_name = subject_alternative_name
            if (
                extensions
                and (
                    not isinstance(extensions, list)
                    or not all(isinstance(e, Common.CertificateExtension) for e in extensions)
                )
            ):
                raise TypeError('extensions must be of type List[Common.CertificateExtension]')
self.extensions = extensions
self.pem = pem
if not isinstance(dbot_score, Common.DBotScore):
raise ValueError('dbot_score must be of type DBotScore')
def to_context(self):
certificate_context = {
"SubjectDN": self.subject_dn
} # type: Dict[str, Any]
san_list = [] # type: List[Dict[str, str]]
if self.subject_alternative_name:
for san in self.subject_alternative_name:
if isinstance(san, str):
san_list.append({
'Value': san
})
elif isinstance(san, dict):
san_list.append(san)
                    elif isinstance(san, Common.CertificateExtension.SubjectAlternativeName):
san_list.append(san.to_context())
elif self.extensions: # autogenerate it from extensions
for ext in self.extensions:
if (
ext.extension_type == Common.CertificateExtension.ExtensionType.SUBJECTALTERNATIVENAME
and ext.subject_alternative_names is not None
):
for san in ext.subject_alternative_names:
san_list.append(san.to_context())
if san_list:
certificate_context['SubjectAlternativeName'] = san_list
if self.name:
certificate_context["Name"] = self.name
else: # autogenerate it
name = set() # type: Set[str]
# add subject alternative names
if san_list:
name = set([
sn['Value'] for sn in san_list
if (
'Value' in sn
and (
'Type' not in sn
or sn['Type'] in (Common.GeneralName.DNSNAME, Common.GeneralName.IPADDRESS)
)
)
])
# subject_dn is RFC4515 escaped
# replace \, and \+ with the long escaping \2c and \2b
long_escaped_subject_dn = self.subject_dn.replace("\\,", "\\2c")
long_escaped_subject_dn = long_escaped_subject_dn.replace("\\+", "\\2b")
# we then split RDN (separated by ,) and multi-valued RDN (sep by +)
rdns = long_escaped_subject_dn.replace('+', ',').split(',')
cn = next((rdn for rdn in rdns if rdn.startswith('CN=')), None)
if cn:
name.add(cn.split('=', 1)[-1])
if name:
certificate_context["Name"] = sorted(list(name))
if self.issuer_dn:
certificate_context["IssuerDN"] = self.issuer_dn
if self.serial_number:
certificate_context["SerialNumber"] = self.serial_number
if self.validity_not_before:
certificate_context["ValidityNotBefore"] = self.validity_not_before
if self.validity_not_after:
certificate_context["ValidityNotAfter"] = self.validity_not_after
if self.sha512:
certificate_context["SHA512"] = self.sha512
if self.sha256:
certificate_context["SHA256"] = self.sha256
if self.sha1:
certificate_context["SHA1"] = self.sha1
if self.md5:
certificate_context["MD5"] = self.md5
if self.publickey and isinstance(self.publickey, Common.CertificatePublicKey):
certificate_context["PublicKey"] = self.publickey.to_context()
if self.spki_sha256:
certificate_context["SPKISHA256"] = self.spki_sha256
sig = {} # type: Dict[str, str]
if self.signature_algorithm:
sig["Algorithm"] = self.signature_algorithm
if self.signature:
sig["Signature"] = self.signature
if sig:
certificate_context["Signature"] = sig
if self.extensions:
certificate_context["Extension"] = [e.to_context() for e in self.extensions]
if self.pem:
certificate_context["PEM"] = self.pem
if self.dbot_score and self.dbot_score.score == Common.DBotScore.BAD:
certificate_context['Malicious'] = {
'Vendor': self.dbot_score.integration_name,
'Description': self.dbot_score.malicious_description
}
ret_value = {
Common.Certificate.CONTEXT_PATH: certificate_context
}
if self.dbot_score:
ret_value.update(self.dbot_score.to_context())
return ret_value
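# Illustrative sketch (hypothetical values) for Common.Certificate above: when no
# explicit name is passed, to_context() derives the Name list from the dNSName/
# iPAddress Subject Alternative Names plus the CN of the (RFC4515-escaped)
# SubjectDN. A valid DBotScore instance is assumed.
#
#   cert = Common.Certificate(
#       subject_dn='CN=example.com,O=Example Org',
#       subject_alternative_name=['example.com', 'www.example.com'],
#       sha256='a' * 64,
#       dbot_score=dbot_score)
#   cert.to_context()[Common.Certificate.CONTEXT_PATH]['Name']
#   # -> ['example.com', 'www.example.com']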
class ScheduledCommand:
"""
ScheduledCommand configuration class
Holds the scheduled command configuration for the command result - managing the way the command should be polled.
:type command: ``str``
:param command: The command that'll run after next_run_in_seconds has passed.
:type next_run_in_seconds: ``int``
:param next_run_in_seconds: How long to wait before executing the command.
:type args: ``Optional[Dict[str, Any]]``
:param args: Arguments to use when executing the command.
:type timeout_in_seconds: ``Optional[int]``
:param timeout_in_seconds: Number of seconds until the polling sequence will timeout.
:return: None
:rtype: ``None``
"""
VERSION_MISMATCH_ERROR = 'This command is not supported by this XSOAR server version. Please update your server ' \
'version to 6.2.0 or later.'
def __init__(
self,
command, # type: str
next_run_in_seconds, # type: int
args=None, # type: Optional[Dict[str, Any]]
timeout_in_seconds=None, # type: Optional[int]
):
self.raise_error_if_not_supported()
self._command = command
        if next_run_in_seconds < 10:
            demisto.info('ScheduledCommandConfiguration provided value for next_run_in_seconds: '
                         '{} is too low - minimum interval is 10 seconds. '
                         'next_run_in_seconds was set to 10 seconds.'.format(next_run_in_seconds))
            next_run_in_seconds = 10
self._next_run = str(next_run_in_seconds)
self._args = args
self._timeout = str(timeout_in_seconds) if timeout_in_seconds else None
@staticmethod
def raise_error_if_not_supported():
if not is_demisto_version_ge('6.2.0'):
raise DemistoException(ScheduledCommand.VERSION_MISMATCH_ERROR)
def to_results(self):
"""
Returns the result dictionary of the polling command
"""
return assign_params(
PollingCommand=self._command,
NextRun=self._next_run,
PollingArgs=self._args,
Timeout=self._timeout
)
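# Illustrative sketch (hypothetical command and arguments): a polling integration
# command returns a ScheduledCommand inside a CommandResults (defined later in
# this module) so the server re-invokes it until the remote job completes.
#
#   scheduled = ScheduledCommand(
#       command='hypothetical-get-job-status',
#       next_run_in_seconds=30,
#       args={'job_id': '42'},
#       timeout_in_seconds=600)
#   return_results(CommandResults(
#       readable_output='Job 42 is still running...',
#       scheduled_command=scheduled))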
def camelize_string(src_str, delim='_', upper_camel=True):
"""
Transform snake_case to CamelCase
:type src_str: ``str``
:param src_str: snake_case string to convert.
:type delim: ``str``
    :param delim: the delimiter between the words of ``src_str`` (default: underscore).
:type upper_camel: ``bool``
:param upper_camel: When True then transforms string to camel case with the first letter capitalised
(for example: demisto_content to DemistoContent), otherwise the first letter will not be capitalised
(for example: demisto_content to demistoContent).
    :return: A CamelCase string.
:rtype: ``str``
"""
if not src_str: # empty string
return ""
components = src_str.split(delim)
camelize_without_first_char = ''.join(map(lambda x: x.title(), components[1:]))
if upper_camel:
return components[0].title() + camelize_without_first_char
else:
return components[0].lower() + camelize_without_first_char
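# A few expected transformations (derived from the logic above):
#
#   camelize_string('demisto_content')                     # -> 'DemistoContent'
#   camelize_string('demisto_content', upper_camel=False)  # -> 'demistoContent'
#   camelize_string('machine-name', delim='-')             # -> 'MachineName'
#   camelize_string('')                                    # -> ''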
class IndicatorsTimeline:
"""
IndicatorsTimeline class - use to return Indicator Timeline object to be used in CommandResults
:type indicators: ``list``
:param indicators: expects a list of indicators.
:type category: ``str``
:param category: indicator category.
:type message: ``str``
:param message: indicator message.
:return: None
:rtype: ``None``
"""
def __init__(self, indicators=None, category=None, message=None):
# type: (list, str, str) -> None
if indicators is None:
indicators = []
# check if we are running from an integration or automation
try:
_ = demisto.params()
default_category = 'Integration Update'
except AttributeError:
default_category = 'Automation Update'
        timelines = []
        for indicator in indicators:
            timeline = {}
            timeline['Value'] = indicator
            if category:
                timeline['Category'] = category
            else:
                timeline['Category'] = default_category
            if message:
                timeline['Message'] = message
            timelines.append(timeline)
self.indicators_timeline = timelines
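# Illustrative sketch (hypothetical values): every indicator in the list gets
# its own timeline entry with the shared category/message.
#
#   timeline = IndicatorsTimeline(
#       indicators=['example.com', '8.8.8.8'],
#       category='Integration Update',
#       message='Seen in hypothetical feed')
#   timeline.indicators_timeline
#   # -> [{'Value': 'example.com', 'Category': 'Integration Update', 'Message': ...},
#   #     {'Value': '8.8.8.8', ...}]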
def arg_to_number(arg, arg_name=None, required=False):
# type: (Any, Optional[str], bool) -> Optional[int]
"""Converts an XSOAR argument to a Python int
    This function is used to quickly validate and convert an argument provided
    to XSOAR via ``demisto.args()`` into an ``int``. It will throw a ValueError
    if the input is invalid. If the input is None, it will throw a ValueError
    if required is ``True``, or return ``None`` if required is ``False``.
:type arg: ``Any``
:param arg: argument to convert
:type arg_name: ``str``
:param arg_name: argument name
:type required: ``bool``
:param required:
throws exception if ``True`` and argument provided is None
:return:
returns an ``int`` if arg can be converted
returns ``None`` if arg is ``None`` and required is set to ``False``
otherwise throws an Exception
:rtype: ``Optional[int]``
"""
if arg is None or arg == '':
if required is True:
if arg_name:
raise ValueError('Missing "{}"'.format(arg_name))
else:
raise ValueError('Missing required argument')
return None
if isinstance(arg, str):
if arg.isdigit():
return int(arg)
try:
return int(float(arg))
except Exception:
if arg_name:
raise ValueError('Invalid number: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid number'.format(arg))
if isinstance(arg, int):
return arg
if arg_name:
raise ValueError('Invalid number: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid number'.format(arg))
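# Expected behaviour (derived from the logic above):
#
#   arg_to_number('5')      # -> 5
#   arg_to_number('4.7')    # -> 4  (via int(float('4.7')))
#   arg_to_number(None)     # -> None
#   arg_to_number(None, arg_name='limit', required=True)  # raises ValueError('Missing "limit"')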
def arg_to_datetime(arg, arg_name=None, is_utc=True, required=False, settings=None):
# type: (Any, Optional[str], bool, bool, dict) -> Optional[datetime]
"""Converts an XSOAR argument to a datetime
    This function is used to quickly validate and convert an argument provided to XSOAR
    via ``demisto.args()`` into a ``datetime``. It will throw a ValueError if the input is invalid.
    If the input is None, it will throw a ValueError if required is ``True``,
    or return ``None`` if required is ``False``.
:type arg: ``Any``
:param arg: argument to convert
:type arg_name: ``str``
:param arg_name: argument name
:type is_utc: ``bool``
:param is_utc: if True then date converted as utc timezone, otherwise will convert with local timezone.
:type required: ``bool``
:param required:
throws exception if ``True`` and argument provided is None
:type settings: ``dict``
:param settings: If provided, passed to dateparser.parse function.
:return:
        returns a ``datetime`` if conversion works
returns ``None`` if arg is ``None`` and required is set to ``False``
otherwise throws an Exception
:rtype: ``Optional[datetime]``
"""
if arg is None:
if required is True:
if arg_name:
raise ValueError('Missing "{}"'.format(arg_name))
else:
raise ValueError('Missing required argument')
return None
    if (isinstance(arg, str) and arg.isdigit()) or isinstance(arg, (int, float)):
        # a digit string, int or float - treat the value as a unix epoch
        ms = float(arg)
if ms > 2000000000.0:
# in case timestamp was provided as unix time (in milliseconds)
ms = ms / 1000.0
if is_utc:
return datetime.utcfromtimestamp(ms).replace(tzinfo=timezone.utc)
else:
return datetime.fromtimestamp(ms)
if isinstance(arg, str):
# we use dateparser to handle strings either in ISO8601 format, or
# relative time stamps.
# For example: format 2019-10-23T00:00:00 or "3 days", etc
if settings:
date = dateparser.parse(arg, settings=settings)
else:
date = dateparser.parse(arg, settings={'TIMEZONE': 'UTC'})
if date is None:
            # if date is None it means dateparser failed to parse it
if arg_name:
raise ValueError('Invalid date: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid date'.format(arg))
return date
if arg_name:
raise ValueError('Invalid date: "{}"="{}"'.format(arg_name, arg))
else:
raise ValueError('"{}" is not a valid date'.format(arg))
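# Expected behaviour (derived from the logic above; exact datetimes depend on
# the dateparser settings in use):
#
#   arg_to_datetime('2019-10-23T00:00:00')  # ISO8601 string, parsed by dateparser
#   arg_to_datetime('3 days')               # relative timestamp, ~3 days ago
#   arg_to_datetime('1572480000')           # digits <= 2000000000 -> unix epoch in seconds
#   arg_to_datetime('1572480000000')        # larger values are treated as milliseconds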
# -------------------------------- Relationships----------------------------------- #
class EntityRelationship:
"""
XSOAR entity relationship.
:type name: ``str``
:param name: Relationship name.
:type relationship_type: ``str``
:param relationship_type: Relationship type. (e.g. IndicatorToIndicator...).
:type entity_a: ``str``
:param entity_a: A value, A aka source of the relationship.
:type entity_a_family: ``str``
:param entity_a_family: Entity family of A, A aka source of the relationship. (e.g. Indicator...)
:type entity_a_type: ``str``
:param entity_a_type: Entity A type, A aka source of the relationship. (e.g. IP/URL/...).
:type entity_b: ``str``
:param entity_b: B value, B aka destination of the relationship.
:type entity_b_family: ``str``
:param entity_b_family: Entity family of B, B aka destination of the relationship. (e.g. Indicator...)
:type entity_b_type: ``str``
:param entity_b_type: Entity B type, B aka destination of the relationship. (e.g. IP/URL/...).
:type source_reliability: ``str``
:param source_reliability: Source reliability.
:type fields: ``dict``
:param fields: Custom fields. (Optional)
:type brand: ``str``
:param brand: Source brand name. (Optional)
:return: None
:rtype: ``None``
"""
class RelationshipsTypes(object):
"""
Relationships Types objects.
:return: None
:rtype: ``None``
"""
        # list of the supported relationship types.
RELATIONSHIP_TYPES = ['IndicatorToIndicator']
@staticmethod
def is_valid_type(_type):
# type: (str) -> bool
return _type in EntityRelationship.RelationshipsTypes.RELATIONSHIP_TYPES
class RelationshipsFamily(object):
"""
Relationships Family object list.
:return: None
:rtype: ``None``
"""
INDICATOR = ["Indicator"]
@staticmethod
def is_valid_type(_type):
# type: (str) -> bool
return _type in EntityRelationship.RelationshipsFamily.INDICATOR
class Relationships(object):
"""
Enum: Relations names and their reverse
:return: None
:rtype: ``None``
"""
APPLIED = 'applied'
ATTACHMENT_OF = 'attachment-of'
ATTACHES = 'attaches'
ATTRIBUTE_OF = 'attribute-of'
ATTRIBUTED_BY = 'attributed-by'
ATTRIBUTED_TO = 'attributed-to'
AUTHORED_BY = 'authored-by'
BEACONS_TO = 'beacons-to'
BUNDLED_IN = 'bundled-in'
BUNDLES = 'bundles'
COMMUNICATED_WITH = 'communicated-with'
COMMUNICATED_BY = 'communicated-by'
COMMUNICATES_WITH = 'communicates-with'
COMPROMISES = 'compromises'
CONTAINS = 'contains'
CONTROLS = 'controls'
CREATED_BY = 'created-by'
CREATES = 'creates'
DELIVERED_BY = 'delivered-by'
DELIVERS = 'delivers'
DOWNLOADS = 'downloads'
DOWNLOADS_FROM = 'downloads-from'
DROPPED_BY = 'dropped-by'
DROPS = 'drops'
DUPLICATE_OF = 'duplicate-of'
EMBEDDED_IN = 'embedded-in'
EMBEDS = 'embeds'
EXECUTED = 'executed'
EXECUTED_BY = 'executed-by'
EXFILTRATES_TO = 'exfiltrates-to'
EXPLOITS = 'exploits'
HAS = 'has'
HOSTED_ON = 'hosted-on'
HOSTS = 'hosts'
IMPERSONATES = 'impersonates'
INDICATED_BY = 'indicated-by'
INDICATOR_OF = 'indicator-of'
INJECTED_FROM = 'injected-from'
INJECTS_INTO = 'injects-into'
INVESTIGATES = 'investigates'
IS_ALSO = 'is-also'
MITIGATED_BY = 'mitigated-by'
MITIGATES = 'mitigates'
ORIGINATED_FROM = 'originated-from'
OWNED_BY = 'owned-by'
OWNS = 'owns'
PART_OF = 'part-of'
RELATED_TO = 'related-to'
REMEDIATES = 'remediates'
RESOLVED_BY = 'resolved-by'
RESOLVED_FROM = 'resolved-from'
RESOLVES_TO = 'resolves-to'
SEEN_ON = 'seen-on'
SENT = 'sent'
SENT_BY = 'sent-by'
SENT_FROM = 'sent-from'
SENT_TO = 'sent-to'
SIMILAR_TO = 'similar-to'
SUB_DOMAIN_OF = 'sub-domain-of'
SUB_TECHNIQUE_OF = 'subtechnique-of'
PARENT_TECHNIQUE_OF = 'parent-technique-of'
SUPRA_DOMAIN_OF = 'supra-domain-of'
TARGETED_BY = 'targeted-by'
TARGETS = 'targets'
TYPES = 'Types'
UPLOADED_TO = 'uploaded-to'
USED_BY = 'used-by'
USED_ON = 'used-on'
USES = 'uses'
VARIANT_OF = 'variant-of'
RELATIONSHIPS_NAMES = {'applied': 'applied-on',
'attachment-of': 'attaches',
'attaches': 'attachment-of',
'attribute-of': 'owns',
'attributed-by': 'attributed-to',
'attributed-to': 'attributed-by',
'authored-by': 'author-of',
'beacons-to': 'communicated-by',
'bundled-in': 'bundles',
'bundles': 'bundled-in',
'communicated-with': 'communicated-by',
'communicated-by': 'communicates-with',
'communicates-with': 'communicated-by',
'compromises': 'compromised-by',
'contains': 'part-of',
'controls': 'controlled-by',
'created-by': 'creates',
'creates': 'created-by',
'delivered-by': 'delivers',
'delivers': 'delivered-by',
'downloads': 'downloaded-by',
'downloads-from': 'hosts',
'dropped-by': 'drops',
'drops': 'dropped-by',
'duplicate-of': 'duplicate-of',
'embedded-in': 'embeds',
                               'embeds': 'embedded-in',
'executed': 'executed-by',
'executed-by': 'executes',
'exfiltrates-to': 'exfiltrated-from',
'exploits': 'exploited-by',
'has': 'seen-on',
'hosted-on': 'hosts',
'hosts': 'hosted-on',
'impersonates': 'impersonated-by',
'indicated-by': 'indicator-of',
'indicator-of': 'indicated-by',
'injected-from': 'injects-into',
'injects-into': 'injected-from',
'investigates': 'investigated-by',
'is-also': 'is-also',
'mitigated-by': 'mitigates',
'mitigates': 'mitigated-by',
'originated-from': 'source-of',
'owned-by': 'owns',
'owns': 'owned-by',
'part-of': 'contains',
'related-to': 'related-to',
'remediates': 'remediated-by',
'resolved-by': 'resolves-to',
'resolved-from': 'resolves-to',
'resolves-to': 'resolved-from',
'seen-on': 'has',
'sent': 'attached-to',
'sent-by': 'sent',
'sent-from': 'received-by',
'sent-to': 'received-by',
'similar-to': 'similar-to',
'sub-domain-of': 'supra-domain-of',
'supra-domain-of': 'sub-domain-of',
'subtechnique-of': 'parent-technique-of',
'parent-technique-of': 'subtechnique-of',
'targeted-by': 'targets',
'targets': 'targeted-by',
'Types': 'Reverse',
'uploaded-to': 'hosts',
'used-by': 'uses',
'used-on': 'targeted-by',
'uses': 'used-by',
'variant-of': 'variant-of'}
@staticmethod
def is_valid(_type):
# type: (str) -> bool
return _type in EntityRelationship.Relationships.RELATIONSHIPS_NAMES.keys()
@staticmethod
def get_reverse(name):
# type: (str) -> str
return EntityRelationship.Relationships.RELATIONSHIPS_NAMES[name]
def __init__(self, name, entity_a, entity_a_type, entity_b, entity_b_type,
reverse_name='', relationship_type='IndicatorToIndicator', entity_a_family='Indicator',
entity_b_family='Indicator', source_reliability="", fields=None, brand=""):
# Relationship
if not EntityRelationship.Relationships.is_valid(name):
raise ValueError("Invalid relationship: " + name)
self._name = name
if reverse_name:
if not EntityRelationship.Relationships.is_valid(reverse_name):
raise ValueError("Invalid reverse relationship: " + reverse_name)
self._reverse_name = reverse_name
else:
self._reverse_name = EntityRelationship.Relationships.get_reverse(name)
if not EntityRelationship.RelationshipsTypes.is_valid_type(relationship_type):
raise ValueError("Invalid relationship type: " + relationship_type)
self._relationship_type = relationship_type
# Entity A - Source
self._entity_a = entity_a
self._entity_a_type = entity_a_type
if not EntityRelationship.RelationshipsFamily.is_valid_type(entity_a_family):
raise ValueError("Invalid entity A Family type: " + entity_a_family)
self._entity_a_family = entity_a_family
# Entity B - Destination
if not entity_b:
demisto.info(
"WARNING: Invalid entity B - Relationships will not be created to entity A {} with relationship name {}".format(
str(entity_a), str(name)))
self._entity_b = entity_b
self._entity_b_type = entity_b_type
if not EntityRelationship.RelationshipsFamily.is_valid_type(entity_b_family):
raise ValueError("Invalid entity B Family type: " + entity_b_family)
self._entity_b_family = entity_b_family
# Custom fields
if fields:
self._fields = fields
else:
self._fields = {}
# Source
if brand:
self._brand = brand
else:
self._brand = ''
if source_reliability:
if not DBotScoreReliability.is_valid_type(source_reliability):
raise ValueError("Invalid source reliability value", source_reliability)
self._source_reliability = source_reliability
else:
self._source_reliability = ''
def to_entry(self):
""" Convert object to XSOAR entry
:rtype: ``dict``
:return: XSOAR entry representation.
"""
entry = {}
if self._entity_b:
entry = {
"name": self._name,
"reverseName": self._reverse_name,
"type": self._relationship_type,
"entityA": self._entity_a,
"entityAFamily": self._entity_a_family,
"entityAType": self._entity_a_type,
"entityB": self._entity_b,
"entityBFamily": self._entity_b_family,
"entityBType": self._entity_b_type,
"fields": self._fields,
}
if self._source_reliability:
entry["reliability"] = self._source_reliability
if self._brand:
entry["brand"] = self._brand
return entry
    def to_indicator(self):
        """ Convert object to XSOAR indicator relationship representation
        :rtype: ``dict``
        :return: XSOAR indicator relationship representation.
        """
indicator_relationship = {}
if self._entity_b:
indicator_relationship = {
"name": self._name,
"reverseName": self._reverse_name,
"type": self._relationship_type,
"entityA": self._entity_a,
"entityAFamily": self._entity_a_family,
"entityAType": self._entity_a_type,
"entityB": self._entity_b,
"entityBFamily": self._entity_b_family,
"entityBType": self._entity_b_type,
"fields": self._fields,
}
return indicator_relationship
def to_context(self):
""" Convert object to XSOAR context
:rtype: ``dict``
:return: XSOAR context representation.
"""
indicator_relationship_context = {}
if self._entity_b:
indicator_relationship_context = {
"Relationship": self._name,
"EntityA": self._entity_a,
"EntityAType": self._entity_a_type,
"EntityB": self._entity_b,
"EntityBType": self._entity_b_type,
}
return indicator_relationship_context
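# Illustrative sketch (hypothetical entities): when no reverse_name is given it
# is looked up from Relationships.RELATIONSHIPS_NAMES, so 'resolves-to' gets
# 'resolved-from' automatically.
#
#   rel = EntityRelationship(
#       name=EntityRelationship.Relationships.RESOLVES_TO,
#       entity_a='example.com', entity_a_type='Domain',
#       entity_b='93.184.216.34', entity_b_type='IP')
#   rel.to_entry()['reverseName']  # -> 'resolved-from'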
class CommandResults:
"""
    CommandResults class - used to return results to the War Room
:type outputs_prefix: ``str``
:param outputs_prefix: should be identical to the prefix in the yml contextPath in yml file. for example:
CortexXDR.Incident
:type outputs_key_field: ``str`` or ``list[str]``
    :param outputs_key_field: primary key field in the main object. If the command returns Incidents, and one of the
        properties of Incident is incident_id, then outputs_key_field='incident_id'. If the object has multiple
        unique keys, then a list of strings is supported: outputs_key_field=['id1', 'id2']
:type outputs: ``list`` or ``dict``
:param outputs: the data to be returned and will be set to context
:type indicators: ``list``
:param indicators: DEPRECATED: use 'indicator' instead.
:type indicator: ``Common.Indicator``
:param indicator: single indicator like Common.IP, Common.URL, Common.File, etc.
:type readable_output: ``str``
:param readable_output: (Optional) markdown string that will be presented in the warroom, should be human readable -
(HumanReadable) - if not set, readable output will be generated
:type raw_response: ``dict`` | ``list``
:param raw_response: must be dictionary, if not provided then will be equal to outputs. usually must be the original
raw response from the 3rd party service (originally Contents)
:type indicators_timeline: ``IndicatorsTimeline``
:param indicators_timeline: must be an IndicatorsTimeline. used by the server to populate an indicator's timeline.
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: must be a boolean, default value is False. Used to prevent AutoExtract on output.
:type relationships: ``list of EntityRelationship``
:param relationships: List of relationships of the indicator.
:type mark_as_note: ``bool``
:param mark_as_note: must be a boolean, default value is False. Used to mark entry as note.
:type entry_type: ``int`` code of EntryType
:param entry_type: type of return value, see EntryType
:type scheduled_command: ``ScheduledCommand``
:param scheduled_command: manages the way the command should be polled.
:return: None
:rtype: ``None``
"""
def __init__(self, outputs_prefix=None, outputs_key_field=None, outputs=None, indicators=None, readable_output=None,
raw_response=None, indicators_timeline=None, indicator=None, ignore_auto_extract=False,
mark_as_note=False, scheduled_command=None, relationships=None, entry_type=None):
# type: (str, object, object, list, str, object, IndicatorsTimeline, Common.Indicator, bool, bool, ScheduledCommand, list, int) -> None # noqa: E501
if raw_response is None:
raw_response = outputs
if outputs is not None and not isinstance(outputs, dict) and not outputs_prefix:
raise ValueError('outputs_prefix is missing')
if indicators and indicator:
raise ValueError('indicators is DEPRECATED, use only indicator')
if entry_type is None:
entry_type = EntryType.NOTE
self.indicators = indicators # type: Optional[List[Common.Indicator]]
self.indicator = indicator # type: Optional[Common.Indicator]
self.entry_type = entry_type # type: int
self.outputs_prefix = outputs_prefix
        # this is a public field; it is used by a lot of unit tests, so it is kept as-is
self.outputs_key_field = outputs_key_field
self._outputs_key_field = None # type: Optional[List[str]]
if not outputs_key_field:
self._outputs_key_field = None
elif isinstance(outputs_key_field, STRING_TYPES):
self._outputs_key_field = [outputs_key_field]
elif isinstance(outputs_key_field, list):
self._outputs_key_field = outputs_key_field
else:
raise TypeError('outputs_key_field must be of type str or list')
self.outputs = outputs
self.raw_response = raw_response
self.readable_output = readable_output
self.indicators_timeline = indicators_timeline
self.ignore_auto_extract = ignore_auto_extract
self.mark_as_note = mark_as_note
self.scheduled_command = scheduled_command
self.relationships = relationships
def to_context(self):
outputs = {} # type: dict
relationships = [] # type: list
if self.readable_output:
human_readable = self.readable_output
else:
human_readable = None # type: ignore[assignment]
raw_response = None # type: ignore[assignment]
indicators_timeline = [] # type: ignore[assignment]
ignore_auto_extract = False # type: bool
mark_as_note = False # type: bool
indicators = [self.indicator] if self.indicator else self.indicators
if indicators:
for indicator in indicators:
context_outputs = indicator.to_context()
for key, value in context_outputs.items():
if key not in outputs:
outputs[key] = []
outputs[key].append(value)
if self.raw_response:
raw_response = self.raw_response
if self.ignore_auto_extract:
ignore_auto_extract = True
if self.mark_as_note:
mark_as_note = True
if self.indicators_timeline:
indicators_timeline = self.indicators_timeline.indicators_timeline
if self.outputs is not None and self.outputs != []:
if not self.readable_output:
# if markdown is not provided then create table by default
human_readable = tableToMarkdown('Results', self.outputs)
if self.outputs_prefix and self._outputs_key_field:
# if both prefix and key field provided then create DT key
formatted_outputs_key = ' && '.join(['val.{0} && val.{0} == obj.{0}'.format(key_field)
for key_field in self._outputs_key_field])
outputs_key = '{0}({1})'.format(self.outputs_prefix, formatted_outputs_key)
outputs[outputs_key] = self.outputs
elif self.outputs_prefix:
outputs_key = '{}'.format(self.outputs_prefix)
outputs[outputs_key] = self.outputs
else:
outputs.update(self.outputs) # type: ignore[call-overload]
if self.relationships:
relationships = [relationship.to_entry() for relationship in self.relationships if relationship.to_entry()]
content_format = EntryFormat.JSON
if isinstance(raw_response, STRING_TYPES) or isinstance(raw_response, int):
content_format = EntryFormat.TEXT
return_entry = {
'Type': self.entry_type,
'ContentsFormat': content_format,
'Contents': raw_response,
'HumanReadable': human_readable,
'EntryContext': outputs,
'IndicatorTimeline': indicators_timeline,
'IgnoreAutoExtract': True if ignore_auto_extract else False,
'Note': mark_as_note,
'Relationships': relationships,
}
if self.scheduled_command:
return_entry.update(self.scheduled_command.to_results())
return return_entry
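# Minimal usage sketch (hypothetical output names): build a CommandResults and
# hand it to return_results() below.
#     results = CommandResults(
#         outputs_prefix='Example.Item',   # hypothetical context path
#         outputs_key_field='id',
#         outputs=[{'id': '1', 'name': 'foo'}],
#     )
#     return_results(results)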
def return_results(results):
"""
    This function wraps demisto.results() and supports multiple result types.
:type results: ``CommandResults`` or ``str`` or ``dict`` or ``BaseWidget`` or ``list``
:param results: A result object to return as a War-Room entry.
:return: None
:rtype: ``None``
"""
if results is None:
# backward compatibility reasons
demisto.results(None)
return
elif results and isinstance(results, list):
result_list = []
for result in results:
if isinstance(result, (dict, str)):
# Results of type dict or str are of the old results format and work with demisto.results()
result_list.append(result)
else:
# The rest are of the new format and have a corresponding function (to_context, to_display, etc...)
return_results(result)
if result_list:
demisto.results(result_list)
elif isinstance(results, CommandResults):
demisto.results(results.to_context())
elif isinstance(results, BaseWidget):
demisto.results(results.to_display())
elif isinstance(results, GetMappingFieldsResponse):
demisto.results(results.extract_mapping())
elif isinstance(results, GetRemoteDataResponse):
demisto.results(results.extract_for_local())
elif isinstance(results, GetModifiedRemoteDataResponse):
demisto.results(results.to_entry())
elif hasattr(results, 'to_entry'):
demisto.results(results.to_entry())
else:
demisto.results(results)
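# Note: when a list is passed, old-style entries (dict/str) are batched into a
# single demisto.results() call, while new-style objects are returned one by one.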
# deprecated
def return_outputs(readable_output, outputs=None, raw_response=None, timeline=None, ignore_auto_extract=False):
"""
DEPRECATED: use return_results() instead
    This function wraps demisto.results() and makes returning results to the user more intuitive.
:type readable_output: ``str`` | ``int``
:param readable_output: markdown string that will be presented in the warroom, should be human readable -
(HumanReadable)
:type outputs: ``dict``
:param outputs: the outputs that will be returned to playbook/investigation context (originally EntryContext)
:type raw_response: ``dict`` | ``list`` | ``str``
:param raw_response: must be dictionary, if not provided then will be equal to outputs. usually must be the original
raw response from the 3rd party service (originally Contents)
:type timeline: ``dict`` | ``list``
:param timeline: expects a list, if a dict is passed it will be put into a list. used by server to populate an
indicator's timeline. if the 'Category' field is not present in the timeline dict(s), it will automatically
        be added to the dict(s) with its value set to 'Integration Update'.
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: expects a bool value. if true then the warroom entry readable_output will not be auto enriched.
:return: None
:rtype: ``None``
"""
timeline_list = [timeline] if isinstance(timeline, dict) else timeline
if timeline_list:
for tl_obj in timeline_list:
if 'Category' not in tl_obj.keys():
tl_obj['Category'] = 'Integration Update'
return_entry = {
"Type": entryTypes["note"],
"HumanReadable": readable_output,
"ContentsFormat": formats["text"] if isinstance(raw_response, STRING_TYPES) else formats['json'],
"Contents": raw_response,
"EntryContext": outputs,
'IgnoreAutoExtract': ignore_auto_extract,
"IndicatorTimeline": timeline_list
}
# Return 'readable_output' only if needed
if readable_output and not outputs and not raw_response:
return_entry["Contents"] = readable_output
return_entry["ContentsFormat"] = formats["text"]
elif outputs and raw_response is None:
# if raw_response was not provided but outputs were provided then set Contents as outputs
return_entry["Contents"] = outputs
demisto.results(return_entry)
def return_error(message, error='', outputs=None):
"""
Returns error entry with given message and exits the script
:type message: ``str``
:param message: The message to return in the entry (required)
:type error: ``str`` or Exception
:param error: The raw error message to log (optional)
:type outputs: ``dict or None``
:param outputs: the outputs that will be returned to playbook/investigation context (optional)
:return: Error entry object
:rtype: ``dict``
"""
is_command = hasattr(demisto, 'command')
is_server_handled = is_command and demisto.command() in ('fetch-incidents',
'fetch-credentials',
'long-running-execution',
'fetch-indicators')
if is_debug_mode() and not is_server_handled and any(sys.exc_info()): # Checking that an exception occurred
message = "{}\n\n{}".format(message, traceback.format_exc())
message = LOG(message)
if error:
LOG(str(error))
LOG.print_log()
if not isinstance(message, str):
message = message.encode('utf8') if hasattr(message, 'encode') else str(message)
if is_command and demisto.command() == 'get-modified-remote-data':
if (error and not isinstance(error, NotImplementedError)) or sys.exc_info()[0] != NotImplementedError:
message = 'skip update. error: ' + message
if is_server_handled:
raise Exception(message)
else:
demisto.results({
'Type': entryTypes['error'],
'ContentsFormat': formats['text'],
'Contents': message,
'EntryContext': outputs
})
sys.exit(0)
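# Typical wiring sketch (hypothetical command dispatcher and client):
#     try:
#         return_results(some_command(client, demisto.args()))
#     except Exception as e:
#         return_error('Failed to execute command. Error: {}'.format(str(e)), error=e)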
def return_warning(message, exit=False, warning='', outputs=None, ignore_auto_extract=False):
"""
    Returns a warning entry with the specified message, and optionally exits the script (see the 'exit' argument).
:type message: ``str``
:param message: The message to return in the entry (required).
:type exit: ``bool``
:param exit: Determines if the program will terminate after the command is executed. Default is False.
:type warning: ``str``
:param warning: The warning message (raw) to log (optional).
:type outputs: ``dict or None``
:param outputs: The outputs that will be returned to playbook/investigation context (optional).
:type ignore_auto_extract: ``bool``
:param ignore_auto_extract: Determines if the War Room entry will be auto-enriched. Default is false.
:return: Warning entry object
:rtype: ``dict``
"""
LOG(message)
if warning:
LOG(warning)
LOG.print_log()
demisto.results({
'Type': entryTypes['warning'],
'ContentsFormat': formats['text'],
'IgnoreAutoExtract': ignore_auto_extract,
'Contents': str(message),
"EntryContext": outputs
})
if exit:
sys.exit(0)
def execute_command(command, args, extract_contents=True):
"""
Runs the `demisto.executeCommand()` function and checks for errors.
:type command: ``str``
:param command: The command to run. (required)
:type args: ``dict``
:param args: The command arguments. (required)
:type extract_contents: ``bool``
:param extract_contents: Whether to return only the Contents part of the results. Default is True.
:return: The command results.
:rtype: ``list`` or ``dict`` or ``str``
"""
if not hasattr(demisto, 'executeCommand'):
raise DemistoException('Cannot run demisto.executeCommand() from integrations.')
res = demisto.executeCommand(command, args)
if is_error(res):
return_error('Failed to execute {}. Error details:\n{}'.format(command, get_error(res)))
if not extract_contents:
return res
contents = [entry.get('Contents', {}) for entry in res]
return contents[0] if len(contents) == 1 else contents
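# Illustrative call (scripts only - demisto.executeCommand is unavailable in integrations):
#     contents = execute_command('getIncidents', {'query': 'status:Active'})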
def camelize(src, delim=' ', upper_camel=True):
"""
Convert all keys of a dictionary (or list of dictionaries) to CamelCase (with capital first letter)
:type src: ``dict`` or ``list``
:param src: The dictionary (or list of dictionaries) to convert the keys for. (required)
:type delim: ``str``
:param delim: The delimiter between two words in the key (e.g. delim=' ' for "Start Date"). Default ' '.
:type upper_camel: ``bool``
:param upper_camel: When True then transforms dictionary keys to camel case with the first letter capitalised
(for example: demisto_content to DemistoContent), otherwise the first letter will not be capitalised
(for example: demisto_content to demistoContent).
:return: The dictionary (or list of dictionaries) with the keys in CamelCase.
:rtype: ``dict`` or ``list``
"""
def camelize_str(src_str):
if callable(getattr(src_str, "decode", None)):
src_str = src_str.decode('utf-8')
components = src_str.split(delim)
camelize_without_first_char = ''.join(map(lambda x: x.title(), components[1:]))
if upper_camel:
return components[0].title() + camelize_without_first_char
else:
return components[0].lower() + camelize_without_first_char
if isinstance(src, list):
return [camelize(phrase, delim, upper_camel=upper_camel) for phrase in src]
return {camelize_str(key): value for key, value in src.items()}
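# For example (assuming snake_case input keys):
#     camelize({'server_url': 'x'}, delim='_')                     -> {'ServerUrl': 'x'}
#     camelize({'server_url': 'x'}, delim='_', upper_camel=False)  -> {'serverUrl': 'x'}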
# Constants for common merge paths
outputPaths = {
'file': 'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || '
'val.SHA256 && val.SHA256 == obj.SHA256 || val.SHA512 && val.SHA512 == obj.SHA512 || '
'val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTPH || '
'val.SSDeep && val.SSDeep == obj.SSDeep)',
'ip': 'IP(val.Address && val.Address == obj.Address)',
'url': 'URL(val.Data && val.Data == obj.Data)',
'domain': 'Domain(val.Name && val.Name == obj.Name)',
'cve': 'CVE(val.ID && val.ID == obj.ID)',
'email': 'Account.Email(val.Address && val.Address == obj.Address)',
'dbotscore': 'DBotScore'
}
def replace_in_keys(src, existing='.', new='_'):
"""
Replace a substring in all of the keys of a dictionary (or list of dictionaries)
:type src: ``dict`` or ``list``
:param src: The dictionary (or list of dictionaries) with keys that need replacement. (required)
:type existing: ``str``
:param existing: substring to replace.
:type new: ``str``
:param new: new substring that will replace the existing substring.
:return: The dictionary (or list of dictionaries) with keys after substring replacement.
:rtype: ``dict`` or ``list``
"""
def replace_str(src_str):
if callable(getattr(src_str, "decode", None)):
src_str = src_str.decode('utf-8')
return src_str.replace(existing, new)
if isinstance(src, list):
return [replace_in_keys(x, existing, new) for x in src]
return {replace_str(k): v for k, v in src.items()}
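# For example:
#     replace_in_keys({'a.b': 1})             -> {'a_b': 1}
#     replace_in_keys([{'x-y': 2}], '-', '')  -> [{'xy': 2}]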
# ############################## REGEX FORMATTING ###############################
regexFlags = re.M # Multi line matching
# for the global(/g) flag use re.findall({regex_format},str)
# else, use re.match({regex_format},str)
ipv4Regex = r'\b((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\b([^\/]|$)'
ipv4cidrRegex = r'\b(?:(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])(?:\[\.\]|\.)){3}(?:25[0-5]|2[0-4][0-9]|1[0-9][0-9]|[1-9]?[0-9])(\/([0-9]|[1-2][0-9]|3[0-2]))\b' # noqa: E501
ipv6Regex = r'\b(?:(?:[0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,7}:|(?:[0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|(?:[0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|(?:[0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|(?:[0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|(?:[0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:(?:(:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))\b' # noqa: E501
ipv6cidrRegex = r'\b(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))(\/(12[0-8]|1[0-1][0-9]|[1-9][0-9]|[0-9]))\b' # noqa: E501
emailRegex = r'\b[^@]+@[^@]+\.[^@]+\b'
hashRegex = r'\b[0-9a-fA-F]+\b'
urlRegex = r'(?:(?:https?|ftp|hxxps?):\/\/|www\[?\.\]?|ftp\[?\.\]?)(?:[-\w\d]+\[?\.\]?)+[-\w\d]+(?::\d+)?' \
r'(?:(?:\/|\?)[-\w\d+&@#\/%=~_$?!\-:,.\(\);]*[\w\d+&@#\/%=~_$\(\);])?'
cveRegex = r'(?i)^cve-\d{4}-([1-9]\d{4,}|\d{4})$'
md5Regex = re.compile(r'\b[0-9a-fA-F]{32}\b', regexFlags)
sha1Regex = re.compile(r'\b[0-9a-fA-F]{40}\b', regexFlags)
sha256Regex = re.compile(r'\b[0-9a-fA-F]{64}\b', regexFlags)
sha512Regex = re.compile(r'\b[0-9a-fA-F]{128}\b', regexFlags)
pascalRegex = re.compile('([A-Z]?[a-z]+)')
# ############################## REGEX FORMATTING end ###############################
def underscoreToCamelCase(s, upper_camel=True):
"""
Convert an underscore separated string to camel case
:type s: ``str``
:param s: The string to convert (e.g. hello_world) (required)
:type upper_camel: ``bool``
    :param upper_camel: When True then transforms dictionary keys to camel case with the first letter capitalised
(for example: demisto_content to DemistoContent), otherwise the first letter will not be capitalised
(for example: demisto_content to demistoContent).
:return: The converted string (e.g. HelloWorld)
:rtype: ``str``
"""
if not isinstance(s, STRING_OBJ_TYPES):
return s
components = s.split('_')
camel_without_first_char = ''.join(x.title() for x in components[1:])
if upper_camel:
return components[0].title() + camel_without_first_char
else:
return components[0].lower() + camel_without_first_char
def camel_case_to_underscore(s):
"""Converts a camelCase string to snake_case
:type s: ``str``
:param s: The string to convert (e.g. helloWorld) (required)
:return: The converted string (e.g. hello_world)
:rtype: ``str``
"""
s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
def snakify(src):
"""Convert all keys of a dictionary to snake_case (underscored separated)
:type src: ``dict``
:param src: The dictionary to convert the keys for. (required)
    :return: The dictionary with the keys in snake_case.
:rtype: ``dict``
"""
return {camel_case_to_underscore(k): v for k, v in src.items()}
def pascalToSpace(s):
"""
Converts pascal strings to human readable (e.g. "ThreatScore" -> "Threat Score", "thisIsIPAddressName" ->
"This Is IP Address Name"). Could be used as headerTransform
:type s: ``str``
:param s: The string to be converted (required)
:return: The converted string
:rtype: ``str``
"""
if not isinstance(s, STRING_OBJ_TYPES):
return s
tokens = pascalRegex.findall(s)
for t in tokens:
        # double space to handle capital words like IP/URL/DNS that are not included in the regex
s = s.replace(t, ' {} '.format(t.title()))
# split and join: to remove double spacing caused by previous workaround
s = ' '.join(s.split())
return s
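# Commonly passed as a header transform (illustrative):
#     tableToMarkdown('Results', outputs, headerTransform=pascalToSpace)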
def string_to_table_header(string):
"""
    Checks that the input is a string, changes underscores to spaces, and capitalizes every word.
Example: "one_two" to "One Two"
:type string: ``str``
:param string: The string to be converted (required)
:return: The converted string
:rtype: ``str``
"""
if isinstance(string, STRING_OBJ_TYPES):
return " ".join(word.capitalize() for word in string.replace("_", " ").split())
else:
raise Exception('The key is not a string: {}'.format(string))
def string_to_context_key(string):
"""
    Checks that the input is a string, removes underscores, and capitalizes every word.
Example: "one_two" to "OneTwo"
:type string: ``str``
:param string: The string to be converted (required)
:return: The converted string
:rtype: ``str``
"""
if isinstance(string, STRING_OBJ_TYPES):
return "".join(word.capitalize() for word in string.split('_'))
else:
raise Exception('The key is not a string: {}'.format(string))
def parse_date_range(date_range, date_format=None, to_timestamp=False, timezone=0, utc=True):
"""
    THIS FUNCTION IS DEPRECATED - USE dateparser.parse instead
    Parses date_range string to a tuple of date strings (start, end). Input must be in the format 'number date_range_unit'.
Examples: (2 hours, 4 minutes, 6 month, 1 day, etc.)
:type date_range: ``str``
:param date_range: The date range to be parsed (required)
:type date_format: ``str``
:param date_format: Date format to convert the date_range to. (optional)
:type to_timestamp: ``bool``
:param to_timestamp: If set to True, then will return time stamp rather than a datetime.datetime. (optional)
:type timezone: ``int``
:param timezone: timezone should be passed in hours (e.g if +0300 then pass 3, if -0200 then pass -2).
:type utc: ``bool``
:param utc: If set to True, utc time will be used, otherwise local time.
:return: The parsed date range.
:rtype: ``(datetime.datetime, datetime.datetime)`` or ``(int, int)`` or ``(str, str)``
"""
range_split = date_range.strip().split(' ')
if len(range_split) != 2:
        return_error('date_range must be "number date_range_unit", examples: (2 hours, 4 minutes, 6 months, 1 day, '
                     'etc.)')
try:
number = int(range_split[0])
except ValueError:
return_error('The time value is invalid. Must be an integer.')
unit = range_split[1].lower()
if unit not in ['minute', 'minutes',
'hour', 'hours',
'day', 'days',
'month', 'months',
'year', 'years',
]:
return_error('The unit of date_range is invalid. Must be minutes, hours, days, months or years.')
if not isinstance(timezone, (int, float)):
return_error('Invalid timezone "{}" - must be a number (of type int or float).'.format(timezone))
if utc:
end_time = datetime.utcnow() + timedelta(hours=timezone)
start_time = datetime.utcnow() + timedelta(hours=timezone)
else:
end_time = datetime.now() + timedelta(hours=timezone)
start_time = datetime.now() + timedelta(hours=timezone)
if 'minute' in unit:
start_time = end_time - timedelta(minutes=number)
elif 'hour' in unit:
start_time = end_time - timedelta(hours=number)
elif 'day' in unit:
start_time = end_time - timedelta(days=number)
elif 'month' in unit:
start_time = end_time - timedelta(days=number * 30)
elif 'year' in unit:
start_time = end_time - timedelta(days=number * 365)
if to_timestamp:
return date_to_timestamp(start_time), date_to_timestamp(end_time)
if date_format:
return datetime.strftime(start_time, date_format), datetime.strftime(end_time, date_format)
return start_time, end_time
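# For example (return types depend on the arguments):
#     start_dt, end_dt = parse_date_range('2 days')
#     start_str, end_str = parse_date_range('1 hour', date_format='%Y-%m-%dT%H:%M:%S')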
def timestamp_to_datestring(timestamp, date_format="%Y-%m-%dT%H:%M:%S.000Z", is_utc=False):
"""
Parses timestamp (milliseconds) to a date string in the provided date format (by default: ISO 8601 format)
Examples: (1541494441222, 1541495441000, etc.)
:type timestamp: ``int`` or ``str``
:param timestamp: The timestamp to be parsed (required)
:type date_format: ``str``
:param date_format: The date format the timestamp should be parsed to. (optional)
:type is_utc: ``bool``
:param is_utc: Should the string representation of the timestamp use UTC time or the local machine time
:return: The parsed timestamp in the date_format
:rtype: ``str``
"""
use_utc_time = is_utc or date_format.endswith('Z')
if use_utc_time:
return datetime.utcfromtimestamp(int(timestamp) / 1000.0).strftime(date_format)
return datetime.fromtimestamp(int(timestamp) / 1000.0).strftime(date_format)
def date_to_timestamp(date_str_or_dt, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses date_str_or_dt in the given format (default: %Y-%m-%dT%H:%M:%S) to milliseconds
Examples: ('2018-11-06T08:56:41', '2018-11-06T08:56:41', etc.)
:type date_str_or_dt: ``str`` or ``datetime.datetime``
:param date_str_or_dt: The date to be parsed. (required)
:type date_format: ``str``
:param date_format: The date format of the date string (will be ignored if date_str_or_dt is of type
datetime.datetime). (optional)
:return: The parsed timestamp.
:rtype: ``int``
"""
if isinstance(date_str_or_dt, STRING_OBJ_TYPES):
return int(time.mktime(time.strptime(date_str_or_dt, date_format)) * 1000)
# otherwise datetime.datetime
return int(time.mktime(date_str_or_dt.timetuple()) * 1000)
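# Round-trip sketch (local-time caveats apply):
#     ts = date_to_timestamp('2018-11-06T08:56:41')  # milliseconds since epoch
#     timestamp_to_datestring(ts)                    # back to an ISO-like string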
def remove_nulls_from_dictionary(data):
"""
    Remove null values from a dictionary (modifies the given dictionary in place).
    :type data: ``dict``
    :param data: The dictionary to remove null values from (required)
:return: No data returned
:rtype: ``None``
"""
list_of_keys = list(data.keys())[:]
for key in list_of_keys:
if data[key] in ('', None, [], {}, ()):
del data[key]
def assign_params(keys_to_ignore=None, values_to_ignore=None, **kwargs):
"""Creates a dictionary from given kwargs without empty values.
empty values are: None, '', [], {}, ()
    Examples:
>>> assign_params(a='1', b=True, c=None, d='')
{'a': '1', 'b': True}
>>> since_time = 'timestamp'
>>> assign_params(values_to_ignore=(15, ), sinceTime=since_time, b=15)
{'sinceTime': 'timestamp'}
>>> item_id = '1236654'
>>> assign_params(keys_to_ignore=['rnd'], ID=item_id, rnd=15)
{'ID': '1236654'}
:type keys_to_ignore: ``tuple`` or ``list``
:param keys_to_ignore: Keys to ignore if exists
:type values_to_ignore: ``tuple`` or ``list``
:param values_to_ignore: Values to ignore if exists
:type kwargs: ``kwargs``
:param kwargs: kwargs to filter
:return: dict without empty values
:rtype: ``dict``
"""
if values_to_ignore is None:
values_to_ignore = (None, '', [], {}, ())
if keys_to_ignore is None:
keys_to_ignore = tuple()
return {
key: value for key, value in kwargs.items()
if value not in values_to_ignore and key not in keys_to_ignore
}
class GetDemistoVersion:
"""
Callable class to replace get_demisto_version function
"""
def __init__(self):
self._version = None
def __call__(self):
"""Returns the Demisto version and build number.
:return: Demisto version object if Demisto class has attribute demistoVersion, else raises AttributeError
:rtype: ``dict``
"""
if self._version is None:
if hasattr(demisto, 'demistoVersion'):
self._version = demisto.demistoVersion()
else:
raise AttributeError('demistoVersion attribute not found.')
return self._version
get_demisto_version = GetDemistoVersion()
def get_demisto_version_as_str():
"""Get the Demisto Server version as a string <version>-<build>. If unknown will return: 'Unknown'.
Meant to be use in places where we want to display the version. If you want to perform logic based upon vesrion
use: is_demisto_version_ge.
:return: Demisto version as string
:rtype: ``dict``
"""
try:
ver_obj = get_demisto_version()
return '{}-{}'.format(ver_obj.get('version', 'Unknown'),
ver_obj.get("buildNumber", 'Unknown'))
except AttributeError:
return "Unknown"
def is_demisto_version_ge(version, build_number=''):
"""Utility function to check if current running integration is at a server greater or equal to the passed version
:type version: ``str``
:param version: Version to check
:type build_number: ``str``
:param build_number: Build number to check
:return: True if running within a Server version greater or equal than the passed version
:rtype: ``bool``
"""
server_version = {}
try:
server_version = get_demisto_version()
if server_version.get('version') > version:
return True
elif server_version.get('version') == version:
if build_number:
return int(server_version.get('buildNumber')) >= int(build_number) # type: ignore[arg-type]
return True # No build number
else:
return False
except AttributeError:
# demistoVersion was added in 5.0.0. We are currently running in 4.5.0 and below
if version >= "5.0.0":
return False
raise
except ValueError:
# dev editions are not comparable
demisto.log(
'is_demisto_version_ge: ValueError. \n '
'input: server version: {} build number: {}\n'
'server version: {}'.format(version, build_number, server_version)
)
return True
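# Typical gating sketch:
#     if is_demisto_version_ge('6.0.0'):
#         ...  # use a capability introduced in 6.0.0 (e.g. versioned context)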
class DemistoHandler(logging.Handler):
"""
Handler to route logging messages to an IntegrationLogger or demisto.debug if not supplied
"""
def __init__(self, int_logger=None):
logging.Handler.__init__(self)
self.int_logger = int_logger
def emit(self, record):
msg = self.format(record)
try:
if self.int_logger:
self.int_logger(msg)
else:
demisto.debug(msg)
except Exception: # noqa: disable=broad-except
pass
class DebugLogger(object):
"""
Wrapper to initiate logging at logging.DEBUG level.
Is used when `debug-mode=True`.
"""
def __init__(self):
        self.handler = None  # just in case our http_client code throws an exception, so we don't error in __del__
self.int_logger = IntegrationLogger()
self.int_logger.set_buffering(False)
self.http_client_print = None
self.http_client = None
if IS_PY3:
# pylint: disable=import-error
import http.client as http_client
# pylint: enable=import-error
self.http_client = http_client
self.http_client.HTTPConnection.debuglevel = 1
self.http_client_print = getattr(http_client, 'print', None) # save in case someone else patched it already
setattr(http_client, 'print', self.int_logger.print_override)
self.handler = DemistoHandler(self.int_logger)
demisto_formatter = logging.Formatter(fmt='python logging: %(levelname)s [%(name)s] - %(message)s', datefmt=None)
self.handler.setFormatter(demisto_formatter)
self.root_logger = logging.getLogger()
self.prev_log_level = self.root_logger.getEffectiveLevel()
self.root_logger.setLevel(logging.DEBUG)
self.org_handlers = list()
if self.root_logger.handlers:
self.org_handlers.extend(self.root_logger.handlers)
for h in self.org_handlers:
self.root_logger.removeHandler(h)
self.root_logger.addHandler(self.handler)
def __del__(self):
if self.handler:
self.root_logger.setLevel(self.prev_log_level)
self.root_logger.removeHandler(self.handler)
self.handler.flush()
self.handler.close()
if self.org_handlers:
for h in self.org_handlers:
self.root_logger.addHandler(h)
if self.http_client:
self.http_client.HTTPConnection.debuglevel = 0
if self.http_client_print:
setattr(self.http_client, 'print', self.http_client_print)
else:
delattr(self.http_client, 'print')
if self.int_logger.curl:
for curl in self.int_logger.curl:
demisto.info('cURL:\n' + curl)
def log_start_debug(self):
"""
Utility function to log start of debug mode logging
"""
msg = "debug-mode started.\n#### http client print found: {}.\n#### Env {}.".format(self.http_client_print is not None,
os.environ)
if hasattr(demisto, 'params'):
msg += "\n#### Params: {}.".format(json.dumps(demisto.params(), indent=2))
calling_context = demisto.callingContext.get('context', {})
msg += "\n#### Docker image: [{}]".format(calling_context.get('DockerImage'))
brand = calling_context.get('IntegrationBrand')
if brand:
msg += "\n#### Integration: brand: [{}] instance: [{}]".format(brand, calling_context.get('IntegrationInstance'))
sm = get_schedule_metadata(context=calling_context)
if sm.get('is_polling'):
msg += "\n#### Schedule Metadata: scheduled command: [{}] args: [{}] times ran: [{}] scheduled: [{}] end " \
"date: [{}]".format(sm.get('polling_command'),
sm.get('polling_args'),
sm.get('times_ran'),
sm.get('start_date'),
sm.get('end_date')
)
self.int_logger.write(msg)
_requests_logger = None
try:
if is_debug_mode():
_requests_logger = DebugLogger()
_requests_logger.log_start_debug()
except Exception as ex:
# Should fail silently so that if there is a problem with the logger it will
# not affect the execution of commands and playbooks
demisto.info('Failed initializing DebugLogger: {}'.format(ex))
def parse_date_string(date_string, date_format='%Y-%m-%dT%H:%M:%S'):
"""
Parses the date_string function to the corresponding datetime object.
Note: If possible (e.g. running Python 3), it is suggested to use
dateutil.parser.parse or dateparser.parse functions instead.
Examples:
>>> parse_date_string('2019-09-17T06:16:39Z')
datetime.datetime(2019, 9, 17, 6, 16, 39)
>>> parse_date_string('2019-09-17T06:16:39.22Z')
datetime.datetime(2019, 9, 17, 6, 16, 39, 220000)
>>> parse_date_string('2019-09-17T06:16:39.4040+05:00', '%Y-%m-%dT%H:%M:%S+02:00')
datetime.datetime(2019, 9, 17, 6, 16, 39, 404000)
:type date_string: ``str``
:param date_string: The date string to parse. (required)
:type date_format: ``str``
:param date_format:
The date format of the date string. If the date format is known, it should be provided. (optional)
:return: The parsed datetime.
    :rtype: ``datetime.datetime``
"""
try:
return datetime.strptime(date_string, date_format)
except ValueError as e:
error_message = str(e)
date_format = '%Y-%m-%dT%H:%M:%S'
time_data_regex = r'time data \'(.*?)\''
time_data_match = re.findall(time_data_regex, error_message)
sliced_time_data = ''
if time_data_match:
# found time date which does not match date format
# example of caught error message:
# "time data '2019-09-17T06:16:39Z' does not match format '%Y-%m-%dT%H:%M:%S.%fZ'"
time_data = time_data_match[0]
# removing YYYY-MM-DDThh:mm:ss from the time data to keep only milliseconds and time zone
sliced_time_data = time_data[19:]
else:
unconverted_data_remains_regex = r'unconverted data remains: (.*)'
unconverted_data_remains_match = re.findall(unconverted_data_remains_regex, error_message)
if unconverted_data_remains_match:
# found unconverted_data_remains
# example of caught error message:
# "unconverted data remains: 22Z"
sliced_time_data = unconverted_data_remains_match[0]
if not sliced_time_data:
# did not catch expected error
raise ValueError(e)
if '.' in sliced_time_data:
# found milliseconds - appending ".%f" to date format
date_format += '.%f'
timezone_regex = r'[Zz+-].*'
time_zone = re.findall(timezone_regex, sliced_time_data)
if time_zone:
# found timezone - appending it to the date format
date_format += time_zone[0]
return datetime.strptime(date_string, date_format)
def build_dbot_entry(indicator, indicator_type, vendor, score, description=None, build_malicious=True):
"""Build a dbot entry. if score is 3 adds malicious
Examples:
>>> build_dbot_entry('user@example.com', 'Email', 'Vendor', 1)
{'DBotScore': {'Indicator': 'user@example.com', 'Type': 'email', 'Vendor': 'Vendor', 'Score': 1}}
>>> build_dbot_entry('user@example.com', 'Email', 'Vendor', 3, build_malicious=False)
{'DBotScore': {'Indicator': 'user@example.com', 'Type': 'email', 'Vendor': 'Vendor', 'Score': 3}}
>>> build_dbot_entry('user@example.com', 'email', 'Vendor', 3, 'Malicious email')
{'DBotScore': {'Vendor': 'Vendor', 'Indicator': 'user@example.com', 'Score': 3, 'Type': 'email'}, \
'Account.Email(val.Address && val.Address == obj.Address)': {'Malicious': {'Vendor': 'Vendor', 'Description': \
'Malicious email'}, 'Address': 'user@example.com'}}
>>> build_dbot_entry('md5hash', 'md5', 'Vendor', 1)
{'DBotScore': {'Indicator': 'md5hash', 'Type': 'file', 'Vendor': 'Vendor', 'Score': 1}}
:type indicator: ``str``
:param indicator: indicator field. if using file hashes, can be dict
:type indicator_type: ``str``
:param indicator_type:
type of indicator ('url, 'domain', 'ip', 'cve', 'email', 'md5', 'sha1', 'sha256', 'crc32', 'sha512', 'ctph')
:type vendor: ``str``
:param vendor: Integration ID
:type score: ``int``
:param score: DBot score (0-3)
:type description: ``str`` or ``None``
:param description: description (will be added to malicious if dbot_score is 3). can be None
:type build_malicious: ``bool``
:param build_malicious: if True, will add a malicious entry
:return: dbot entry
:rtype: ``dict``
"""
if not 0 <= score <= 3:
raise DemistoException('illegal DBot score, expected 0-3, got `{}`'.format(score))
indicator_type_lower = indicator_type.lower()
if indicator_type_lower not in INDICATOR_TYPE_TO_CONTEXT_KEY:
raise DemistoException('illegal indicator type, expected one of {}, got `{}`'.format(
INDICATOR_TYPE_TO_CONTEXT_KEY.keys(), indicator_type_lower
))
# handle files
if INDICATOR_TYPE_TO_CONTEXT_KEY[indicator_type_lower] == 'file':
indicator_type_lower = 'file'
dbot_entry = {
outputPaths['dbotscore']: {
'Indicator': indicator,
'Type': indicator_type_lower,
'Vendor': vendor,
'Score': score
}
}
if score == 3 and build_malicious:
dbot_entry.update(build_malicious_dbot_entry(indicator, indicator_type, vendor, description))
return dbot_entry
def build_malicious_dbot_entry(indicator, indicator_type, vendor, description=None):
""" Build Malicious dbot entry
Examples:
>>> build_malicious_dbot_entry('8.8.8.8', 'ip', 'Vendor', 'Google DNS')
{'IP(val.Address && val.Address == obj.Address)': {'Malicious': {'Vendor': 'Vendor', 'Description': 'Google DNS\
'}, 'Address': '8.8.8.8'}}
>>> build_malicious_dbot_entry('md5hash', 'MD5', 'Vendor', 'Malicious File')
{'File(val.MD5 && val.MD5 == obj.MD5 || val.SHA1 && val.SHA1 == obj.SHA1 || val.SHA256 && val.SHA256 == obj.SHA\
256 || val.SHA512 && val.SHA512 == obj.SHA512 || val.CRC32 && val.CRC32 == obj.CRC32 || val.CTPH && val.CTPH == obj.CTP\
H || val.SSDeep && val.SSDeep == obj.SSDeep)': {'Malicious': {'Vendor': 'Vendor', 'Description': 'Malicious File'}\
, 'MD5': 'md5hash'}}
:type indicator: ``str``
:param indicator: Value (e.g. 8.8.8.8)
:type indicator_type: ``str``
:param indicator_type: e.g. 'IP'
:type vendor: ``str``
:param vendor: Integration ID
:type description: ``str``
:param description: Why it's malicious
:return: A malicious DBot entry
:rtype: ``dict``
"""
indicator_type_lower = indicator_type.lower()
if indicator_type_lower in INDICATOR_TYPE_TO_CONTEXT_KEY:
key = INDICATOR_TYPE_TO_CONTEXT_KEY[indicator_type_lower]
        # the `file` indicator works a little differently
if key == 'file':
entry = {
indicator_type.upper(): indicator,
'Malicious': {
'Vendor': vendor,
'Description': description
}
}
return {outputPaths[key]: entry}
else:
entry = {
key: indicator,
'Malicious': {
'Vendor': vendor,
'Description': description
}
}
return {outputPaths[indicator_type_lower]: entry}
else:
raise DemistoException('Wrong indicator type supplied: {}, expected {}'
.format(indicator_type, INDICATOR_TYPE_TO_CONTEXT_KEY.keys()))
# Will add only if 'requests' module imported
if 'requests' in sys.modules:
class BaseClient(object):
"""Client to use in integrations with powerful _http_request
:type base_url: ``str``
:param base_url: Base server address with suffix, for example: https://example.com/api/v2/.
:type verify: ``bool``
:param verify: Whether the request should verify the SSL certificate.
:type proxy: ``bool``
:param proxy: Whether to run the integration using the system proxy.
:type ok_codes: ``tuple``
:param ok_codes:
The request codes to accept as OK, for example: (200, 201, 204).
If you specify "None", will use requests.Response.ok
:type headers: ``dict``
:param headers:
The request headers, for example: {'Accept`: `application/json`}.
Can be None.
:type auth: ``dict`` or ``tuple``
:param auth:
The request authorization, for example: (username, password).
Can be None.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, base_url, verify=True, proxy=False, ok_codes=tuple(), headers=None, auth=None):
self._base_url = base_url
self._verify = verify
self._ok_codes = ok_codes
self._headers = headers
self._auth = auth
self._session = requests.Session()
if not proxy:
skip_proxy()
if not verify:
skip_cert_verification()
def __del__(self):
try:
self._session.close()
except Exception: # noqa
demisto.debug('failed to close BaseClient session with the following error:\n{}'.format(traceback.format_exc()))
def _implement_retry(self, retries=0,
status_list_to_retry=None,
backoff_factor=5,
raise_on_redirect=False,
raise_on_status=False):
"""
Implements the retry mechanism.
In the default case where retries = 0 the request will fail on the first time
:type retries: ``int``
            :param retries: How many retries should be made in case of a failure. When set to '0' - will fail on the first time
:type status_list_to_retry: ``iterable``
:param status_list_to_retry: A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ['GET', 'POST', 'PUT']
and the response status code is in ``status_list_to_retry``.
            :type backoff_factor: ``float``
:param backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
                By default, backoff_factor is set to 5
            :type raise_on_redirect: ``bool``
:param raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
            :type raise_on_status: ``bool``
:param raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
"""
try:
method_whitelist = "allowed_methods" if hasattr(Retry.DEFAULT, "allowed_methods") else "method_whitelist"
                whitelist_kwargs = {
method_whitelist: frozenset(['GET', 'POST', 'PUT'])
}
retry = Retry(
total=retries,
read=retries,
connect=retries,
backoff_factor=backoff_factor,
status=retries,
status_forcelist=status_list_to_retry,
raise_on_status=raise_on_status,
raise_on_redirect=raise_on_redirect,
                    **whitelist_kwargs
)
adapter = HTTPAdapter(max_retries=retry)
self._session.mount('http://', adapter)
self._session.mount('https://', adapter)
except NameError:
pass
def _http_request(self, method, url_suffix='', full_url=None, headers=None, auth=None, json_data=None,
params=None, data=None, files=None, timeout=10, resp_type='json', ok_codes=None,
return_empty_response=False, retries=0, status_list_to_retry=None,
backoff_factor=5, raise_on_redirect=False, raise_on_status=False,
error_handler=None, empty_valid_codes=None, **kwargs):
"""A wrapper for requests lib to send our requests and handle requests and responses better.
:type method: ``str``
:param method: The HTTP method, for example: GET, POST, and so on.
:type url_suffix: ``str``
:param url_suffix: The API endpoint.
:type full_url: ``str``
:param full_url:
Bypasses the use of self._base_url + url_suffix. This is useful if you need to
make a request to an address outside of the scope of the integration
API.
:type headers: ``dict``
:param headers: Headers to send in the request. If None, will use self._headers.
:type auth: ``tuple``
:param auth:
The authorization tuple (usually username/password) to enable Basic/Digest/Custom HTTP Auth.
if None, will use self._auth.
:type params: ``dict``
:param params: URL parameters to specify the query.
:type data: ``dict``
:param data: The data to send in a 'POST' request.
:type json_data: ``dict``
:param json_data: The dictionary to send in a 'POST' request.
:type files: ``dict``
:param files: The file data to send in a 'POST' request.
:type timeout: ``float`` or ``tuple``
:param timeout:
The amount of time (in seconds) that a request will wait for a client to
establish a connection to a remote machine before a timeout occurs.
can be only float (Connection Timeout) or a tuple (Connection Timeout, Read Timeout).
:type resp_type: ``str``
:param resp_type:
Determines which data format to return from the HTTP request. The default
is 'json'. Other options are 'text', 'content', 'xml' or 'response'. Use 'response'
to return the full response object.
:type ok_codes: ``tuple``
:param ok_codes:
The request codes to accept as OK, for example: (200, 201, 204). If you specify
"None", will use self._ok_codes.
:return: Depends on the resp_type parameter
:rtype: ``dict`` or ``str`` or ``requests.Response``
:type retries: ``int``
            :param retries: How many retries should be made in case of a failure. When set to '0' - will fail on the first time
:type status_list_to_retry: ``iterable``
:param status_list_to_retry: A set of integer HTTP status codes that we should force a retry on.
A retry is initiated if the request method is in ['GET', 'POST', 'PUT']
and the response status code is in ``status_list_to_retry``.
            :type backoff_factor: ``float``
:param backoff_factor:
A backoff factor to apply between attempts after the second try
(most errors are resolved immediately by a second try without a
delay). urllib3 will sleep for::
{backoff factor} * (2 ** ({number of total retries} - 1))
seconds. If the backoff_factor is 0.1, then :func:`.sleep` will sleep
for [0.0s, 0.2s, 0.4s, ...] between retries. It will never be longer
than :attr:`Retry.BACKOFF_MAX`.
                By default, backoff_factor is set to 5
            :type raise_on_redirect: ``bool``
:param raise_on_redirect: Whether, if the number of redirects is
exhausted, to raise a MaxRetryError, or to return a response with a
response code in the 3xx range.
            :type raise_on_status: ``bool``
:param raise_on_status: Similar meaning to ``raise_on_redirect``:
whether we should raise an exception, or return a response,
if status falls in ``status_forcelist`` range and retries have
been exhausted.
            :type error_handler: ``callable``
            :param error_handler: Given an error entry, the error handler outputs the
                new formatted error message.
:type empty_valid_codes: ``list``
:param empty_valid_codes: A list of all valid status codes of empty responses (usually only 204, but
can vary)
"""
try:
# Replace params if supplied
address = full_url if full_url else urljoin(self._base_url, url_suffix)
headers = headers if headers else self._headers
auth = auth if auth else self._auth
if retries:
self._implement_retry(retries, status_list_to_retry, backoff_factor, raise_on_redirect, raise_on_status)
# Execute
res = self._session.request(
method,
address,
verify=self._verify,
params=params,
data=data,
json=json_data,
files=files,
headers=headers,
auth=auth,
timeout=timeout,
**kwargs
)
# Handle error responses gracefully
if not self._is_status_code_valid(res, ok_codes):
if error_handler:
error_handler(res)
else:
err_msg = 'Error in API call [{}] - {}' \
.format(res.status_code, res.reason)
try:
# Try to parse json error response
error_entry = res.json()
err_msg += '\n{}'.format(json.dumps(error_entry))
raise DemistoException(err_msg, res=res)
except ValueError:
err_msg += '\n{}'.format(res.text)
raise DemistoException(err_msg, res=res)
if not empty_valid_codes:
empty_valid_codes = [204]
is_response_empty_and_successful = (res.status_code in empty_valid_codes)
if is_response_empty_and_successful and return_empty_response:
return res
resp_type = resp_type.lower()
try:
if resp_type == 'json':
return res.json()
if resp_type == 'text':
return res.text
if resp_type == 'content':
return res.content
if resp_type == 'xml':
                        ET.fromstring(res.text)  # parse to validate the XML; raises on a malformed response
return res
except ValueError as exception:
raise DemistoException('Failed to parse json object from response: {}'
.format(res.content), exception)
except requests.exceptions.ConnectTimeout as exception:
err_msg = 'Connection Timeout Error - potential reasons might be that the Server URL parameter' \
' is incorrect or that the Server is not accessible from your host.'
raise DemistoException(err_msg, exception)
except requests.exceptions.SSLError as exception:
# in case the "Trust any certificate" is already checked
if not self._verify:
raise
err_msg = 'SSL Certificate Verification Failed - try selecting \'Trust any certificate\' checkbox in' \
' the integration configuration.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ProxyError as exception:
err_msg = 'Proxy Error - if the \'Use system proxy\' checkbox in the integration configuration is' \
' selected, try clearing the checkbox.'
raise DemistoException(err_msg, exception)
except requests.exceptions.ConnectionError as exception:
# Get originating Exception in Exception chain
error_class = str(exception.__class__)
err_type = '<' + error_class[error_class.find('\'') + 1: error_class.rfind('\'')] + '>'
err_msg = 'Verify that the server URL parameter' \
' is correct and that you have access to the server from your host.' \
'\nError Type: {}\nError Number: [{}]\nMessage: {}\n' \
.format(err_type, exception.errno, exception.strerror)
raise DemistoException(err_msg, exception)
except requests.exceptions.RetryError as exception:
try:
reason = 'Reason: {}'.format(exception.args[0].reason.args[0])
except Exception: # noqa: disable=broad-except
reason = ''
                err_msg = 'Max Retries Error - Request attempts with {} retries failed. \n{}'.format(retries, reason)
raise DemistoException(err_msg, exception)
def _is_status_code_valid(self, response, ok_codes=None):
"""If the status code is OK, return 'True'.
:type response: ``requests.Response``
:param response: Response from API after the request for which to check the status.
:type ok_codes: ``tuple`` or ``list``
:param ok_codes:
The request codes to accept as OK, for example: (200, 201, 204). If you specify
"None", will use response.ok.
:return: Whether the status of the response is valid.
:rtype: ``bool``
"""
# Get wanted ok codes
status_codes = ok_codes if ok_codes else self._ok_codes
if status_codes:
return response.status_code in status_codes
return response.ok
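    # Minimal derived-client sketch (endpoint and header names are hypothetical):
    #     class Client(BaseClient):
    #         def get_items(self):
    #             return self._http_request('GET', url_suffix='/items')
    #
    #     client = Client(base_url='https://example.com/api/v1',
    #                     verify=True, proxy=False,
    #                     headers={'Authorization': 'Bearer <token>'})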
def batch(iterable, batch_size=1):
"""Gets an iterable and yields slices of it.
:type iterable: ``list``
:param iterable: list or other iterable object.
:type batch_size: ``int``
:param batch_size: the size of batches to fetch
:rtype: ``list``
    :return: Iterable slices of the given iterable, each of size up to batch_size
"""
current_batch = iterable[:batch_size]
not_batched = iterable[batch_size:]
while current_batch:
yield current_batch
current_batch = not_batched[:batch_size]
not_batched = not_batched[batch_size:]
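# For example (hypothetical handler):
#     for chunk in batch(list_of_ids, batch_size=100):
#         process(chunk)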
def dict_safe_get(dict_object, keys, default_return_value=None, return_type=None, raise_return_type=True):
"""Recursive safe get query (for nested dicts and lists), If keys found return value otherwise return None or default value.
Example:
    >>> data = {"something" : {"test": "A"}}
    >>> dict_safe_get(data, ['something', 'test'])
    'A'
    >>> dict_safe_get(data, ['something', 'else'], 'default value')
    'default value'
:type dict_object: ``dict``
:param dict_object: dictionary to query.
:type keys: ``list``
:param keys: keys for recursive get.
:type default_return_value: ``object``
:param default_return_value: Value to return when no key available.
:type return_type: ``type``
    :param return_type: Expected return type.
:type raise_return_type: ``bool``
:param raise_return_type: Whether to raise an error when the value didn't match the expected return type.
:rtype: ``object``
    :return: Value from nested query.
"""
return_value = dict_object
for key in keys:
try:
return_value = return_value[key]
except (KeyError, TypeError, IndexError, AttributeError):
return_value = default_return_value
break
if return_type and not isinstance(return_value, return_type):
if raise_return_type:
raise TypeError("Safe get Error:\nDetails: Return Type Error Excepted return type {0},"
" but actual type from nested dict/list is {1} with value {2}.\n"
"Query: {3}\nQueried object: {4}".format(return_type, type(return_value),
return_value, keys, dict_object))
return_value = default_return_value
return return_value
CONTEXT_UPDATE_RETRY_TIMES = 3
MIN_VERSION_FOR_VERSIONED_CONTEXT = '6.0.0'
def merge_lists(original_list, updated_list, key):
"""
Replace values in a list with those in an updated list.
Example:
>>> original = [{'id': '1', 'updated': 'n'}, {'id': '2', 'updated': 'n'}, {'id': '11', 'updated': 'n'}]
>>> updated = [{'id': '1', 'updated': 'y'}, {'id': '3', 'updated': 'y'}, {'id': '11', 'updated': 'n',
>>> 'remove': True}]
>>> result = [{'id': '1', 'updated': 'y'}, {'id': '2', 'updated': 'n'}, {'id': '3', 'updated': 'y'}]
:type original_list: ``list``
:param original_list: The original list.
:type updated_list: ``list``
:param updated_list: The updated list.
:type key: ``str``
:param key: The key to replace elements by.
:rtype: ``list``
:return: The merged list.
"""
original_dict = {element[key]: element for element in original_list}
updated_dict = {element[key]: element for element in updated_list}
original_dict.update(updated_dict)
removed = [obj for obj in original_dict.values() if obj.get('remove', False) is True]
for r in removed:
demisto.debug('Removing from integration context: {}'.format(str(r)))
merged_list = [obj for obj in original_dict.values() if obj.get('remove', False) is False]
return merged_list
def set_integration_context(context, sync=True, version=-1):
"""
Sets the integration context.
:type context: ``dict``
:param context: The context to set.
:type sync: ``bool``
:param sync: Whether to save the context directly to the DB.
:type version: ``Any``
:param version: The version of the context to set.
:rtype: ``dict``
:return: The new integration context
"""
demisto.debug('Setting integration context')
if is_versioned_context_available():
demisto.debug('Updating integration context with version {}. Sync: {}'.format(version, sync))
return demisto.setIntegrationContextVersioned(context, version, sync)
else:
return demisto.setIntegrationContext(context)
def get_integration_context(sync=True, with_version=False):
"""
Gets the integration context.
:type sync: ``bool``
:param sync: Whether to get the integration context directly from the DB.
:type with_version: ``bool``
:param with_version: Whether to return the version.
:rtype: ``dict``
:return: The integration context.
"""
if is_versioned_context_available():
integration_context = demisto.getIntegrationContextVersioned(sync)
if with_version:
return integration_context
else:
return integration_context.get('context', {})
else:
return demisto.getIntegrationContext()
def is_versioned_context_available():
"""
Determines whether versioned integration context is available according to the server version.
:rtype: ``bool``
:return: Whether versioned integration context is available
"""
return is_demisto_version_ge(MIN_VERSION_FOR_VERSIONED_CONTEXT)
def set_to_integration_context_with_retries(context, object_keys=None, sync=True,
max_retry_times=CONTEXT_UPDATE_RETRY_TIMES):
"""
Update the integration context with a dictionary of keys and values with multiple attempts.
The function supports merging the context keys using the provided object_keys parameter.
If the version is too old by the time the context is set,
another attempt will be made until the limit after a random sleep.
:type context: ``dict``
:param context: A dictionary of keys and values to set.
:type object_keys: ``dict``
:param object_keys: A dictionary to map between context keys and their unique ID for merging them.
:type sync: ``bool``
:param sync: Whether to save the context directly to the DB.
:type max_retry_times: ``int``
:param max_retry_times: The maximum number of attempts to try.
:rtype: ``None``
:return: None
"""
attempt = 0
# do while...
while True:
if attempt == max_retry_times:
raise Exception('Failed updating integration context. Max retry attempts exceeded.')
# Update the latest context and get the new version
integration_context, version = update_integration_context(context, object_keys, sync)
demisto.debug('Attempting to update the integration context with version {}.'.format(version))
# Attempt to update integration context with a version.
# If we get a ValueError (DB Version), then the version was not updated and we need to try again.
attempt += 1
try:
set_integration_context(integration_context, sync, version)
demisto.debug('Successfully updated integration context with version {}.'
''.format(version))
break
except ValueError as ve:
            demisto.debug('Failed updating integration context with version {}: {} Attempts left - {}'
                          ''.format(version, str(ve), max_retry_times - attempt))
# Sleep for a random time
time_to_sleep = randint(1, 100) / 1000
time.sleep(time_to_sleep)
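# Merge-by-key sketch (assumes 'incidents' holds a list of dicts with an 'id' field):
#     set_to_integration_context_with_retries({'incidents': fetched},
#                                             object_keys={'incidents': 'id'})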
def get_integration_context_with_version(sync=True):
"""
Get the latest integration context with version, if available.
:type sync: ``bool``
:param sync: Whether to get the context directly from the DB.
:rtype: ``tuple``
:return: The latest integration context with version.
"""
latest_integration_context_versioned = get_integration_context(sync, with_version=True)
version = -1
if is_versioned_context_available():
integration_context = latest_integration_context_versioned.get('context', {})
if sync:
version = latest_integration_context_versioned.get('version', 0)
else:
integration_context = latest_integration_context_versioned
return integration_context, version
def update_integration_context(context, object_keys=None, sync=True):
"""
Update the integration context with a given dictionary after merging it with the latest integration context.
:type context: ``dict``
:param context: The keys and values to update in the integration context.
:type object_keys: ``dict``
:param object_keys: A dictionary to map between context keys and their unique ID for merging them
with the latest context.
:type sync: ``bool``
:param sync: Whether to use the context directly from the DB.
:rtype: ``tuple``
:return: The updated integration context along with the current version.
"""
integration_context, version = get_integration_context_with_version(sync)
if not object_keys:
object_keys = {}
for key, _ in context.items():
latest_object = json.loads(integration_context.get(key, '[]'))
updated_object = context[key]
if key in object_keys:
merged_list = merge_lists(latest_object, updated_object, object_keys[key])
integration_context[key] = json.dumps(merged_list)
else:
integration_context[key] = json.dumps(updated_object)
return integration_context, version
class DemistoException(Exception):
def __init__(self, message, exception=None, res=None, *args):
self.res = res
self.message = message
self.exception = exception
super(DemistoException, self).__init__(message, exception, *args)
def __str__(self):
return str(self.message)
class GetRemoteDataArgs:
"""get-remote-data args parser
:type args: ``dict``
:param args: arguments for the command.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, args):
self.remote_incident_id = args['id']
self.last_update = args['lastUpdate']
class GetModifiedRemoteDataArgs:
"""get-modified-remote-data args parser
:type args: ``dict``
:param args: arguments for the command.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, args):
self.last_update = args['lastUpdate']
class UpdateRemoteSystemArgs:
"""update-remote-system args parser
:type args: ``dict``
    :param args: arguments for the command.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, args):
self.data = args.get('data') # type: ignore
self.entries = args.get('entries')
self.incident_changed = args.get('incidentChanged')
self.remote_incident_id = args.get('remoteId')
self.inc_status = args.get('status')
self.delta = args.get('delta')
class GetRemoteDataResponse:
"""get-remote-data response parser
:type mirrored_object: ``dict``
:param mirrored_object: The object you are mirroring, in most cases the incident.
:type entries: ``list``
:param entries: The entries you want to add to the war room.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, mirrored_object, entries):
self.mirrored_object = mirrored_object
self.entries = entries
def extract_for_local(self):
"""Extracts the response into the mirrored incident.
:return: List of details regarding the mirrored incident.
:rtype: ``list``
"""
if self.mirrored_object:
return [self.mirrored_object] + self.entries
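# A minimal usage sketch (added for illustration; the incident fields, the
# entry content and the helper name are hypothetical):
def _example_get_remote_data_response():
    mirrored_incident = {'id': '1234', 'severity': 2}
    entries = [{'Type': EntryType.NOTE, 'Contents': 'Closed on the remote system',
                'ContentsFormat': EntryFormat.TEXT}]
    # The returned list is what a get-remote-data command should return.
    return GetRemoteDataResponse(mirrored_incident, entries).extract_for_local()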
class GetModifiedRemoteDataResponse:
"""get-modified-remote-data response parser
:type modified_incident_ids: ``list``
:param modified_incident_ids: The incidents that were modified since the last check.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, modified_incident_ids):
self.modified_incident_ids = modified_incident_ids
def to_entry(self):
"""Extracts the response
        :return: A war-room entry holding the IDs of incidents to run get-remote-data on.
        :rtype: ``dict``
"""
demisto.info('Modified incidents: {}'.format(self.modified_incident_ids))
return {'Contents': self.modified_incident_ids, 'Type': EntryType.NOTE, 'ContentsFormat': EntryFormat.JSON}
class SchemeTypeMapping:
"""Scheme type mappings builder.
:type type_name: ``str``
:param type_name: The name of the remote incident type.
:type fields: ``dict``
:param fields: The dict of fields to their description.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, type_name='', fields=None):
self.type_name = type_name
self.fields = fields if fields else {}
def add_field(self, name, description=''):
"""Adds a field to the incident type mapping.
:type name: ``str``
:param name: The name of the field.
:type description: ``str``
        :param description: The description for that field.
:return: No data returned
:rtype: ``None``
"""
self.fields.update({
name: description
})
def extract_mapping(self):
"""Extracts the mapping into XSOAR mapping screen.
:return: the mapping object for the current field.
:rtype: ``dict``
"""
return {
self.type_name: self.fields
}
class GetMappingFieldsResponse:
"""Handler for the mapping fields object.
:type scheme_types_mapping: ``list``
:param scheme_types_mapping: List of all the mappings in the remote system.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, scheme_types_mapping=None):
self.scheme_types_mappings = scheme_types_mapping if scheme_types_mapping else []
def add_scheme_type(self, scheme_type_mapping):
"""Add another incident type mapping.
:type scheme_type_mapping: ``dict``
:param scheme_type_mapping: mapping of a singular field.
:return: No data returned
:rtype: ``None``
"""
self.scheme_types_mappings.append(scheme_type_mapping)
def extract_mapping(self):
"""Extracts the mapping into XSOAR mapping screen.
:return: the mapping object for the current field.
:rtype: ``dict``
"""
all_mappings = {}
for scheme_types_mapping in self.scheme_types_mappings:
all_mappings.update(scheme_types_mapping.extract_mapping())
return all_mappings
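# A minimal usage sketch (added for illustration; the incident type and field
# names are hypothetical):
def _example_get_mapping_fields_response():
    incident_type = SchemeTypeMapping(type_name='My Incident Type')
    incident_type.add_field(name='severity', description='Incident severity')
    mapping_response = GetMappingFieldsResponse()
    mapping_response.add_scheme_type(incident_type)
    return mapping_response.extract_mapping()  # {'My Incident Type': {'severity': ...}}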
def get_x_content_info_headers():
"""Get X-Content-* headers to send in outgoing requests to use when performing requests to
external services such as oproxy.
:return: headers dict
:rtype: ``dict``
"""
calling_context = demisto.callingContext.get('context', {})
brand_name = calling_context.get('IntegrationBrand', '')
instance_name = calling_context.get('IntegrationInstance', '')
headers = {
'X-Content-Version': CONTENT_RELEASE_VERSION,
'X-Content-Name': brand_name or instance_name or 'Name not found',
'X-Content-LicenseID': demisto.getLicenseID(),
'X-Content-Branch': CONTENT_BRANCH_NAME,
'X-Content-Server-Version': get_demisto_version_as_str(),
}
return headers
class BaseWidget:
@abstractmethod
def to_display(self):
pass
class TextWidget(BaseWidget):
"""Text Widget representation
:type text: ``str``
:param text: The text for the widget to display
:return: No data returned
:rtype: ``None``
"""
def __init__(self, text):
# type: (str) -> None
self.text = text
def to_display(self):
"""Text Widget representation
:type text: ``str``
:param text: The text for the widget to display
:return: No data returned
:rtype: ``None``
"""
return self.text
class TrendWidget(BaseWidget):
"""Trend Widget representation
:type current_number: ``int``
:param current_number: The Current number in the trend.
:type previous_number: ``int``
:param previous_number: The previous number in the trend.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, current_number, previous_number):
# type: (int, int) -> None
self.current_number = current_number
self.previous_number = previous_number
def to_display(self):
return json.dumps({
'currSum': self.current_number,
'prevSum': self.previous_number
})
class NumberWidget(BaseWidget):
"""Number Widget representation
:type number: ``int``
:param number: The number for the widget to display.
:return: No data returned
:rtype: ``None``
"""
def __init__(self, number):
# type: (int) -> None
self.number = number
def to_display(self):
return self.number
class BarColumnPieWidget(BaseWidget):
"""Bar/Column/Pie Widget representation
:type categories: ``list``
    :param categories: a list of categories to display (it is better to use the add_category function to populate the data).
:return: No data returned
:rtype: ``None``
"""
def __init__(self, categories=None):
# type: (list) -> None
self.categories = categories if categories else [] # type: List[dict]
def add_category(self, name, number):
"""Add a category to widget.
:type name: ``str``
:param name: the name of the category to add.
:type number: ``int``
:param number: the number value of the category.
:return: No data returned.
:rtype: ``None``
"""
self.categories.append({
'name': name,
'data': [number]
})
def to_display(self):
return json.dumps(self.categories)
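# A minimal usage sketch (added for illustration; the category names and
# numbers are hypothetical):
def _example_bar_column_pie_widget():
    widget = BarColumnPieWidget()
    widget.add_category('Low', 10)
    widget.add_category('High', 3)
    return widget.to_display()  # JSON string the platform renders as a chart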
class LineWidget(BaseWidget):
"""Line Widget representation
:type categories: ``Any``
    :param categories: a list of categories to display (it is better to use the add_category function to populate the data).
:return: No data returned
:rtype: ``None``
"""
def __init__(self, categories=None):
# type: (list) -> None
self.categories = categories if categories else [] # type: List[dict]
def add_category(self, name, number, group):
"""Add a category to widget.
:type name: ``str``
:param name: the name of the category to add.
:type number: ``int``
:param number: the number value of the category.
:type group: ``str``
:param group: the name of the relevant group.
:return: No data returned
:rtype: ``None``
"""
self.categories.append({
'name': name,
'data': [number],
'groups': [
{
'name': group,
'data': [number]
},
]
})
def to_display(self):
processed_names = [] # type: List[str]
processed_categories = [] # type: List[dict]
for cat in self.categories:
if cat['name'] in processed_names:
for processed_category in processed_categories:
if cat['name'] == processed_category['name']:
processed_category['data'] = [processed_category['data'][0] + cat['data'][0]]
processed_category['groups'].extend(cat['groups'])
break
else:
processed_categories.append(cat)
processed_names.append(cat['name'])
return json.dumps(processed_categories)
class TableOrListWidget(BaseWidget):
"""Table/List Widget representation
:type data: ``Any``
    :param data: a list of rows to display (it is better to use the add_row function to populate the data).
:return: No data returned
:rtype: ``None``
"""
def __init__(self, data=None):
# type: (Any) -> None
self.data = data if data else []
if not isinstance(self.data, list):
self.data = [data]
def add_row(self, data):
"""Add a row to the widget.
:type data: ``Any``
:param data: the data to add to the list/table.
:return: No data returned
:rtype: ``None``
"""
self.data.append(data)
def to_display(self):
return json.dumps({
'total': len(self.data),
'data': self.data
})
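# A minimal usage sketch (added for illustration; the row contents are
# hypothetical):
def _example_table_or_list_widget():
    widget = TableOrListWidget()
    widget.add_row({'name': 'indicator-1', 'score': 2})
    widget.add_row({'name': 'indicator-2', 'score': 3})
    return widget.to_display()  # '{"total": 2, "data": [...]}'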
class IndicatorsSearcher:
"""Used in order to search indicators by the paging or serachAfter param
:type page: ``int``
:param page: the number of page from which we start search indicators from.
:type filter_fields: ``str``
:param filter_fields: comma separated fields to filter (e.g. "value,type")
:return: No data returned
:rtype: ``None``
"""
def __init__(self, page=0, filter_fields=None):
# searchAfter is available in searchIndicators from version 6.1.0
self._can_use_search_after = is_demisto_version_ge('6.1.0')
# populateFields merged in https://github.com/demisto/server/pull/18398
self._can_use_filter_fields = is_demisto_version_ge('6.1.0', build_number='1095800')
self._search_after_title = 'searchAfter'
self._search_after_param = None
self._page = page
self._filter_fields = filter_fields
def search_indicators_by_version(self, from_date=None, query='', size=100, to_date=None, value=''):
"""There are 2 cases depends on the sever version:
1. Search indicators using paging, raise the page number in each call.
2. Search indicators using searchAfter param, update the _search_after_param in each call.
:type from_date: ``str``
:param from_date: the start date to search from.
:type query: ``str``
:param query: indicator search query
        :type size: ``int``
:param size: limit the number of returned results.
:type to_date: ``str``
        :param to_date: the end date to search until.
:type value: ``str``
:param value: the indicator value to search.
:return: object contains the search results
:rtype: ``dict``
"""
if self._can_use_search_after:
# if search_after_param exists use it for paging, else use the page number
search_iocs_params = assign_params(
fromDate=from_date,
toDate=to_date,
query=query,
size=size,
value=value,
searchAfter=self._search_after_param,
populateFields=self._filter_fields if self._can_use_filter_fields else None,
page=self._page if not self._search_after_param else None
)
res = demisto.searchIndicators(**search_iocs_params)
self._search_after_param = res[self._search_after_title]
if res[self._search_after_title] is None:
demisto.info('Elastic search using searchAfter returned all indicators')
else:
res = demisto.searchIndicators(fromDate=from_date, toDate=to_date, query=query, size=size, page=self._page,
value=value)
self._page += 1
return res
@property
def page(self):
return self._page
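# A minimal usage sketch (added for illustration; the query string is
# hypothetical, and the 'iocs' response key is assumed from the
# demisto.searchIndicators response format):
def _example_search_all_indicators():
    searcher = IndicatorsSearcher()
    found = []
    while True:
        res = searcher.search_indicators_by_version(query='type:IP', size=200)
        iocs = res.get('iocs') or []
        if not iocs:
            break
        found.extend(iocs)
    return found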
class AutoFocusKeyRetriever:
"""AutoFocus API Key management class
:type api_key: ``str``
:param api_key: Auto Focus API key coming from the integration parameters
:type override_default_credentials: ``bool``
:param override_default_credentials: Whether to override the default credentials and use the
Cortex XSOAR given AutoFocus API Key
:return: No data returned
:rtype: ``None``
"""
def __init__(self, api_key):
# demisto.getAutoFocusApiKey() is available from version 6.2.0
if not api_key:
if not is_demisto_version_ge("6.2.0"): # AF API key is available from version 6.2.0
raise DemistoException('For versions earlier than 6.2.0, configure an API Key.')
try:
api_key = demisto.getAutoFocusApiKey() # is not available on tenants
except ValueError as err:
raise DemistoException('AutoFocus API Key is only available on the main account for TIM customers. ' + str(err))
self.key = api_key
def get_feed_last_run():
"""
    This function gets the feed's last run. From XSOAR version 6.2.0 it uses `demisto.getLastRun()`;
    before XSOAR version 6.2.0 it uses `demisto.getIntegrationContext()`.
:rtype: ``dict``
:return: All indicators from the feed's last run
"""
if is_demisto_version_ge('6.2.0'):
feed_last_run = demisto.getLastRun() or {}
if not feed_last_run:
integration_ctx = demisto.getIntegrationContext()
if integration_ctx:
feed_last_run = integration_ctx
demisto.setLastRun(feed_last_run)
demisto.setIntegrationContext({})
else:
feed_last_run = demisto.getIntegrationContext() or {}
return feed_last_run
def set_feed_last_run(last_run_indicators):
"""
    This function sets the feed's last run. From XSOAR version 6.2.0 it uses `demisto.setLastRun()`;
    before XSOAR version 6.2.0 it uses `demisto.setIntegrationContext()`.
:type last_run_indicators: ``dict``
:param last_run_indicators: Indicators to save in "lastRun" object.
:rtype: ``None``
:return: None
"""
if is_demisto_version_ge('6.2.0'):
demisto.setLastRun(last_run_indicators)
else:
demisto.setIntegrationContext(last_run_indicators)
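# A minimal usage sketch (added for illustration; the stored key and value are
# hypothetical):
def _example_feed_last_run_roundtrip():
    last_run = get_feed_last_run()
    last_run['latest_indicator'] = 'example.com'
    set_feed_last_run(last_run)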
def support_multithreading():
"""Adds lock on the calls to the Cortex XSOAR server from the Demisto object to support integration which use multithreading.
:return: No data returned
:rtype: ``None``
"""
global demisto
prev_do = demisto._Demisto__do # type: ignore[attr-defined]
demisto.lock = Lock() # type: ignore[attr-defined]
def locked_do(cmd):
try:
if demisto.lock.acquire(timeout=60): # type: ignore[call-arg,attr-defined]
return prev_do(cmd) # type: ignore[call-arg]
else:
raise RuntimeError('Failed acquiring lock')
finally:
demisto.lock.release() # type: ignore[attr-defined]
demisto._Demisto__do = locked_do # type: ignore[attr-defined]
|
mit
| 4,435,624,354,605,948,000
| 36.151411
| 738
| 0.577134
| false
| 4.137854
| false
| false
| false
|
kkovaacs/zorp
|
pylib/Zorp/SockAddr.py
|
1
|
9351
|
############################################################################
##
## Copyright (c) 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009,
## 2010, 2011 BalaBit IT Ltd, Budapest, Hungary
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
##
##
############################################################################
"""
<module maturity="stable">
<summary>
Module defining interface to the SockAddr.
</summary>
<description>
<para>
This module implements <parameter>inet_ntoa</parameter> and <parameter>inet_aton</parameter>. The module also provides an interface
to the SockAddr services of the Zorp core. SockAddr is used for example to define the bind address of
<link linkend="python.Dispatch.Dispatcher">Dispatchers</link>, or the address of the ZAS server in
<link linkend="python.AuthDB.AuthenticationProvider">AuthenticationProvider</link> policies.
</para>
</description>
</module>
"""
from string import split, atoi
from socket import htonl, ntohl
def inet_aton(ip):
"""
<function maturity="stable">
<summary>
Function to convert an internet address to a 32-bit integer.
</summary>
<description>
<para>
This function converts the string representation of an IPv4 address
to an integer in network byte order.
Returns unsigned long in network byte order.
</para>
</description>
<metainfo>
<arguments>
<argument maturity="stable">
<name>ip</name>
<type><string/></type>
<description>A dotted-quad string</description>
</argument>
</arguments>
</metainfo>
</function>
"""
# FIXME: there is no parameter check
    parts = split(ip, '.', 3)
return htonl(atoi(parts[0]) << 24 | \
atoi(parts[1]) << 16 | \
atoi(parts[2]) << 8 | \
atoi(parts[3]))
def inet_ntoa(ip):
"""
<function maturity="stable">
<summary>
Function to convert a 32-bit integer into an IPv4 address.
</summary>
<description>
<para>
This function converts an IP address from network byte order
into its string representation (dotted quad).
Returns string representation of the IP address.
</para>
</description>
<metainfo>
<arguments>
<argument maturity="stable">
<name>ip</name>
<type></type>
<description>The IP address as a 32-bit integer (network byte order).</description>
</argument>
</arguments>
</metainfo>
</function>
"""
ip = ntohl(ip)
parts = (((ip & 0xff000000) >> 24) & 0xff,
(ip & 0x00ff0000) >> 16,
(ip & 0x0000ff00) >> 8,
(ip & 0x000000ff))
return "%u.%u.%u.%u" % parts
class SockAddrInet:
"""
<class maturity="stable">
<summary>
Class encapsulating an IPv4 address:port pair.
</summary>
<description>
<para>
This class encapsulates an IPv4 address:port pair, similarly to
the <parameter>sockaddr_in</parameter> struct in C. The class is implemented and exported by
the Zorp core. The <parameter>SockAddrInet</parameter> Python class serves only
documentation purposes, and has no real connection to the
behavior implemented in C.
</para>
<example>
<title>SockAddrInet example</title>
<para>
The following example defines an IPv4 address:port pair.</para>
<synopsis>
SockAddrInet('192.168.10.10', 80)
</synopsis>
<para>
The following example uses SockAddrInet in a dispatcher. See <xref linkend="python.Dispatch.Dispatcher"/> for details on Dispatchers.
</para>
<synopsis>
Dispatcher(transparent=TRUE, bindto=DBSockAddr(protocol=ZD_PROTO_TCP, sa=SockAddrInet('192.168.11.11', 50080)), service="intra_HTTP_inter", backlog=255, rule_port="50080")
</synopsis>
</example>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>type</name>
<type><string/></type>
<description>The <parameter>inet</parameter> value that indicates an address in the AF_INET domain.</description>
</attribute>
<attribute maturity="stable">
<name>ip</name>
<type></type>
<description>IP address (network byte order).</description>
</attribute>
<attribute maturity="stable">
<name>ip_s</name>
<type></type>
<description>IP address in string representation.</description>
</attribute>
<attribute maturity="stable">
<name>port</name>
<type></type>
<description>Port number (network byte order).</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
pass
class SockAddrInetRange:
"""
<class maturity="stable">
<summary>
Class encapsulating an IPv4 address and a port range.
</summary>
<description>
<para>
A specialized SockAddrInet class which allocates a new port
within the given range of ports when a dispatcher bounds to it.
The class is implemented and exported by
the Zorp core. The <parameter>SockAddrInetRange</parameter> Python class serves only
documentation purposes, and has no real connection to the
behavior implemented in C.
</para>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>type</name>
<type><string/></type>
<description>The <parameter>inet</parameter> value that indicates an address in the AF_INET domain.</description>
</attribute>
<attribute maturity="stable">
<name>ip</name>
<type></type>
<description>IP address (network byte order).</description>
</attribute>
<attribute maturity="stable">
<name>ip_s</name>
<type></type>
<description>IP address in string representation.</description>
</attribute>
<attribute maturity="stable">
<name>port</name>
<type></type>
<description>Port number (network byte order).</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
pass
class SockAddrUnix:
"""
<class maturity="stable">
<summary>
Class encapsulating a UNIX domain socket.
</summary>
<description>
<para>
This class encapsulates a UNIX domain socket endpoint.
The socket is represented by a filename. The <parameter>SockAddrUnix</parameter>
Python class serves only
documentation purposes, and has no real connection to the
behavior implemented in C.
</para>
<example>
<title>SockAddrUnix example</title>
<para>
The following example defines a Unix domain socket.</para>
<synopsis>
SockAddrUnix('/var/sample.socket')
</synopsis>
<para>
The following example uses SockAddrUnix in a DirectedRouter.
</para>
<synopsis>
Service(name="demo_service", proxy_class=HttpProxy, router=DirectedRouter(dest_addr=SockAddrUnix('/var/sample.socket'), overrideable=FALSE, forge_addr=FALSE))
</synopsis>
</example>
</description>
<metainfo>
<attributes>
<attribute maturity="stable">
<name>type</name>
<type><string/></type>
<description>The <parameter>unix</parameter> value that indicates an address in the UNIX domain.</description>
</attribute>
</attributes>
</metainfo>
</class>
"""
#class SockAddrInet6(SockAddr):
# def __init__(self, ip, port):
# SockAddr.__init__(self, 'inet6')
# self.ip = ip
# self.port = port
|
gpl-2.0
| 7,737,876,345,786,557,000
| 35.960474
| 171
| 0.556411
| false
| 4.37576
| false
| false
| false
|
voostar/hp_laserprinter_monitor
|
frontline/frontline/settings.py
|
1
|
5540
|
# Django settings for the frontline project.
import os.path
DEBUG = True
TEMPLATE_DEBUG = DEBUG
ADMINS = (
# ('Your Name', 'your_email@example.com'),
)
MANAGERS = ADMINS
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql', # Add 'postgresql_psycopg2', 'mysql', 'sqlite3' or 'oracle'.
'NAME': 'hp_laserprinter_monitor', # Or path to database file if using sqlite3.
# The following settings are not used with sqlite3:
'USER': 'root',
'PASSWORD': 'long841205',
'HOST': '10.8.144.247', # Empty for localhost through domain sockets or '127.0.0.1' for localhost through TCP.
'PORT': '', # Set to empty string for default.
}
}
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# In a Windows environment this must be set to your system time zone.
TIME_ZONE = 'Asia/Shanghai'
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True
# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale.
USE_L10N = True
# If you set this to False, Django will not use timezone-aware datetimes.
USE_TZ = True
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/var/www/example.com/media/"
MEDIA_ROOT = ''
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://example.com/media/", "http://media.example.com/"
MEDIA_URL = ''
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/var/www/example.com/static/"
STATIC_ROOT = ''
# URL prefix for static files.
# Example: "http://example.com/static/", "http://static.example.com/"
STATIC_URL = '/static/'
# Additional locations of static files
STATICFILES_DIRS = (
# Put strings here, like "/home/html/static" or "C:/www/django/static".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
os.path.join(os.path.dirname(__file__), 'static',),
)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# Make this unique, and don't share it with anybody.
SECRET_KEY = 'yw#t3j-j3+v7_(mb2a#xlk7k7uu@gtu75-%7&&&bl*dvbc+j@8'
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
# 'django.template.loaders.eggs.Loader',
)
MIDDLEWARE_CLASSES = (
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
# Uncomment the next line for simple clickjacking protection:
# 'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'frontline.urls'
# Python dotted path to the WSGI application used by Django's runserver.
WSGI_APPLICATION = 'frontline.wsgi.application'
TEMPLATE_DIRS = (
"templates",
"templates/displayer",
# Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
)
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Uncomment the next line to enable the admin:
'django.contrib.admin',
# Uncomment the next line to enable admin documentation:
# 'django.contrib.admindocs',
'displayer',
)
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
|
unlicense
| -5,563,714,469,655,122,000
| 33.409938
| 139
| 0.686643
| false
| 3.685961
| false
| false
| false
|
segfaulthunter/asynchia
|
asynchia/forthcoming.py
|
1
|
7047
|
# -*- coding: us-ascii -*-
# asynchia - asynchronous networking library
# Copyright (C) 2009 Florian Mayer <florian.mayer@bitsrc.org>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
# You should have received a copy of the GNU Lesser General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
""" Facilities to refer to data that is not yet available.
Example:
# Usually acquired by a call that results in network I/O.
# Global variable for demonstration purposes.
a = DataNotifier()
def bar():
# Request result of network I/O.
blub = yield a
yield Coroutine.return_(blub)
def foo():
# Wait for completion of new coroutine which - in turn - waits
# for I/O.
blub = yield Coroutine.call_itr(bar())
print "yay %s" % blub
c = Coroutine(foo())
c.call()
# Network I/O complete.
a.submit('blub')
"""
import threading
import asynchia
from asynchia.util import b
_NULL = object()
class PauseContext(object):
""" Collection of Coroutines which are currently paused but not waiting
for any data. They are paused to prevent too much time to be spent in
them, preventing possibly important I/O from being done. """
def __init__(self):
self.paused = []
def unpause(self):
""" Continue all paused coroutines. """
for coroutine in self.paused:
coroutine.call()
self.paused[:] = []
def pause(self, coroutine):
""" Add coroutine to the list of paused coroutines. """
self.paused.append(coroutine)
class Coroutine(object):
""" Create coroutine from given iterator. Yielding None will pause the co-
routine until continuation by the given PauseContext. Yielding a
DataNotifier will send the requested value to the coroutine once
available. Yielding an instance of Coroutine.return_ will end execution
of the coroutine and send the return value to any coroutines that may be
waiting for it or calls any callbacks associated with it. """
class return_:
""" Yield an instance of this to signal that the coroutine finished
with the given return value (defaults to None). """
def __init__(self, obj=None):
self.obj = obj
def __init__(self, itr, pcontext=None, datanotifier=None):
self.itr = itr
if datanotifier is None:
datanotifier = DataNotifier()
self.datanotifier = datanotifier
self.pcontext = pcontext
def send(self, data):
""" Send requested data to coroutine. """
try:
self.handle_result(self.itr.send(data))
except StopIteration:
self.datanotifier.submit(None)
def call(self):
""" Start (or resume) execution of the coroutine. """
try:
self.handle_result(self.itr.next())
except StopIteration:
self.datanotifier.submit(None)
def handle_result(self, result):
""" Internal. """
if result is None:
if self.pcontext is not None:
self.pcontext.pause(self)
else:
raise ValueError("No PauseContext.")
elif isinstance(result, Coroutine.return_):
self.datanotifier.submit(result.obj)
else:
result.add_coroutine(self)
@classmethod
def call_itr(cls, itr):
""" Create a coroutine from the given iterator, start it
and return the DataNotifier. """
coroutine = cls(itr)
coroutine.call()
return coroutine.datanotifier
class DataNotifier(object):
""" Call registered callbacks and send data to registered coroutines
at submission of data. """
    def __init__(self, socket_map=None):
        # socket_map may be None for purely synchronous use; it is only
        # required by inject(), which dispatches through the socket map.
self.dcallbacks = []
self.rcallbacks = []
self.coroutines = []
self.finished = False
self.data = _NULL
self.event = threading.Event()
self.socket_map = socket_map
def add_coroutine(self, coroutine):
""" Add coroutine that waits for the submission of this data. """
if self.data is _NULL:
self.coroutines.append(coroutine)
else:
coroutine.send(self.data)
def add_databack(self, callback):
""" Add databack (function that receives the the data-notifier data
upon submission as arguments). """
if self.data is _NULL:
self.dcallbacks.append(callback)
else:
callback(self.data)
def add_callback(self, callback):
""" Add callback (function that only receives the data upon
submission as an argument). """
if self.data is _NULL:
self.rcallbacks.append(callback)
else:
callback(self, self.data)
def poll(self):
""" Poll whether result has already been submitted. """
return self.finished
def submit(self, data):
""" Submit data; send it to any coroutines that may be registered and
call any data- and callbacks that may be registered. """
self.data = data
for callback in self.dcallbacks:
callback(data)
for callback in self.rcallbacks:
callback(self, data)
for coroutine in self.coroutines:
coroutine.send(data)
self.coroutines[:] = []
self.rcallbacks[:] = []
self.dcallbacks[:] = []
# Wake up threads waiting for the data.
self.event.set()
self.finished = True
def inject(self, data):
""" Submit data and ensure their callbacks are called in the main
thread. """
self.socket_map.call_synchronized(lambda: self.submit(data))
def wait(self, timeout=None):
""" Block execution of current thread until the data is available.
Return requested data. """
self.event.wait(timeout)
return self.data
@staticmethod
def _coroutine(datanotifier, fun, args, kwargs):
""" Implementation detail. """
datanotifier.inject(fun(*args, **kwargs))
@classmethod
def threaded_coroutine(cls, socket_map, fun, *args, **kwargs):
""" Run fun(*args, **kwargs) in a thread and return a DataNotifier
notifying upon availability of the return value of the function. """
datanot = cls(socket_map)
threading.Thread(
target=cls._coroutine, args=(datanot, fun, args, kwargs)
).start()
return datanot
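# Usage sketch (illustration only; assumes 'socket_map' provides
# call_synchronized(), as used by inject() above):
def _example_threaded_datanotifier(socket_map):
    # Compute 2 ** 10 in a worker thread and react once the result arrives.
    notifier = DataNotifier.threaded_coroutine(socket_map, pow, 2, 10)
    notifier.add_callback(lambda notif, data: None)  # data == 1024 on submission
    return notifier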
|
gpl-3.0
| -4,675,045,574,142,878,000
| 33.043478
| 78
| 0.618277
| false
| 4.234976
| false
| false
| false
|
patrickdw123/ParanoiDF
|
PDFUtils.py
|
1
|
15029
|
# ParanoiDF. A combination of several PDF analysis/manipulation tools to
# produce one of the most technically useful PDF analysis tools.
#
# Idea proposed by Julio Hernandez-Castro, University of Kent, UK.
# By Patrick Wragg
# University of Kent
# 21/07/2014
#
# With thanks to:
# Julio Hernandez-Castro, my supervisor.
# Jose Miguel Esparza for writing PeePDF (the basis of this tool).
# Didier Stevens for his "make-PDF" tools.
# Blake Hartstein for Jsunpack-n.
# Yusuke Shinyama for Pdf2txt.py (PDFMiner)
# Nacho Barrientos Arias for Pdfcrack.
# Kovid Goyal for Calibre (DRM removal).
# Jay Berkenbilt for QPDF.
#
# Copyright (C) 2014-2018 Patrick Wragg
#
# This file is part of ParanoiDF.
#
# ParanoiDF is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ParanoiDF is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ParanoiDF. If not, see <http://www.gnu.org/licenses/>.
#
# This was written by Jose Miguel Esparza for the tool PeePDF.
'''
Module with some misc functions
'''
import os, re, htmlentitydefs, json, urllib, urllib2
def clearScreen():
'''
Simple method to clear the screen depending on the OS
'''
if os.name == 'nt':
os.system('cls')
elif os.name == 'posix':
os.system('reset')
elif os.name == 'mac':
os.system('clear')
def countArrayElements(array):
'''
Simple method to count the repetitions of elements in an array
@param array: An array of elements
@return: A tuple (elements,counters), where elements is a list with the distinct elements and counters is the list with the number of times they appear in the array
'''
elements = []
counters = []
for element in array:
if element in elements:
indx = elements.index(element)
counters[indx] += 1
else:
elements.append(element)
counters.append(1)
return elements,counters
def countNonPrintableChars(string):
'''
Simple method to return the non printable characters found in an string
@param string: A string
@return: Number of non printable characters in the string
'''
counter = 0
for i in range(len(string)):
if ord(string[i]) <= 31 or ord(string[i]) > 127:
counter += 1
return counter
def decodeName(name):
'''
Decode the given PDF name
@param name: A PDFName string to decode
@return: A tuple (status,statusContent), where statusContent is the decoded PDF name in case status = 0 or an error in case status = -1
'''
decodedName = name
hexNumbers = re.findall('#([0-9a-f]{2})', name, re.DOTALL | re.IGNORECASE)
for hexNumber in hexNumbers:
try:
decodedName = decodedName.replace('#'+hexNumber,chr(int(hexNumber,16)))
except:
return (-1,'Error decoding name')
return (0,decodedName)
def decodeString(string):
'''
Decode the given PDF string
@param string: A PDFString to decode
    @return: A tuple (status,statusContent), where statusContent is the decoded PDF string in case status = 0 or an error in case status = -1
'''
decodedString = string
    octalNumbers = re.findall(r'\\([0-7]{1,3})', decodedString, re.DOTALL)
    for octal in octalNumbers:
        try:
            decodedString = decodedString.replace('\\'+octal, chr(int(octal, 8)))
except:
return (-1,'Error decoding string')
return (0,decodedString)
def encodeName(name):
'''
Encode the given PDF name
@param name: A PDFName string to encode
@return: A tuple (status,statusContent), where statusContent is the encoded PDF name in case status = 0 or an error in case status = -1
'''
encodedName = ''
if name[0] == '/':
name = name[1:]
for char in name:
if char == '\0':
encodedName += char
else:
try:
                hexVal = '%02x' % ord(char)  # two hex digits, as PDF name encoding expects
                encodedName += '#'+hexVal
except:
return (-1,'Error encoding name')
return (0,'/'+encodedName)
def encodeString(string):
'''
Encode the given PDF string
@param string: A PDFString to encode
@return: A tuple (status,statusContent), where statusContent is the encoded PDF string in case status = 0 or an error in case status = -1
'''
encodedString = ''
try:
for char in string:
octal = '%o' % ord(char)
encodedString += '\\'+(3-len(octal))*'0'+octal
except:
return (-1,'Error encoding string')
return (0,encodedString)
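# Usage sketch (illustration only, not part of the original module):
# encodeString/decodeString octal round-trip.
def _exampleOctalRoundTrip():
    status, encoded = encodeString('AB')     # (0, '\\101\\102')
    status, decoded = decodeString(encoded)  # (0, 'AB')
    return decoded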
def escapeRegExpString(string):
'''
Escape the given string to include it as a regular expression
@param string: A regular expression to be escaped
@return: Escaped string
'''
toEscapeChars = ['\\','(',')','.','|','^','$','*','+','?','[',']']
escapedValue = ''
for i in range(len(string)):
if string[i] in toEscapeChars:
escapedValue += '\\'+string[i]
else:
escapedValue += string[i]
return escapedValue
def escapeString(string):
'''
Escape the given string
@param string: A string to be escaped
@return: Escaped string
'''
toEscapeChars = ['\\','(',')']
escapedValue = ''
for i in range(len(string)):
if string[i] in toEscapeChars and (i == 0 or string[i-1] != '\\'):
if string[i] == '\\':
if len(string) > i+1 and re.match('[0-7]',string[i+1]):
escapedValue += string[i]
else:
escapedValue += '\\'+string[i]
else:
escapedValue += '\\'+string[i]
elif string[i] == '\r':
escapedValue += '\\r'
elif string[i] == '\n':
escapedValue += '\\n'
elif string[i] == '\t':
escapedValue += '\\t'
elif string[i] == '\b':
escapedValue += '\\b'
elif string[i] == '\f':
escapedValue += '\\f'
else:
escapedValue += string[i]
return escapedValue
def getBitsFromNum(num, bitsPerComponent = 8):
'''
Makes the conversion between number and bits
@param num: Number to be converted
@param bitsPerComponent: Number of bits needed to represent a component
@return: A tuple (status,statusContent), where statusContent is the string containing the resulting bits in case status = 0 or an error in case status = -1
'''
if not isinstance(num,int):
return (-1,'num must be an integer')
if not isinstance(bitsPerComponent,int):
return (-1,'bitsPerComponent must be an integer')
try:
bitsRepresentation = bin(num)
bitsRepresentation = bitsRepresentation.replace('0b','')
mod = len(bitsRepresentation) % 8
if mod != 0:
bitsRepresentation = '0'*(8-mod) + bitsRepresentation
bitsRepresentation = bitsRepresentation[-1*bitsPerComponent:]
except:
return (-1,'Error in conversion from number to bits')
return (0,bitsRepresentation)
def getNumsFromBytes(bytes, bitsPerComponent = 8):
'''
Makes the conversion between bytes and numbers, depending on the number of bits used per component.
@param bytes: String representing the bytes to be converted
@param bitsPerComponent: Number of bits needed to represent a component
@return: A tuple (status,statusContent), where statusContent is a list of numbers in case status = 0 or an error in case status = -1
'''
if not isinstance(bytes,str):
return (-1,'bytes must be a string')
if not isinstance(bitsPerComponent,int):
return (-1,'bitsPerComponent must be an integer')
outputComponents = []
bitsStream = ''
for byte in bytes:
try:
bitsRepresentation = bin(ord(byte))
bitsRepresentation = bitsRepresentation.replace('0b','')
bitsRepresentation = '0'*(8-len(bitsRepresentation)) + bitsRepresentation
bitsStream += bitsRepresentation
except:
return (-1,'Error in conversion from bytes to bits')
try:
        for i in range(0, len(bitsStream), bitsPerComponent):
bits = bitsStream[i:i+bitsPerComponent]
num = int(bits,2)
outputComponents.append(num)
except:
return (-1,'Error in conversion from bits to bytes')
return (0,outputComponents)
def getBytesFromBits(bitsStream):
'''
Makes the conversion between bits and bytes.
@param bitsStream: String representing a chain of bits
@return: A tuple (status,statusContent), where statusContent is the string containing the resulting bytes in case status = 0 or an error in case status = -1
'''
if not isinstance(bitsStream,str):
return (-1,'The bitsStream must be a string')
bytes = ''
if re.match('[01]*$',bitsStream):
try:
for i in range(0,len(bitsStream),8):
bits = bitsStream[i:i+8]
byte = chr(int(bits,2))
bytes += byte
except:
return (-1,'Error in conversion from bits to bytes')
return (0,bytes)
else:
return (-1,'The format of the bit stream is not correct')
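# Usage sketch (illustration only, not part of the original module): round-trip
# between bytes, per-component numbers and bit strings (8 bits per component).
def _exampleBitsRoundTrip():
    status, nums = getNumsFromBytes('AB')                # (0, [65, 66])
    status, bits = getBitsFromNum(65)                    # (0, '01000001')
    status, back = getBytesFromBits('0100000101000010')  # (0, 'AB')
    return nums, bits, back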
def getBytesFromFile(filename, offset, numBytes):
'''
Returns the number of bytes specified from a file, starting from the offset specified
@param filename: Name of the file
@param offset: Bytes offset
@param numBytes: Number of bytes to retrieve
@return: A tuple (status,statusContent), where statusContent is the bytes read in case status = 0 or an error in case status = -1
'''
if not isinstance(offset,int) or not isinstance(numBytes,int):
return (-1,'The offset and the number of bytes must be integers')
if os.path.exists(filename):
fileSize = os.path.getsize(filename)
bytesFile = open(filename,'rb')
bytesFile.seek(offset)
if offset+numBytes > fileSize:
bytes = bytesFile.read()
else:
bytes = bytesFile.read(numBytes)
bytesFile.close()
return (0,bytes)
else:
return (-1,'File does not exist')
def hexToString(hexString):
'''
    Simple method to convert a hexadecimal string to an ascii string
@param hexString: A string in hexadecimal format
@return: A tuple (status,statusContent), where statusContent is an ascii string in case status = 0 or an error in case status = -1
'''
string = ''
if len(hexString) % 2 != 0:
hexString = '0'+hexString
try:
for i in range(0,len(hexString),2):
string += chr(int(hexString[i]+hexString[i+1],16))
except:
return (-1,'Error in hexadecimal conversion')
return (0,string)
def numToHex(num, numBytes):
'''
Given a number returns its hexadecimal format with the specified length, adding '\0' if necessary
@param num: A number (int)
@param numBytes: Length of the output (int)
@return: A tuple (status,statusContent), where statusContent is a number in hexadecimal format in case status = 0 or an error in case status = -1
'''
hexString = ''
if not isinstance(num,int):
return (-1,'Bad number')
try:
hexNumber = hex(num)[2:]
if len(hexNumber) % 2 != 0:
hexNumber = '0'+hexNumber
for i in range(0,len(hexNumber)-1,2):
hexString += chr(int(hexNumber[i]+hexNumber[i+1],16))
hexString = '\0'*(numBytes-len(hexString))+hexString
except:
return (-1,'Error in hexadecimal conversion')
return (0,hexString)
def numToString(num, numDigits):
'''
Given a number returns its string format with the specified length, adding '0' if necessary
@param num: A number (int)
@param numDigits: Length of the output string (int)
@return: A tuple (status,statusContent), where statusContent is a number in string format in case status = 0 or an error in case status = -1
'''
if not isinstance(num,int):
return (-1,'Bad number')
strNum = str(num)
if numDigits < len(strNum):
return (-1,'Bad digit number')
for i in range(numDigits-len(strNum)):
strNum = '0' + strNum
return (0,strNum)
def unescapeHTMLEntities(text):
'''
Removes HTML or XML character references and entities from a text string.
@param text The HTML (or XML) source text.
@return The plain text, as a Unicode string, if necessary.
Author: Fredrik Lundh
Source: http://effbot.org/zone/re-sub.htm#unescape-html
'''
def fixup(m):
text = m.group(0)
if text[:2] == "&#":
# character reference
try:
if text[:3] == "&#x":
return unichr(int(text[3:-1], 16))
else:
return unichr(int(text[2:-1]))
except ValueError:
pass
else:
# named entity
try:
text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
except KeyError:
pass
return text # leave as is
return re.sub("&#?\w+;", fixup, text)
def unescapeString(string):
'''
Unescape the given string
@param string: An escaped string
@return: Unescaped string
'''
toUnescapeChars = ['\\','(',')']
unescapedValue = ''
i = 0
while i < len(string):
if string[i] == '\\' and i != len(string)-1:
if string[i+1] in toUnescapeChars:
if string[i+1] == '\\':
unescapedValue += '\\'
i += 1
else:
pass
elif string[i+1] == 'r':
i += 1
unescapedValue += '\r'
elif string[i+1] == 'n':
i += 1
unescapedValue += '\n'
elif string[i+1] == 't':
i += 1
unescapedValue += '\t'
elif string[i+1] == 'b':
i += 1
unescapedValue += '\b'
elif string[i+1] == 'f':
i += 1
unescapedValue += '\f'
else:
unescapedValue += string[i]
else:
unescapedValue += string[i]
i += 1
return unescapedValue
def vtcheck(md5, vtKey):
'''
Function to check a hash on VirusTotal and get the report summary
@param md5: The MD5 to check (hexdigest)
@param vtKey: The VirusTotal API key needed to perform the request
@return: A dictionary with the result of the request
'''
vtUrl = 'https://www.virustotal.com/vtapi/v2/file/report'
parameters = {'resource':md5,'apikey':vtKey}
try:
data = urllib.urlencode(parameters)
req = urllib2.Request(vtUrl, data)
response = urllib2.urlopen(req)
jsonResponse = response.read()
except:
return (-1, 'The request to VirusTotal has not been successful')
try:
jsonDict = json.loads(jsonResponse)
except:
return (-1, 'An error has occurred while parsing the JSON response from VirusTotal')
return (0, jsonDict)
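# Usage sketch (illustration only; the MD5 below is the well-known empty-file
# hash and 'YOUR_VT_API_KEY' is a placeholder, not a real key; 'positives' is
# a field of the VirusTotal v2 report format):
def _exampleVtCheck():
    status, report = vtcheck('d41d8cd98f00b204e9800998ecf8427e', 'YOUR_VT_API_KEY')
    if status == 0:
        return report.get('positives')
    return None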
|
gpl-3.0
| -899,530,424,950,584,600
| 32.103524
| 166
| 0.623461
| false
| 3.648701
| false
| false
| false
|
ratschlab/ASP
|
examples/undocumented/python_modular/kernel_combined_modular.py
|
1
|
1900
|
from tools.load import LoadMatrix
from numpy import double
lm=LoadMatrix()
traindat = double(lm.load_numbers('../data/fm_train_real.dat'))
testdat = double(lm.load_numbers('../data/fm_test_real.dat'))
traindna = lm.load_dna('../data/fm_train_dna.dat')
testdna = lm.load_dna('../data/fm_test_dna.dat')
parameter_list = [[traindat,testdat,traindna,testdna],[traindat,testdat,traindna,testdna]]
def kernel_combined_modular(fm_train_real=traindat,fm_test_real=testdat,fm_train_dna=traindna,fm_test_dna=testdna ):
from shogun.Kernel import CombinedKernel, GaussianKernel, FixedDegreeStringKernel, LocalAlignmentStringKernel
from shogun.Features import RealFeatures, StringCharFeatures, CombinedFeatures, DNA
kernel=CombinedKernel()
feats_train=CombinedFeatures()
feats_test=CombinedFeatures()
subkfeats_train=RealFeatures(fm_train_real)
subkfeats_test=RealFeatures(fm_test_real)
subkernel=GaussianKernel(10, 1.1)
feats_train.append_feature_obj(subkfeats_train)
feats_test.append_feature_obj(subkfeats_test)
kernel.append_kernel(subkernel)
subkfeats_train=StringCharFeatures(fm_train_dna, DNA)
subkfeats_test=StringCharFeatures(fm_test_dna, DNA)
degree=3
subkernel=FixedDegreeStringKernel(10, degree)
feats_train.append_feature_obj(subkfeats_train)
feats_test.append_feature_obj(subkfeats_test)
kernel.append_kernel(subkernel)
subkfeats_train=StringCharFeatures(fm_train_dna, DNA)
subkfeats_test=StringCharFeatures(fm_test_dna, DNA)
subkernel=LocalAlignmentStringKernel(10)
feats_train.append_feature_obj(subkfeats_train)
feats_test.append_feature_obj(subkfeats_test)
kernel.append_kernel(subkernel)
kernel.init(feats_train, feats_train)
km_train=kernel.get_kernel_matrix()
kernel.init(feats_train, feats_test)
km_test=kernel.get_kernel_matrix()
return km_train,km_test,kernel
if __name__=='__main__':
print('Combined')
kernel_combined_modular(*parameter_list[0])
|
gpl-2.0
| -7,027,474,954,381,840,000
| 36.254902
| 116
| 0.787895
| false
| 2.857143
| true
| false
| false
|
1flow/1flow
|
oneflow/core/migrations/0129_auto__add_notificationpreferences.py
|
1
|
79648
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'NotificationPreferences'
db.create_table(u'core_notificationpreferences', (
('preferences', self.gf('django.db.models.fields.related.OneToOneField')(related_name='notifications', unique=True, primary_key=True, to=orm['core.Preferences'])),
('received_pokes_email', self.gf('django.db.models.fields.BooleanField')(default=True)),
))
db.send_create_signal('core', ['NotificationPreferences'])
def backwards(self, orm):
# Deleting model 'NotificationPreferences'
db.delete_table(u'core_notificationpreferences')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'base.user': {
'Meta': {'object_name': 'User'},
'address_book': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'avatar_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'}),
'email_announcements': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
'hash_codes': ('jsonfield.fields.JSONField', [], {'default': "{'unsubscribe': 'd92bc8cc02bc498ca8bb388177cd417f'}", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'register_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'sent_emails': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '254', 'db_index': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'core.article': {
'Meta': {'object_name': 'Article', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'pages_urls': ('json_field.fields.JSONField', [], {'default': "u'null'", 'null': 'True', 'blank': 'True'}),
'publishers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'publications'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.author': {
'Meta': {'unique_together': "(('origin_name', 'website'),)", 'object_name': 'Author'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Author']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'identities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'identities_rel_+'", 'null': 'True', 'to': "orm['core.Author']"}),
'is_unsure': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'null': 'True', 'blank': 'True'}),
'origin_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'origin_id_str': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
'origin_name': ('django.db.models.fields.CharField', [], {'max_length': '7168', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authors'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'website_data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'})
},
'core.baseaccount': {
'Meta': {'object_name': 'BaseAccount'},
'conn_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_conn': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_usable': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseaccount_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'accounts'", 'to': u"orm['base.User']"})
},
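        # Polymorphic base class for all feed types (MailFeed, RssAtomFeed,
        # TwitterFeed below); the 'polymorphic_ctype' FK with the
        # 'polymorphic_<app>.<model>_set' related_name is the
        # django-polymorphic convention.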
'core.basefeed': {
'Meta': {'object_name': 'BaseFeed'},
'closed_reason': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_closed': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_fetch': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'errors': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'fetch_interval': ('django.db.models.fields.IntegerField', [], {'default': '43200', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_internal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'items': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'options': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.basefeed_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'processing_chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'processing_parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': u"orm['base.User']"})
},
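        # Polymorphic base class for content items (Article, Email, Tweet);
        # subclasses point back here through their *_ptr parent links.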
'core.baseitem': {
'Meta': {'object_name': 'BaseItem'},
'authors': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'authored_items'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.BaseItem']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.baseitem_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'sources': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sources_rel_+'", 'null': 'True', 'to': "orm['core.BaseItem']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'})
},
'core.chaineditem': {
'Meta': {'object_name': 'ChainedItem'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'chained_items'", 'to': "orm['core.ProcessingChain']"}),
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'item_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.chaineditemparameter': {
'Meta': {'object_name': 'ChainedItemParameter'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ChainedItem']"}),
'notes_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'notes_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'parameters': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
},
'core.combinedfeed': {
'Meta': {'object_name': 'CombinedFeed'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.combinedfeedrule': {
'Meta': {'ordering': "('position',)", 'object_name': 'CombinedFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'combinedfeed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.CombinedFeed']"}),
'feeds': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['core.BaseFeed']", 'symmetrical': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.email': {
'Meta': {'object_name': 'Email', '_ormbases': ['core.BaseItem']},
'attachments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'emails'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'attachments_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'message_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'unique': 'True', 'max_length': '256', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
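        # Folder is a django-mptt tree: note mptt.fields.TreeForeignKey plus
        # the frozen 'lft'/'rght'/'tree_id'/'level' bookkeeping columns. The
        # same pattern recurs below for Language, ProcessingChain, Processor,
        # ProcessorCategory and SimpleTag.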
'core.folder': {
'Meta': {'unique_together': "(('name', 'user', 'parent'),)", 'object_name': 'Folder'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Folder']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'folders'", 'to': u"orm['base.User']"})
},
'core.helpcontent': {
'Meta': {'ordering': "['ordering', 'id']", 'object_name': 'HelpContent'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content_en': ('django.db.models.fields.TextField', [], {}),
'content_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'ordering': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'core.helpwizards': {
'Meta': {'object_name': 'HelpWizards'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'wizards'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_all': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'welcome_beta_shown': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
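        # The Historical* tables store flattened per-save snapshots: relations
        # are kept as plain *_id integer columns, and each row carries
        # history_id/history_date/history_type/history_user -- the layout
        # matches what django-simple-history generates.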
u'core.historicalarticle': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalArticle'},
u'baseitem_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'comments_feed_url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_orphaned': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'pages_urls': ('json_field.fields.JSONField', [], {'default': "u'null'", 'null': 'True', 'blank': 'True'}),
'polymorphic_ctype_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'max_length': '512', 'db_index': 'True'}),
'url_absolute': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'url_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'version_description': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'core.historicalemail': {
'Meta': {'ordering': "(u'-history_date', u'-history_id')", 'object_name': 'HistoricalEmail'},
'attachments_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'baseitem_ptr_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'content': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'content_type': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'blank': 'True'}),
'date_published': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'blank': 'True'}),
'default_rating': ('django.db.models.fields.FloatField', [], {'default': '0.0', 'blank': 'True'}),
'duplicate_of_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'excerpt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'history_date': ('django.db.models.fields.DateTimeField', [], {}),
u'history_id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'history_type': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'history_user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
u'id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'is_hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_restricted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'language_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'message_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '256', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '1024'}),
'origin': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'polymorphic_ctype_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'text_direction': ('django.db.models.fields.CharField', [], {'default': "u'ltr'", 'max_length': '3', 'db_index': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'word_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'core.historyentry': {
'Meta': {'object_name': 'HistoryEntry'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_core.historyentry_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']"})
},
'core.homepreferences': {
'Meta': {'object_name': 'HomePreferences'},
'experimental_features': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'home'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_shows': ('django.db.models.fields.IntegerField', [], {'default': '2', 'blank': 'True'}),
'show_advanced_preferences': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'style': ('django.db.models.fields.CharField', [], {'default': "u'RL'", 'max_length': '2', 'blank': 'True'})
},
'core.language': {
'Meta': {'object_name': 'Language'},
'dj_code': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '16'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'iso639_1': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_2': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'iso639_3': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Language']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.mailaccount': {
'Meta': {'object_name': 'MailAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'password': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'port': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'use_ssl': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'})
},
'core.mailfeed': {
'Meta': {'object_name': 'MailFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mail_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.MailAccount']"}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '2'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'core.mailfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'MailFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.MailFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'header_field': ('django.db.models.fields.IntegerField', [], {'default': '4', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mailfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.MailFeed']"}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_header': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'})
},
'core.nodepermissions': {
'Meta': {'object_name': 'NodePermissions'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SyncNode']", 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
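            # NOTE: the token default below is a literal captured when this
            # migration was frozen -- presumably the evaluated result of a
            # callable default such as uuid4().hex (an assumption). The frozen
            # dict is only used to rebuild ORM state, so the fixed value
            # should be harmless here.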
'token': ('django.db.models.fields.CharField', [], {'default': "'886ae65681ad4ba9b657a060abcbb3f2'", 'max_length': '32', 'blank': 'True'})
},
'core.notificationpreferences': {
'Meta': {'object_name': 'NotificationPreferences'},
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'notifications'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'received_pokes_email': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
'core.originaldata': {
'Meta': {'object_name': 'OriginalData'},
'email': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'email_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'feedparser': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feedparser_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'google_reader': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'google_reader_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'item': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'original_data'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.BaseItem']"}),
'matching_rule': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'matching_rule_processed': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'twitter': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'twitter_processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.preferences': {
'Meta': {'object_name': 'Preferences'},
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['base.User']", 'unique': 'True', 'primary_key': 'True'})
},
'core.processingchain': {
'Meta': {'object_name': 'ProcessingChain'},
'applies_on': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.ProcessorCategory']"}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.ProcessingChain']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_chains'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processingerror': {
'Meta': {'object_name': 'ProcessingError'},
'chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'errors'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'data': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'instance_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'is_temporary': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'issue_ref': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'processor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'errors'", 'null': 'True', 'to': "orm['core.ChainedItem']"})
},
'core.processor': {
'Meta': {'object_name': 'Processor'},
'accept_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.ProcessorCategory']"}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Processor']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'languages': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Language']"}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_processors'", 'null': 'True', 'to': u"orm['base.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.Processor']"}),
'process_code': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'requirements': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'source_uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processors'", 'null': 'True', 'to': u"orm['base.User']"})
},
'core.processorcategory': {
'Meta': {'object_name': 'ProcessorCategory'},
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'maintainer': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'maintained_categories'", 'null': 'True', 'to': u"orm['base.User']"}),
'name_en': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'name_fr': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'name_nt': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.ProcessorCategory']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'source_address': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'processor_categories'", 'null': 'True', 'to': u"orm['base.User']"})
},
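        # Read is the per-(user, item) state record: each is_* boolean flag is
        # paired with a date_* timestamp, apparently recording when that flag
        # was last set.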
'core.read': {
'Meta': {'unique_together': "(('user', 'item'),)", 'object_name': 'Read'},
'bookmark_type': ('django.db.models.fields.CharField', [], {'default': "u'U'", 'max_length': '2'}),
'check_set_subscriptions_131004_done': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'date_analysis': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_archived': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_auto_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_bookmarked': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_fact': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_fun': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowhow': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_knowledge': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_number': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_prospective': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_quote': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_read': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_rules': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_starred': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_analysis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_auto_read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_fact': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_fun': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_good': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_knowhow': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_knowledge': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_number': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_prospective': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_quote': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_read': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'is_rules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_starred': ('django.db.models.fields.NullBooleanField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'item': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'reads'", 'to': "orm['core.BaseItem']"}),
'knowledge_type': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
'rating': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'senders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'reads_sent'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['base.User']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_reads'", 'to': u"orm['base.User']"})
},
'core.readpreferences': {
'Meta': {'object_name': 'ReadPreferences'},
'auto_mark_read_delay': ('django.db.models.fields.IntegerField', [], {'default': '4500', 'blank': 'True'}),
'bookmarked_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'bookmarked_marks_unread': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'mark_auto_read_hide_delay': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'read'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'read_switches_to_fullscreen': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'reading_speed': ('django.db.models.fields.IntegerField', [], {'default': '200', 'blank': 'True'}),
'show_bottom_navbar': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'starred_marks_read': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'starred_removes_bookmarked': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'watch_attributes_mark_archived': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
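        # RSS/Atom feeds keep the last ETag and Last-Modified values returned
        # by the server, presumably so refreshes can issue conditional HTTP
        # GETs.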
'core.rssatomfeed': {
'Meta': {'object_name': 'RssAtomFeed', '_ormbases': ['core.BaseFeed']},
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'last_etag': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'last_modified': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '512'}),
'website': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'feeds'", 'null': 'True', 'to': "orm['core.WebSite']"})
},
'core.selectorpreferences': {
'Meta': {'object_name': 'SelectorPreferences'},
'extended_folders_depth': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'folders_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'lists_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'selector'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'show_closed_streams': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'subscriptions_in_multiple_folders': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'titles_show_unread_count': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.sharepreferences': {
'Meta': {'object_name': 'SharePreferences'},
'default_message': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'share'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"})
},
'core.simpletag': {
'Meta': {'unique_together': "(('name', 'language'),)", 'object_name': 'SimpleTag'},
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.Language']", 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'origin_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'origin_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']", 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.SimpleTag']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'core.snappreferences': {
'Meta': {'object_name': 'SnapPreferences'},
'default_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'snap'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'select_paragraph': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'core.staffpreferences': {
'Meta': {'object_name': 'StaffPreferences'},
'no_home_redirect': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'preferences': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'staff'", 'unique': 'True', 'primary_key': 'True', 'to': "orm['core.Preferences']"}),
'reading_lists_show_bad_articles': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'selector_shows_admin_links': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'super_powers_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
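        # Subscription is the join between a user and a BaseFeed, with
        # per-user name/notes/tags/thumbnail layered on top of the feed's own
        # metadata.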
'core.subscription': {
'Meta': {'unique_together': "(('feed', 'user'),)", 'object_name': 'Subscription'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'subscriptions'", 'blank': 'True', 'to': "orm['core.BaseFeed']"}),
'folders': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Folder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'reads': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Read']"}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.SimpleTag']", 'null': 'True', 'blank': 'True'}),
'thumbnail': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'thumbnail_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'all_subscriptions'", 'blank': 'True', 'to': u"orm['base.User']"})
},
'core.syncnode': {
'Meta': {'object_name': 'SyncNode'},
'broadcast': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_last_seen': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_local_instance': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
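            # Same frozen-literal caveat as NodePermissions.token above: this
            # default looks like a value captured at freeze time, not one
            # meant to be shared across installations.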
'local_token': ('django.db.models.fields.CharField', [], {'default': "'4c81945df7bf461a85a9be23e1b77958'", 'max_length': '32', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
'permission': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'remote_token': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '-1', 'blank': 'True'}),
'strategy': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'sync_error': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '384', 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['base.User']", 'null': 'True', 'blank': 'True'}),
'uuid': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'})
},
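        # Tweet stores the upstream status id ('tweet_id'); the *_fetched
        # flags apparently track whether embedded entities have been expanded
        # into related items yet.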
'core.tweet': {
'Meta': {'object_name': 'Tweet', '_ormbases': ['core.BaseItem']},
u'baseitem_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseItem']", 'unique': 'True', 'primary_key': 'True'}),
'entities': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'tweets'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.BaseItem']"}),
'entities_fetched': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'mentions': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'mentions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Author']"}),
'tweet_id': ('django.db.models.fields.BigIntegerField', [], {'db_index': 'True', 'unique': 'True', 'blank': 'True'})
},
'core.twitteraccount': {
'Meta': {'object_name': 'TwitterAccount', '_ormbases': ['core.BaseAccount']},
u'baseaccount_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseAccount']", 'unique': 'True', 'primary_key': 'True'}),
'fetch_owned_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'fetch_subscribed_lists': ('django.db.models.fields.NullBooleanField', [], {'null': 'True', 'blank': 'True'}),
'social_auth': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'twitter_account'", 'unique': 'True', 'to': u"orm['default.UserSocialAuth']"}),
'timeline': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'twitter_account'", 'unique': 'True', 'null': 'True', 'to': "orm['core.TwitterFeed']"})
},
'core.twitterfeed': {
'Meta': {'object_name': 'TwitterFeed', '_ormbases': ['core.BaseFeed']},
'account': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'twitter_feeds'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.TwitterAccount']"}),
'backfill_completed': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
u'basefeed_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.BaseFeed']", 'unique': 'True', 'primary_key': 'True'}),
'finish_action': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'is_backfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_timeline': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_action': ('django.db.models.fields.IntegerField', [], {'default': '3'}),
'rules_operation': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'scrape_blacklist': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'scrape_whitelist': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'track_locations': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'track_terms': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'uri': ('django.db.models.fields.CharField', [], {'max_length': '255', 'unique': 'True', 'null': 'True', 'blank': 'True'})
},
'core.twitterfeedrule': {
'Meta': {'ordering': "('group', 'position')", 'object_name': 'TwitterFeedRule'},
'check_error': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'clone_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.TwitterFeedRule']", 'null': 'True', 'blank': 'True'}),
'group': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'group_operation': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_valid': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'match_case': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'match_field': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_type': ('django.db.models.fields.IntegerField', [], {'default': '1', 'blank': 'True'}),
'match_value': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'other_field': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'twitterfeed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'rules'", 'to': "orm['core.TwitterFeed']"})
},
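        # UserCounters holds only a 'placeholder' field at this point in the
        # schema's history; the actual counters presumably live outside the
        # relational schema.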
'core.usercounters': {
'Meta': {'object_name': 'UserCounters'},
'placeholder': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_counters'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"})
},
'core.userfeeds': {
'Meta': {'object_name': 'UserFeeds'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.BaseFeed']", 'null': 'True', 'blank': 'True'}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_feeds'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_feed'", 'unique': 'True', 'null': 'True', 'to': "orm['core.BaseFeed']"})
},
'core.userimport': {
'Meta': {'object_name': 'UserImport', '_ormbases': ['core.HistoryEntry']},
'date_finished': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'date_started': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'historyentry_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.HistoryEntry']", 'unique': 'True', 'primary_key': 'True'}),
'lines': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'results': ('json_field.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'blank': 'True'}),
'urls': ('django.db.models.fields.TextField', [], {})
},
'core.usersubscriptions': {
'Meta': {'object_name': 'UserSubscriptions'},
'blogs': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'blogs'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Subscription']"}),
'imported_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'imported_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'received_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'received_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'sent_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'sent_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'user_subscriptions'", 'unique': 'True', 'primary_key': 'True', 'to': u"orm['base.User']"}),
'written_items': ('django.db.models.fields.related.OneToOneField', [], {'blank': 'True', 'related_name': "'written_items_user_subscriptions'", 'unique': 'True', 'null': 'True', 'to': "orm['core.Subscription']"})
},
'core.website': {
'Meta': {'object_name': 'WebSite'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'db_index': 'True', 'blank': 'True'}),
'date_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'description_en': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_fr': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'description_nt': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'duplicate_of': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.WebSite']", 'null': 'True', 'blank': 'True'}),
'duplicate_status': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'fetch_limit_nr': ('django.db.models.fields.IntegerField', [], {'default': '16', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'image_url': ('django.db.models.fields.URLField', [], {'max_length': '384', 'null': 'True', 'blank': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'mail_warned': ('json_field.fields.JSONField', [], {'default': '[]', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['core.WebSite']"}),
'processing_chain': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'websites'", 'null': 'True', 'to': "orm['core.ProcessingChain']"}),
'processing_parameters': ('yamlfield.fields.YAMLField', [], {'null': 'True', 'blank': 'True'}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'short_description_en': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_fr': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'short_description_nt': ('django.db.models.fields.CharField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '200', 'blank': 'True'})
},
u'default.usersocialauth': {
'Meta': {'unique_together': "(('provider', 'uid'),)", 'object_name': 'UserSocialAuth', 'db_table': "'social_auth_usersocialauth'"},
'extra_data': ('social.apps.django_app.default.fields.JSONField', [], {'default': "'{}'"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'provider': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'uid': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'social_auth'", 'to': u"orm['base.User']"})
}
}
complete_apps = ['core']
|
agpl-3.0
| 161,011,278,336,962,400
| 99.949303
| 226
| 0.556649
| false
| 3.579525
| false
| false
| false
|
akelge/utils
|
mailIdManager/account.py
|
1
|
2481
|
#
# account.py
# mailVirtual
#
# Created by Andrea Mistrali on 25/09/09.
# Copyright akelge@gmail.com 2009. All rights reserved.
#
# $Id$
from Foundation import *
class Accounts(object):
pl=None
binary=False
modified=False
filename=''
def __new__(cls, filename):
# dictionaryWithContentsOfFile_ returns None on failure instead of
# raising IOError, so the result is checked explicitly
cls.pl = NSMutableDictionary.dictionaryWithContentsOfFile_(filename)
if cls.pl is None:
return None
cls.filename = filename
return object.__new__(cls)
def __init__(self, filename="com.apple.mail.plist"):
self.accountList=[]
if self.pl:
accountList=[a for a in self.pl['MailAccounts'] if (a['AccountType'] in ['IMAPAccount', 'POPAccount'])]
for account in accountList:
self.accountList.append(Account(account, self))
def save(self, filename=None):
if not filename:
filename=self.filename
if self.pl:
self.pl.writeToFile_atomically_(filename, False)
class Account(object):
def __init__(self, accountDict, parent):
self.account = accountDict
self.name = self.account['AccountName']
self.parent = parent
self.mainAddress = "%s <%s>" % (self.account['FullUserName'], self.account['EmailAddresses'][0])
# Setup Aliases
if not self.account.has_key('EmailAliases'):
self.account['EmailAliases']=[]
self.aliases=self.account['EmailAliases']
def __repr__(self):
return r"<Account '%s'>" % (self.name)
def addAlias(self, name, alias, index=None):
newAlias={'name': name, 'alias': alias}
if index is not None:
self.aliases.insert(index, newAlias)
else:
self.aliases.append(newAlias)
self.parent.modified=True
def delAlias(self, index):
if index in range(0,len(self.aliases)):
self.aliases.pop(index)
self.parent.modified=True
def modAlias(self, index, name, alias):
if index in range(0,len(self.aliases)):
self.delAlias(index)
self.addAlias(name, alias, index)
self.parent.modified=True
def moveAliasUpDown(self, index, step):
"""
Move an alias by `step` positions in the list, guarding against indices that fall out of range
"""
if (index-step) in range(0,len(self.aliases)):
item=self.aliases.pop(index)
self.aliases.insert((index-step), item)
self.parent.modified=True
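# Usage sketch (illustrative, not part of the original module; the plist
# path is an assumption and would normally live under ~/Library/Preferences):
#
#   accounts = Accounts("com.apple.mail.plist")
#   if accounts:
#       acct = accounts.accountList[0]
#       acct.addAlias("Work", "work@example.com")
#       accounts.save()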
|
gpl-2.0
| -7,802,215,055,761,493,000
| 30.0125
| 115
| 0.597743
| false
| 3.870515
| false
| false
| false
|
spacedogXYZ/sms_checkin
|
events/models.py
|
1
|
3806
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.core.urlresolvers import reverse
from django.db.models import Avg, Count
from django.db import models
from django.utils import timezone
from timezone_field import TimeZoneField
from phonenumber_field.modelfields import PhoneNumberField
from reminders.models import Prompt
class Event(models.Model):
name = models.CharField(max_length=150)
location = models.CharField(max_length=150, null=True)
host_name = models.CharField(max_length=150, null=True)
time_zone = TimeZoneField(default='US/Pacific')
starts_at = models.DateTimeField(verbose_name="Starts at (local)")
ends_at = models.DateTimeField(verbose_name="Ends at (local)")
created = models.DateTimeField(auto_now_add=True)
prompt_before = models.ForeignKey(Prompt, related_name='+', null=True)
prompt_after = models.ForeignKey(Prompt, related_name='+', null=True)
def __str__(self):
return 'Event #{0} - {1}'.format(self.pk, self.name)
def get_absolute_url(self):
return reverse('view_event', args=[str(self.id)])
@property
def participants(self):
return [a.participant for a in self.attendance_set.select_related('participant')]
@property
def confirmed(self):
return self.attendance_set.filter(confirmed=True)
@property
def ratings(self):
return self.attendance_set.filter(rating__isnull=False).annotate(Count('id')).aggregate(Avg('rating'))
def get_starts_at(self):
"""Returns event.starts_at in specified event.time_zone"""
# NOTE: don't just force timezone into datetime
# DST will mess it up, http://bugs.python.org/issue22994
# use time_zone.localize and normalize instead
# clear existing tzinfo (which was UTC from server), making a naive datetime
starts_at_naive = self.starts_at.replace(tzinfo=None)
# use timezone.localize to add the user's correct tzinfo
starts_at_local = self.time_zone.localize(starts_at_naive)
# normalize to apply DST rules
starts_at_normal = self.time_zone.normalize(starts_at_local)
return starts_at_normal
get_starts_at.short_description = "Starts at (%s)" % timezone.get_current_timezone_name()
# this displays in django admin, which converts to server time before display
def get_ends_at(self):
"""Returns event.ends_at in specified event.time_zone"""
ends_at_naive = self.ends_at.replace(tzinfo=None)
ends_at_local = self.time_zone.localize(ends_at_naive)
ends_at_normal = self.time_zone.normalize(ends_at_local)
return ends_at_normal
get_ends_at.short_description = "Ends at (%s)" % timezone.get_current_timezone_name()
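# Illustration (not part of the app): why the two methods above use
# localize/normalize rather than datetime.replace(tzinfo=...). Assumes pytz,
# which is what TimeZoneField hands back for Event.time_zone:
#
#   import pytz
#   from datetime import datetime
#   tz = pytz.timezone("US/Pacific")
#   naive = datetime(2017, 7, 1, 12, 0)
#   naive.replace(tzinfo=tz).utcoffset()          # -7:53 (LMT, wrong)
#   tz.normalize(tz.localize(naive)).utcoffset()  # -7:00 (PDT, correct)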
class Participant(models.Model):
created = models.DateTimeField(auto_now_add=True)
name = models.CharField(max_length=150)
phone = PhoneNumberField()
email = models.EmailField()
event = models.ManyToManyField(Event, through='Attendance')
def __str__(self):
return 'Participant #{0} - {1}'.format(self.pk, self.name)
@property
def attending(self):
""" all attendances for a participant, ordered by event end times descending """
future_attendances = self.attendance_set.select_related('event')
# TODO filter out attendances in the past #.filter(event__ends_at__gte=timezone.now())
return future_attendances.order_by('-event__ends_at')
class Attendance(models.Model):
participant = models.ForeignKey(Participant)
event = models.ForeignKey(Event)
confirmed = models.NullBooleanField(default=None, blank=True, null=True)
rating = models.IntegerField(default=None, blank=True, null=True)
class Meta:
verbose_name_plural = "attending"
|
agpl-3.0
| 2,791,979,001,343,510,500
| 38.247423
| 110
| 0.691277
| false
| 3.76087
| false
| false
| false
|
TomAugspurger/pandas
|
pandas/tests/series/test_arithmetic.py
|
1
|
24034
|
from datetime import timedelta
import operator
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import IncompatibleFrequency
import pandas as pd
from pandas import Categorical, Index, Series, bdate_range, date_range, isna
import pandas._testing as tm
from pandas.core import nanops, ops
def _permute(obj):
return obj.take(np.random.permutation(len(obj)))
class TestSeriesFlexArithmetic:
@pytest.mark.parametrize(
"ts",
[
(lambda x: x, lambda x: x * 2, False),
(lambda x: x, lambda x: x[::2], False),
(lambda x: x, lambda x: 5, True),
(lambda x: tm.makeFloatSeries(), lambda x: tm.makeFloatSeries(), True),
],
)
@pytest.mark.parametrize(
"opname", ["add", "sub", "mul", "floordiv", "truediv", "pow"]
)
def test_flex_method_equivalence(self, opname, ts):
# check that Series.{opname} behaves like Series.__{opname}__,
tser = tm.makeTimeSeries().rename("ts")
series = ts[0](tser)
other = ts[1](tser)
check_reverse = ts[2]
op = getattr(Series, opname)
alt = getattr(operator, opname)
result = op(series, other)
expected = alt(series, other)
tm.assert_almost_equal(result, expected)
if check_reverse:
rop = getattr(Series, "r" + opname)
result = rop(series, other)
expected = alt(other, series)
tm.assert_almost_equal(result, expected)
def test_flex_method_subclass_metadata_preservation(self, all_arithmetic_operators):
# GH 13208
class MySeries(Series):
_metadata = ["x"]
@property
def _constructor(self):
return MySeries
opname = all_arithmetic_operators
op = getattr(Series, opname)
m = MySeries([1, 2, 3], name="test")
m.x = 42
result = op(m, 1)
assert result.x == 42
def test_flex_add_scalar_fill_value(self):
# GH12723
s = Series([0, 1, np.nan, 3, 4, 5])
exp = s.fillna(0).add(2)
res = s.add(2, fill_value=0)
tm.assert_series_equal(res, exp)
pairings = [(Series.div, operator.truediv, 1), (Series.rdiv, ops.rtruediv, 1)]
for op in ["add", "sub", "mul", "pow", "truediv", "floordiv"]:
fv = 0
lop = getattr(Series, op)
lequiv = getattr(operator, op)
rop = getattr(Series, "r" + op)
# bind op at definition time...
requiv = lambda x, y, op=op: getattr(operator, op)(y, x)
pairings.append((lop, lequiv, fv))
pairings.append((rop, requiv, fv))
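# (aside, not part of the suite) why the op=op default is needed: default
# arguments are evaluated at definition time, while a plain closure would
# see the loop variable's final value:
#   fs = [lambda: op for op in ("add", "sub")]        # every f() -> "sub"
#   fs = [lambda op=op: op for op in ("add", "sub")]  # "add", "sub"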
@pytest.mark.parametrize("op, equiv_op, fv", pairings)
def test_operators_combine(self, op, equiv_op, fv):
def _check_fill(meth, op, a, b, fill_value=0):
exp_index = a.index.union(b.index)
a = a.reindex(exp_index)
b = b.reindex(exp_index)
amask = isna(a)
bmask = isna(b)
exp_values = []
for i in range(len(exp_index)):
with np.errstate(all="ignore"):
if amask[i]:
if bmask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(fill_value, b[i]))
elif bmask[i]:
if amask[i]:
exp_values.append(np.nan)
continue
exp_values.append(op(a[i], fill_value))
else:
exp_values.append(op(a[i], b[i]))
result = meth(a, b, fill_value=fill_value)
expected = Series(exp_values, exp_index)
tm.assert_series_equal(result, expected)
a = Series([np.nan, 1.0, 2.0, 3.0, np.nan], index=np.arange(5))
b = Series([np.nan, 1, np.nan, 3, np.nan, 4.0], index=np.arange(6))
result = op(a, b)
exp = equiv_op(a, b)
tm.assert_series_equal(result, exp)
_check_fill(op, equiv_op, a, b, fill_value=fv)
# should accept axis=0 or axis='rows'
op(a, b, axis=0)
class TestSeriesArithmetic:
# Some of these may end up in tests/arithmetic, but are not yet sorted
def test_add_series_with_period_index(self):
rng = pd.period_range("1/1/2000", "1/1/2010", freq="A")
ts = Series(np.random.randn(len(rng)), index=rng)
result = ts + ts[::2]
expected = ts + ts
expected.iloc[1::2] = np.nan
tm.assert_series_equal(result, expected)
result = ts + _permute(ts[::2])
tm.assert_series_equal(result, expected)
msg = "Input has different freq=D from PeriodIndex\\(freq=A-DEC\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
ts + ts.asfreq("D", how="end")
@pytest.mark.parametrize(
"target_add,input_value,expected_value",
[
("!", ["hello", "world"], ["hello!", "world!"]),
("m", ["hello", "world"], ["hellom", "worldm"]),
],
)
def test_string_addition(self, target_add, input_value, expected_value):
# GH28658 - ensure adding 'm' does not raise an error
a = Series(input_value)
result = a + target_add
expected = Series(expected_value)
tm.assert_series_equal(result, expected)
def test_divmod(self):
# GH#25557
a = Series([1, 1, 1, np.nan], index=["a", "b", "c", "d"])
b = Series([2, np.nan, 1, np.nan], index=["a", "b", "d", "e"])
result = a.divmod(b)
expected = divmod(a, b)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
result = a.rdivmod(b)
expected = divmod(b, a)
tm.assert_series_equal(result[0], expected[0])
tm.assert_series_equal(result[1], expected[1])
@pytest.mark.parametrize("index", [None, range(9)])
def test_series_integer_mod(self, index):
# GH#24396
s1 = Series(range(1, 10))
s2 = Series("foo", index=index)
msg = "not all arguments converted during string formatting"
with pytest.raises(TypeError, match=msg):
s2 % s1
def test_add_with_duplicate_index(self):
# GH14227
s1 = Series([1, 2], index=[1, 1])
s2 = Series([10, 10], index=[1, 2])
result = s1 + s2
expected = pd.Series([11, 12, np.nan], index=[1, 1, 2])
tm.assert_series_equal(result, expected)
def test_add_na_handling(self):
from decimal import Decimal
from datetime import date
s = Series(
[Decimal("1.3"), Decimal("2.3")], index=[date(2012, 1, 1), date(2012, 1, 2)]
)
result = s + s.shift(1)
result2 = s.shift(1) + s
assert isna(result[0])
assert isna(result2[0])
def test_add_corner_cases(self, datetime_series):
empty = Series([], index=Index([]), dtype=np.float64)
result = datetime_series + empty
assert np.isnan(result).all()
result = empty + empty.copy()
assert len(result) == 0
# FIXME: dont leave commented-out
# TODO: this returned NotImplemented earlier, what to do?
# deltas = Series([timedelta(1)] * 5, index=np.arange(5))
# sub_deltas = deltas[::2]
# deltas5 = deltas * 5
# deltas = deltas + sub_deltas
# float + int
int_ts = datetime_series.astype(int)[:-5]
added = datetime_series + int_ts
expected = Series(
datetime_series.values[:-5] + int_ts.values,
index=datetime_series.index[:-5],
name="ts",
)
tm.assert_series_equal(added[:-5], expected)
def test_mul_empty_int_corner_case(self):
s1 = Series([], [], dtype=np.int32)
s2 = Series({"x": 0.0})
tm.assert_series_equal(s1 * s2, Series([np.nan], index=["x"]))
def test_sub_datetimelike_align(self):
# GH#7500
# datetimelike ops need to align
dt = Series(date_range("2012-1-1", periods=3, freq="D"))
dt.iloc[2] = np.nan
dt2 = dt[::-1]
expected = Series([timedelta(0), timedelta(0), pd.NaT])
# name is reset
result = dt2 - dt
tm.assert_series_equal(result, expected)
expected = Series(expected, name=0)
result = (dt2.to_frame() - dt.to_frame())[0]
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Comparisons
class TestSeriesFlexComparison:
def test_comparison_flex_basic(self):
left = pd.Series(np.random.randn(10))
right = pd.Series(np.random.randn(10))
tm.assert_series_equal(left.eq(right), left == right)
tm.assert_series_equal(left.ne(right), left != right)
tm.assert_series_equal(left.le(right), left <= right)
tm.assert_series_equal(left.lt(right), left < right)
tm.assert_series_equal(left.gt(right), left > right)
tm.assert_series_equal(left.ge(right), left >= right)
# axis
for axis in [0, None, "index"]:
tm.assert_series_equal(left.eq(right, axis=axis), left == right)
tm.assert_series_equal(left.ne(right, axis=axis), left != right)
tm.assert_series_equal(left.le(right, axis=axis), left <= right)
tm.assert_series_equal(left.lt(right, axis=axis), left < right)
tm.assert_series_equal(left.gt(right, axis=axis), left > right)
tm.assert_series_equal(left.ge(right, axis=axis), left >= right)
#
msg = "No axis named 1 for object type"
for op in ["eq", "ne", "le", "le", "gt", "ge"]:
with pytest.raises(ValueError, match=msg):
getattr(left, op)(right, axis=1)
def test_comparison_flex_alignment(self):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
exp = pd.Series([False, False, True, False], index=list("abcd"))
tm.assert_series_equal(left.eq(right), exp)
exp = pd.Series([True, True, False, True], index=list("abcd"))
tm.assert_series_equal(left.ne(right), exp)
exp = pd.Series([False, False, True, False], index=list("abcd"))
tm.assert_series_equal(left.le(right), exp)
exp = pd.Series([False, False, False, False], index=list("abcd"))
tm.assert_series_equal(left.lt(right), exp)
exp = pd.Series([False, True, True, False], index=list("abcd"))
tm.assert_series_equal(left.ge(right), exp)
exp = pd.Series([False, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.gt(right), exp)
def test_comparison_flex_alignment_fill(self):
left = Series([1, 3, 2], index=list("abc"))
right = Series([2, 2, 2], index=list("bcd"))
exp = pd.Series([False, False, True, True], index=list("abcd"))
tm.assert_series_equal(left.eq(right, fill_value=2), exp)
exp = pd.Series([True, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.ne(right, fill_value=2), exp)
exp = pd.Series([False, False, True, True], index=list("abcd"))
tm.assert_series_equal(left.le(right, fill_value=0), exp)
exp = pd.Series([False, False, False, True], index=list("abcd"))
tm.assert_series_equal(left.lt(right, fill_value=0), exp)
exp = pd.Series([True, True, True, False], index=list("abcd"))
tm.assert_series_equal(left.ge(right, fill_value=0), exp)
exp = pd.Series([True, True, False, False], index=list("abcd"))
tm.assert_series_equal(left.gt(right, fill_value=0), exp)
class TestSeriesComparison:
def test_comparison_different_length(self):
a = Series(["a", "b", "c"])
b = Series(["b", "a"])
msg = "only compare identically-labeled Series"
with pytest.raises(ValueError, match=msg):
a < b
a = Series([1, 2])
b = Series([2, 3, 4])
with pytest.raises(ValueError, match=msg):
a == b
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes(self, opname):
# GH#15115
ser = Series([1, 3, 2], index=range(3))
const = 2
result = getattr(ser, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize("opname", ["eq", "ne", "gt", "lt", "ge", "le"])
def test_ser_flex_cmp_return_dtypes_empty(self, opname):
# GH#15115 empty Series case
ser = Series([1, 3, 2], index=range(3))
empty = ser.iloc[:0]
const = 2
result = getattr(empty, opname)(const).dtypes
expected = np.dtype("bool")
assert result == expected
@pytest.mark.parametrize(
"op",
[operator.eq, operator.ne, operator.le, operator.lt, operator.ge, operator.gt],
)
@pytest.mark.parametrize(
"names", [(None, None, None), ("foo", "bar", None), ("baz", "baz", "baz")]
)
def test_ser_cmp_result_names(self, names, op):
# datetime64 dtype
dti = pd.date_range("1949-06-07 03:00:00", freq="H", periods=5, name=names[0])
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# datetime64tz dtype
dti = dti.tz_localize("US/Central")
dti = pd.DatetimeIndex(dti, freq="infer") # freq not preserved by tz_localize
ser = Series(dti).rename(names[1])
result = op(ser, dti)
assert result.name == names[2]
# timedelta64 dtype
tdi = dti - dti.shift(1)
ser = Series(tdi).rename(names[1])
result = op(ser, tdi)
assert result.name == names[2]
# interval dtype
if op in [operator.eq, operator.ne]:
# interval dtype comparisons not yet implemented
ii = pd.interval_range(start=0, periods=5, name=names[0])
ser = Series(ii).rename(names[1])
result = op(ser, ii)
assert result.name == names[2]
# categorical
if op in [operator.eq, operator.ne]:
# categorical dtype comparisons raise for inequalities
cidx = tdi.astype("category")
ser = Series(cidx).rename(names[1])
result = op(ser, cidx)
assert result.name == names[2]
def test_comparisons(self):
left = np.random.randn(10)
right = np.random.randn(10)
left[:3] = np.nan
result = nanops.nangt(left, right)
with np.errstate(invalid="ignore"):
expected = (left > right).astype("O")
expected[:3] = np.nan
tm.assert_almost_equal(result, expected)
s = Series(["a", "b", "c"])
s2 = Series([False, True, False])
# it works!
exp = Series([False, False, False])
tm.assert_series_equal(s == s2, exp)
tm.assert_series_equal(s2 == s, exp)
# -----------------------------------------------------------------
# Categorical Dtype Comparisons
def test_categorical_comparisons(self):
# GH#8938
# allow equality comparisons
a = Series(list("abc"), dtype="category")
b = Series(list("abc"), dtype="object")
c = Series(["a", "b", "cc"], dtype="object")
d = Series(list("acb"), dtype="object")
e = Categorical(list("abc"))
f = Categorical(list("acb"))
# vs scalar
assert not (a == "a").all()
assert ((a != "a") == ~(a == "a")).all()
assert not ("a" == a).all()
assert (a == "a")[0]
assert ("a" == a)[0]
assert not ("a" != a)[0]
# vs list-like
assert (a == a).all()
assert not (a != a).all()
assert (a == list(a)).all()
assert (a == b).all()
assert (b == a).all()
assert ((~(a == b)) == (a != b)).all()
assert ((~(b == a)) == (b != a)).all()
assert not (a == c).all()
assert not (c == a).all()
assert not (a == d).all()
assert not (d == a).all()
# vs a cat-like
assert (a == e).all()
assert (e == a).all()
assert not (a == f).all()
assert not (f == a).all()
assert (~(a == e) == (a != e)).all()
assert (~(e == a) == (e != a)).all()
assert (~(a == f) == (a != f)).all()
assert (~(f == a) == (f != a)).all()
# non-equality is not comparable
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
a < b
with pytest.raises(TypeError, match=msg):
b < a
with pytest.raises(TypeError, match=msg):
a > b
with pytest.raises(TypeError, match=msg):
b > a
def test_unequal_categorical_comparison_raises_type_error(self):
# unequal comparison should raise for unordered cats
cat = Series(Categorical(list("abc")))
msg = "can only compare equality or not"
with pytest.raises(TypeError, match=msg):
cat > "b"
cat = Series(Categorical(list("abc"), ordered=False))
with pytest.raises(TypeError, match=msg):
cat > "b"
# https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057
# and following comparisons with scalars not in categories should raise
# for unequal comps, but not for equal/not equal
cat = Series(Categorical(list("abc"), ordered=True))
msg = "Cannot compare a Categorical for op.+with a scalar"
with pytest.raises(TypeError, match=msg):
cat < "d"
with pytest.raises(TypeError, match=msg):
cat > "d"
with pytest.raises(TypeError, match=msg):
"d" < cat
with pytest.raises(TypeError, match=msg):
"d" > cat
tm.assert_series_equal(cat == "d", Series([False, False, False]))
tm.assert_series_equal(cat != "d", Series([True, True, True]))
# -----------------------------------------------------------------
def test_comparison_tuples(self):
# GH#11339
# comparisons vs tuple
s = Series([(1, 1), (1, 2)])
result = s == (1, 2)
expected = Series([False, True])
tm.assert_series_equal(result, expected)
result = s != (1, 2)
expected = Series([True, False])
tm.assert_series_equal(result, expected)
result = s == (0, 0)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
result = s != (0, 0)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
s = Series([(1, 1), (1, 1)])
result = s == (1, 1)
expected = Series([True, True])
tm.assert_series_equal(result, expected)
result = s != (1, 1)
expected = Series([False, False])
tm.assert_series_equal(result, expected)
s = Series([frozenset([1]), frozenset([1, 2])])
result = s == frozenset([1])
expected = Series([True, False])
tm.assert_series_equal(result, expected)
def test_comparison_operators_with_nas(self):
ser = Series(bdate_range("1/1/2000", periods=10), dtype=object)
ser[::2] = np.nan
# test that comparisons work
ops = ["lt", "le", "gt", "ge", "eq", "ne"]
for op in ops:
val = ser[5]
f = getattr(operator, op)
result = f(ser, val)
expected = f(ser.dropna(), val).reindex(ser.index)
if op == "ne":
expected = expected.fillna(True).astype(bool)
else:
expected = expected.fillna(False).astype(bool)
tm.assert_series_equal(result, expected)
# FIXME: dont leave commented-out
# result = f(val, s)
# expected = f(val, s.dropna()).reindex(s.index)
# tm.assert_series_equal(result, expected)
def test_ne(self):
ts = Series([3, 4, 5, 6, 7], [3, 4, 5, 6, 7], dtype=float)
expected = [True, True, False, True, True]
assert tm.equalContents(ts.index != 5, expected)
assert tm.equalContents(~(ts.index == 5), expected)
def test_comp_ops_df_compat(self):
# GH 1134
s1 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
s2 = pd.Series([2, 2, 2], index=list("ABD"), name="x")
s3 = pd.Series([1, 2, 3], index=list("ABC"), name="x")
s4 = pd.Series([2, 2, 2, 2], index=list("ABCD"), name="x")
for left, right in [(s1, s2), (s2, s1), (s3, s4), (s4, s3)]:
msg = "Can only compare identically-labeled Series objects"
with pytest.raises(ValueError, match=msg):
left == right
with pytest.raises(ValueError, match=msg):
left != right
with pytest.raises(ValueError, match=msg):
left < right
msg = "Can only compare identically-labeled DataFrame objects"
with pytest.raises(ValueError, match=msg):
left.to_frame() == right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() != right.to_frame()
with pytest.raises(ValueError, match=msg):
left.to_frame() < right.to_frame()
def test_compare_series_interval_keyword(self):
# GH#25338
s = Series(["IntervalA", "IntervalB", "IntervalC"])
result = s == "IntervalA"
expected = Series([True, False, False])
tm.assert_series_equal(result, expected)
# ------------------------------------------------------------------
# Unsorted
# These arithmetic tests were previously in other files, eventually
# should be parametrized and put into tests.arithmetic
class TestTimeSeriesArithmetic:
# TODO: De-duplicate with test below
def test_series_add_tz_mismatch_converts_to_utc_duplicate(self):
rng = date_range("1/1/2011", periods=10, freq="H", tz="US/Eastern")
ser = Series(np.random.randn(len(rng)), index=rng)
ts_moscow = ser.tz_convert("Europe/Moscow")
result = ser + ts_moscow
assert result.index.tz is pytz.utc
result = ts_moscow + ser
assert result.index.tz is pytz.utc
def test_series_add_tz_mismatch_converts_to_utc(self):
rng = date_range("1/1/2011", periods=100, freq="H", tz="utc")
perm = np.random.permutation(100)[:90]
ser1 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("US/Eastern")
)
perm = np.random.permutation(100)[:90]
ser2 = Series(
np.random.randn(90), index=rng.take(perm).tz_convert("Europe/Berlin")
)
result = ser1 + ser2
uts1 = ser1.tz_convert("utc")
uts2 = ser2.tz_convert("utc")
expected = uts1 + uts2
assert result.index.tz == pytz.UTC
tm.assert_series_equal(result, expected)
def test_series_add_aware_naive_raises(self):
rng = date_range("1/1/2011", periods=10, freq="H")
ser = Series(np.random.randn(len(rng)), index=rng)
ser_utc = ser.tz_localize("utc")
msg = "Cannot join tz-naive with tz-aware DatetimeIndex"
with pytest.raises(Exception, match=msg):
ser + ser_utc
with pytest.raises(Exception, match=msg):
ser_utc + ser
def test_datetime_understood(self):
# Ensures it doesn't fail to create the right series
# reported in issue#16726
series = pd.Series(pd.date_range("2012-01-01", periods=3))
offset = pd.offsets.DateOffset(days=6)
result = series - offset
expected = pd.Series(pd.to_datetime(["2011-12-26", "2011-12-27", "2011-12-28"]))
tm.assert_series_equal(result, expected)
|
bsd-3-clause
| 4,321,271,863,813,454,300
| 33.882438
| 88
| 0.544562
| false
| 3.534932
| true
| false
| false
|
simkuring/simkuring_telegram_bot
|
bot.py
|
1
|
2433
|
import requests
from time import sleep
import json
import ConfigParser
import modules
# config
config = ConfigParser.ConfigParser()
config.read("config.ini")
key = config.get("setting","key")
limit = config.getint("setting","limit")
sleepTime = config.getint("setting","sleep")
queryLimit = config.getint("setting","query_limit")
timeout = config.getint("setting","timeout")
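# A config.ini layout this script expects (sketch; section and key names are
# taken from the get* calls above, values are placeholders):
#
#   [setting]
#   key = <telegram-bot-token>
#   limit = 100
#   sleep = 2
#   query_limit = 100
#   timeout = 30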
# set url
headers = {"Content-type": "application/x-www-form-urlencoded"}
url = "https://api.telegram.org/bot"
sendMsgUrl = url + key + "/sendMessage"
getMsgUrl = url + key + "/getUpdates"
# help and about
def help(args):
return """
/jam
/adzan [bandung, bogor, jakarta, aceh, samarinda, balikpapan, makassar]
/ddg [keyword]
/about
"""
def about(args):
about = """
Bot Simkuring v0.1 alpha by Simkuring Laboratory
"""
return about
# bot command list + function
commandLists = {
"/jam":modules.jam,
"/adzan":modules.adzan,
"/ddg":modules.ddg,
"/about":about,
"/help":help
}
def sendMessage(chatId, msgId, text):
try:
data = {"chat_id":chatId,"text":text,"reply_to_message_id":msgId}
r = requests.post(sendMsgUrl,data=data)
if r.status_code != 200:
print r.status_code
except requests.exceptions.RequestException:
print "sendMessage request to Telegram failed"
def parseCommand(msg):
panjang = len(msg['result'])
for i in range(panjang):
try:
perintah = msg['result'][i]['message']['text'].replace("@SimkuringBot","")
command = perintah.split()
if command[0] in commandLists.keys():
data = commandLists[command[0]](command)
sendMessage(msg['result'][i]['message']['chat']['id'], msg['result'][i]['message']['message_id'], data)
except Exception:
# skip updates without a usable text command instead of crashing the loop
pass
def main():
lastMessageId = 0
while True:
data = {
"offset":lastMessageId,
"timeout":timeout,
"limit":queryLimit
}
bot = requests.post(getMsgUrl,data=data)
if bot.status_code == 200:
msg = bot.json()
panjang = len(msg['result'])
if panjang > 0 :
if panjang < limit :
parseCommand(msg)
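# acknowledge processed updates: Telegram's getUpdates long poll expects
# offset to be the highest update_id seen so far plus one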
lastMessageId = msg['result'][panjang-1]['update_id'] + 1
else:
print bot.status_code
sleep(sleepTime)
if __name__ == "__main__":
main()
|
gpl-3.0
| 7,329,419,161,624,676,000
| 26.659091
| 119
| 0.572544
| false
| 3.615156
| true
| false
| false
|
ElricleNecro/CalculServer
|
setup.py
|
1
|
1267
|
#! /usr/bin/env python3
# -*- coding:Utf8 -*-
#--------------------------------------------------------------------------------------------------------------
# All necessary import:
#--------------------------------------------------------------------------------------------------------------
import os, sys, glob
#from setuptools import find_packages
import setuptools as st
from distutils.core import setup
from distutils.command.install_data import install_data
packages = st.find_packages()
#--------------------------------------------------------------------------------------------------------------
# Call the setup function:
#--------------------------------------------------------------------------------------------------------------
setup(
name = 'CalculServer',
version = '0.1',
description = 'Python module for analysing Gadget simulations on two different computers.',
author = 'Guillaume Plum',
packages = packages,
cmdclass = {'install_data': install_data},
# data_files = [
# ('share/LibThese/animation-plugins', ["share/LibThese/animation-plugins/__init__.py"]), #glob.glob("share/LibThese/animation-plugins/*.py")),
# ],
scripts = [
'scripts/cs_notifier.py',
'scripts/cs_runner.py',
],
)
#vim:spelllang=
|
lgpl-3.0
| 3,314,974,385,399,036,000
| 35.2
| 145
| 0.444357
| false
| 4.675277
| false
| false
| false
|
lukasmonk/lucaschess
|
Code/GestorOpeningLines.py
|
1
|
37899
|
import time
import random
from Code import Gestor
from Code import Jugada
from Code import Books
from Code import ControlPosicion
from Code import TrListas
from Code.QT import QTUtil2
from Code.QT import Iconos
from Code.QT import QTVarios
from Code import Util
from Code import OpeningLines
from Code import XMotorRespuesta
from Code import Partida
from Code.Constantes import *
class GestorOpeningEngines(Gestor.Gestor):
def inicio(self, pathFichero):
self.tablero.saveVisual()
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.tablero.dbVisual_setFichero(dbop.nomFichero)
self.reinicio(dbop)
def reinicio(self, dbop):
self.dbop = dbop
self.dbop.open_cache_engines()
self.tipoJuego = kJugOpeningLines
self.level = self.dbop.getconfig("ENG_LEVEL", 0)
self.numengine = self.dbop.getconfig("ENG_ENGINE", 0)
self.trainingEngines = self.dbop.trainingEngines()
self.auto_analysis = self.trainingEngines.get("AUTO_ANALYSIS", True)
self.ask_movesdifferent = self.trainingEngines.get("ASK_MOVESDIFFERENT", False)
liTimes = self.trainingEngines.get("TIMES")
if not liTimes:
liTimes = [500, 1000, 2000, 4000, 8000]
liBooks = self.trainingEngines.get("BOOKS")
if not liBooks:
liBooks = ["", "", "", "", ""]
liEngines = self.trainingEngines["ENGINES"]
num_engines_base = len(liEngines)
liEnginesExt = self.trainingEngines.get("EXT_ENGINES", [])
num_engines = num_engines_base+len(liEnginesExt)
if self.numengine >= num_engines:
self.level += 1
self.numengine = 0
self.dbop.setconfig("ENG_LEVEL", self.level)
self.dbop.setconfig("ENG_ENGINE", 0)
num_levels = len(liTimes)
if self.level >= num_levels:
if QTUtil2.pregunta(self.pantalla, "%s.\n%s" % (_("Training finished"), _("Do you want to reinit?"))):
self.dbop.setconfig("ENG_LEVEL", 0)
self.dbop.setconfig("ENG_ENGINE", 0)
self.reinicio(dbop)
return
self.time = liTimes[self.level]
nombook = liBooks[self.level]
if nombook:
listaLibros = Books.ListaLibros()
listaLibros.recuperaVar(self.configuracion.ficheroBooks)
self.book = listaLibros.buscaLibro(nombook)
if self.book:
self.book.polyglot()
else:
self.book = None
if self.numengine < num_engines_base:
self.keyengine = liEngines[self.numengine]
else:
self.keyengine = "*" + liEnginesExt[self.numengine-num_engines_base-1]
self.plies_mandatory = self.trainingEngines["MANDATORY"]
self.plies_control = self.trainingEngines["CONTROL"]
self.plies_pendientes = self.plies_control
self.lost_points = self.trainingEngines["LOST_POINTS"]
self.siJugamosConBlancas = self.trainingEngines["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.siAprobado = False
rival = self.configuracion.buscaRivalExt(self.keyengine)
self.xrival = self.procesador.creaGestorMotor(rival, self.time, None)
self.xrival.siBlancas = self.siRivalConBlancas
juez = self.configuracion.buscaRival(self.trainingEngines["ENGINE_CONTROL"])
self.xjuez = self.procesador.creaGestorMotor(juez, int(self.trainingEngines["ENGINE_TIME"] * 1000), None)
self.xjuez.anulaMultiPV()
self.li_info = [
"<b>%s</b>: %d/%d - %s" % (_("Engine"), self.numengine+1, num_engines, self.xrival.nombre),
"<b>%s</b>: %d/%d - %0.1f\"" % (_("Level"), self.level + 1, num_levels, self.time / 1000.0),
]
self.dicFENm2 = self.trainingEngines["DICFENM2"]
self.siAyuda = False
self.tablero.dbVisual_setShowAllways(False)
self.ayudas = 9999 # so analysis can run without restrictions
self.partida = Partida.Partida()
self.pantalla.ponToolBar((k_mainmenu, k_abandonar, k_reiniciar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(self.partida.ultPosicion)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
if not self.runcontrol():
if siRival:
self.desactivaTodas()
if self.mueveRival():
self.siguienteJugada()
else:
self.activaColor(siBlancas)
self.siJuegaHumano = True
def mueveRival(self):
si_obligatorio = self.partida.numJugadas() <= self.plies_mandatory
si_pensar = True
fenM2 = self.partida.ultPosicion.fenM2()
moves = self.dicFENm2.get(fenM2, set())
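# dicFENm2 maps a truncated FEN (without the move counters, cf. the
# " 0 1" appended to FENM2 elsewhere) to the set of moves that stay
# inside the stored opening lines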
if si_obligatorio:
nmoves = len(moves)
if nmoves == 0:
si_obligatorio = False
else:
move = self.dbop.get_cache_engines(self.keyengine, self.time, fenM2)
if move is None:
if self.book:
move_book = self.book.eligeJugadaTipo(self.partida.ultPosicion.fen(), "au")
if move_book in list(moves):
move = move_book
if move is None:
move = random.choice(list(moves))
self.dbop.set_cache_engines(self.keyengine, self.time, fenM2, move)
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
si_pensar = False
if si_pensar:
move = None
if self.book:
move = self.book.eligeJugadaTipo(self.partida.ultPosicion.fen(), "mp")
if move is None:
move = self.dbop.get_cache_engines(self.keyengine, self.time, fenM2)
if move is None:
rmRival = self.xrival.juegaPartida(self.partida)
move = rmRival.movimiento()
self.dbop.set_cache_engines(self.keyengine, self.time, fenM2, move)
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
if si_obligatorio:
if move not in moves:
move = list(moves)[0]
desde, hasta, coronacion = move[:2], move[2:4], move[4:]
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
if siBien:
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
return True
else:
self.error = mens
return False
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
fenM2 = self.partida.ultPosicion.fenM2()
moves = self.dicFENm2.get(fenM2, [])
nmoves = len(moves)
if nmoves > 0:
if jg.movimiento() not in moves:
for move in moves:
self.tablero.creaFlechaMulti(move, False)
self.tablero.creaFlechaMulti(jg.movimiento(), True)
if self.ask_movesdifferent:
mensaje = "%s\n%s" % (_("This is not the move in the opening lines"),
_("Do you want to go on with this move?"))
if not QTUtil2.pregunta(self.pantalla, mensaje):
self.ponFinJuego()
return True
else:
self.mensajeEnPGN(_("This is not the move in the opening lines, you must repeat the game"))
self.ponFinJuego()
return True
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.siguienteJugada()
return True
def masJugada(self, jg, siNuestra):
fenM2 = jg.posicionBase.fenM2()
jg.es_linea = False
if fenM2 in self.dicFENm2:
if jg.movimiento() in self.dicFENm2[fenM2]:
jg.criticaDirecta = "!"
jg.es_linea = True
self.partida.append_jg(jg)
if self.partida.pendienteApertura:
self.partida.asignaApertura()
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def muestraInformacion(self):
li = []
li.extend(self.li_info)
si_obligatorio = self.partida.numJugadas() < self.plies_mandatory
if si_obligatorio and self.estado != kFinJuego:
fenM2 = self.partida.ultPosicion.fenM2()
moves = self.dicFENm2.get(fenM2, [])
if len(moves) > 0:
li.append( "<b>%s</b>: %d/%d" % (_("Mandatory move"), self.partida.numJugadas()+1, self.plies_mandatory))
else:
si_obligatorio = False
if not si_obligatorio and self.estado != kFinJuego:
tm = self.plies_pendientes
if tm > 1 and self.partida.numJugadas() and not self.partida.jugada(-1).es_linea:
li.append("%s: %d" % (_("Moves until the control"), tm-1))
self.ponRotulo1("<br>".join(li))
def run_auto_analysis(self):
lista = []
for njg in range(self.partida.numJugadas()):
jg = self.partida.jugada(njg)
if jg.siBlancas() == self.siJugamosConBlancas:
fenM2 = jg.posicionBase.fenM2()
if fenM2 not in self.dicFENm2:
jg.njg = njg
lista.append(jg)
jg.fenM2 = fenM2
total = len(lista)
for pos, jg in enumerate(lista, 1):
if self.siCancelado():
break
self.ponteEnJugada(jg.njg)
self.mensEspera(siCancelar=True, masTitulo="%d/%d" % (pos, total))
nombre = self.xanalyzer.nombre
tiempo = self.xanalyzer.motorTiempoJugada
depth = self.xanalyzer.motorProfundidad
mrm = self.dbop.get_cache_engines(nombre, tiempo, jg.fenM2, depth)
ok = False
if mrm:
rm, pos = mrm.buscaRM(jg.movimiento())
if rm:
ok = True
if not ok:
mrm, pos = self.xanalyzer.analizaJugada(jg, self.xanalyzer.motorTiempoJugada, self.xanalyzer.motorProfundidad)
self.dbop.set_cache_engines(nombre, tiempo, jg.fenM2, mrm, depth)
jg.analisis = mrm, pos
self.pantalla.base.pgnRefresh()
def mensEspera(self, siFinal=False, siCancelar=False, masTitulo=None):
if siFinal:
if self.um:
self.um.final()
else:
if self.um is None:
self.um = QTUtil2.mensajeTemporal(self.pantalla, _("Analyzing"), 0, posicion="ad", siCancelar=True,
titCancelar=_("Cancel"))
if masTitulo:
self.um.rotulo( _("Analyzing") + " " + masTitulo )
self.um.me.activarCancelar(siCancelar)
def siCancelado(self):
si = self.um.cancelado()
if si:
self.um.final()
return si
def runcontrol(self):
puntosInicio, mateInicio = 0, 0
puntosFinal, mateFinal = 0, 0
numJugadas = self.partida.numJugadas()
if numJugadas == 0:
return False
self.um = None # controls the "unMomento" wait dialog
def aprobado():
mens = "<b><span style=\"color:green\">%s</span></b>" % _("Congratulations, goal achieved")
self.li_info.append("")
self.li_info.append(mens)
self.muestraInformacion()
self.dbop.setconfig("ENG_ENGINE", self.numengine + 1)
self.mensajeEnPGN(mens)
self.siAprobado = True
def suspendido():
mens = "<b><span style=\"color:red\">%s</span></b>" % _("You must repeat the game")
self.li_info.append("")
self.li_info.append(mens)
self.muestraInformacion()
self.mensajeEnPGN(mens)
def calculaJG(jg, siinicio):
fen = jg.posicionBase.fen() if siinicio else jg.posicion.fen()
nombre = self.xjuez.nombre
tiempo = self.xjuez.motorTiempoJugada
mrm = self.dbop.get_cache_engines(nombre, tiempo, fen)
if mrm is None:
self.mensEspera()
mrm = self.xjuez.analiza(fen)
self.dbop.set_cache_engines(nombre, tiempo, fen, mrm)
rm = mrm.mejorMov()
if (" w " in fen) == self.siJugamosConBlancas:
return rm.puntos, rm.mate
else:
return -rm.puntos, -rm.mate
siCalcularInicio = True
if self.partida.siTerminada():
self.ponFinJuego()
jg = self.partida.jugada(-1)
if jg.siJaqueMate:
if jg.siBlancas() == self.siJugamosConBlancas:
aprobado()
else:
suspendido()
self.ponFinJuego()
return True
puntosFinal, mateFinal = 0, 0
else:
jg = self.partida.jugada(-1)
if jg.es_linea:
self.plies_pendientes = self.plies_control
else:
self.plies_pendientes -= 1
if self.plies_pendientes > 0:
return False
# If the last move is still on the line, nothing is evaluated
self.mensEspera()
puntosFinal, mateFinal = calculaJG(jg, False)
# Every move that does not follow the lines gets marked
# and the last one of the player's colour is looked up
if siCalcularInicio:
jg_inicial = None
for njg in range(numJugadas):
jg = self.partida.jugada(njg)
fenM2 = jg.posicionBase.fenM2()
if fenM2 in self.dicFENm2:
moves = self.dicFENm2[fenM2]
if jg.movimiento() not in moves:
jg.criticaDirecta = "?!"
if jg_inicial is None:
jg_inicial = jg
elif jg_inicial is None:
jg_inicial = jg
if jg_inicial:
puntosInicio, mateInicio = calculaJG(jg_inicial, True)
else:
puntosInicio, mateInicio = 0, 0
self.li_info.append("<b>%s:</b>" %_("Score"))
template = " <b>%s</b>: %d"
def appendInfo(label, puntos, mate):
mens = template % (label, puntos)
if mate:
mens += " %s %d" % (_("Mate"), mate)
self.li_info.append(mens)
appendInfo(_("Start"), puntosInicio, mateInicio)
appendInfo(_("End"), puntosFinal, mateFinal)
perdidos = (puntosInicio-puntosFinal)
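# the attempt passes when the evaluation lost between the first off-line
# move and the final position stays below LOST_POINTS (engine score
# units, typically centipawns)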
ok = perdidos < self.lost_points
if mateInicio or mateFinal:
ok = mateFinal > mateInicio
mens = template % ("(%d)-(%d)" %(puntosInicio, puntosFinal), perdidos)
mens = "%s %s %d" %(mens, "<" if ok else ">", self.lost_points)
self.li_info.append(mens)
if not ok:
if self.auto_analysis:
self.run_auto_analysis()
self.mensEspera(siFinal=True)
suspendido()
else:
self.mensEspera(siFinal=True)
aprobado()
self.ponFinJuego()
return True
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave in (k_reiniciar, k_siguiente):
self.reiniciar()
elif clave == k_peliculaRepetir:
self.dbop.setconfig("ENG_ENGINE", self.numengine)
self.reiniciar()
elif clave == k_abandonar:
self.ponFinJuego()
elif clave == k_configurar:
self.configurar(siSonidos=True)
elif clave == k_utilidades:
liMasOpciones = []
liMasOpciones.append(("libros", _("Consult a book"), Iconos.Libros()))
liMasOpciones.append((None, None, None))
liMasOpciones.append((None, _("Options"), Iconos.Opciones()))
mens = _("cancel") if self.auto_analysis else _("activate")
liMasOpciones.append(("auto_analysis", "%s: %s" % (_("Automatic analysis"), mens), Iconos.Analizar()))
liMasOpciones.append((None, None, None))
mens = _("cancel") if self.ask_movesdifferent else _("activate")
liMasOpciones.append(("ask_movesdifferent", "%s: %s" % (_("Ask when the moves are different from the line"), mens), Iconos.Pelicula_Seguir()))
liMasOpciones.append((None, None, True)) # to exit the submenu
liMasOpciones.append((None, None, None))
liMasOpciones.append(("run_analysis", _("Specific analysis"), Iconos.Analizar()))
liMasOpciones.append((None, None, None))
liMasOpciones.append(("add_line", _("Add this line"), Iconos.OpeningLines()))
resp = self.utilidades(liMasOpciones)
if resp == "libros":
self.librosConsulta(False)
elif resp == "add_line":
numJugadas, nj, fila, siBlancas = self.jugadaActual()
partida = self.partida
if numJugadas != nj+1:
menu = QTVarios.LCMenu(self.pantalla)
menu.opcion("all", _("Add all moves"), Iconos.PuntoAzul())
menu.separador()
menu.opcion("parcial", _("Add until current move"), Iconos.PuntoVerde())
resp = menu.lanza()
if resp is None:
return
if resp == "parcial":
partida = self.partida.copia(nj)
self.dbop.append(partida)
self.dbop.updateTrainingEngines()
QTUtil2.mensaje(self.pantalla, _("Done"))
elif resp == "auto_analysis":
self.auto_analysis = not self.auto_analysis
self.trainingEngines["AUTO_ANALYSIS"] = self.auto_analysis
self.dbop.setTrainingEngines(self.trainingEngines)
elif resp == "ask_movesdifferent":
self.ask_movesdifferent = not self.ask_movesdifferent
self.trainingEngines["ASK_MOVESDIFFERENT"] = self.ask_movesdifferent
self.dbop.setTrainingEngines(self.trainingEngines)
elif resp == "run_analysis":
self.um = None
self.mensEspera()
self.run_auto_analysis()
self.mensEspera(siFinal=True)
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.tablero.restoreVisual()
self.procesador.inicio()
self.procesador.openings()
return False
def reiniciar(self):
self.reinicio(self.dbop)
def ponFinJuego(self):
self.estado = kFinJuego
self.desactivaTodas()
liOpciones = [k_mainmenu]
if self.siAprobado:
liOpciones.append(k_siguiente)
liOpciones.append(k_peliculaRepetir)
else:
liOpciones.append(k_reiniciar)
liOpciones.append(k_configurar)
liOpciones.append(k_utilidades)
self.pantalla.ponToolBar(liOpciones)
class GestorOpeningLines(Gestor.Gestor):
def inicio(self, pathFichero, modo, num_linea):
self.tablero.saveVisual()
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.tablero.dbVisual_setFichero(dbop.nomFichero)
self.reinicio(dbop, modo, num_linea)
def reinicio(self, dbop, modo, num_linea):
self.dbop = dbop
self.tipoJuego = kJugOpeningLines
self.modo = modo
self.num_linea = num_linea
self.training = self.dbop.training()
self.liGames = self.training["LIGAMES_%s" % modo.upper()]
self.game = self.liGames[num_linea]
self.liPV = self.game["LIPV"]
self.numPV = len(self.liPV)
self.calc_totalTiempo()
self.dicFENm2 = self.training["DICFENM2"]
li = self.dbop.getNumLinesPV(self.liPV)
if len(li) > 10:
mensLines = ",".join(["%d"%line for line in li[:10]]) + ", ..."
else:
mensLines = ",".join(["%d"%line for line in li])
self.liMensBasic = [
"%d/%d" % (self.num_linea+1, len(self.liGames)),
"%s: %s" % (_("Lines"), mensLines),
]
self.siAyuda = False
self.tablero.dbVisual_setShowAllways(False)
self.partida = Partida.Partida()
self.ayudas = 9999 # so analysis can run without restrictions
self.siJugamosConBlancas = self.training["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.pantalla.ponToolBar((k_mainmenu, k_ayuda, k_reiniciar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(self.partida.ultPosicion)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def calc_totalTiempo(self):
self.tm = 0
for game in self.liGames:
for tr in game["TRIES"]:
self.tm += tr["TIME"]
def ayuda(self):
self.siAyuda = True
self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
self.tablero.dbVisual_setShowAllways(True)
self.muestraAyuda()
self.muestraInformacion()
def muestraInformacion(self):
li = []
li.append("%s: %d" %(_("Errors"), self.errores))
if self.siAyuda:
li.append(_("Help activated"))
self.ponRotulo1("\n".join(li))
tgm = 0
for tr in self.game["TRIES"]:
tgm += tr["TIME"]
mens = "\n" + "\n".join(self.liMensBasic)
mens += "\n%s:\n %s %s\n %s %s" % (_("Working time"),
time.strftime("%H:%M:%S", time.gmtime(tgm)), _("Current"),
time.strftime("%H:%M:%S", time.gmtime(self.tm)), _("Total"))
self.ponRotulo2(mens)
if self.siAyuda:
dicNAGs = TrListas.dicNAGs()
mens3 = ""
fenM2 = self.partida.ultPosicion.fenM2()
reg = self.dbop.getfenvalue(fenM2)
if reg:
mens3 = reg.get("COMENTARIO", "")
ventaja = reg.get("VENTAJA", 0)
valoracion = reg.get("VALORACION", 0)
if ventaja:
mens3 += "\n %s" % dicNAGs[ventaja]
if valoracion:
mens3 += "\n %s" % dicNAGs[valoracion]
self.ponRotulo3(mens3 if mens3 else None)
def partidaTerminada(self, siCompleta):
self.estado = kFinJuego
tm = time.time() - self.ini_time
li = [_("Line finished.")]
if self.siAyuda:
li.append(_("Help activated"))
if self.errores > 0:
li.append("%s: %d" % (_("Errors"), self.errores))
if siCompleta:
mensaje = "\n".join(li)
self.mensajeEnPGN(mensaje)
dictry = {
"DATE": Util.hoy(),
"TIME": tm,
"AYUDA": self.siAyuda,
"ERRORS": self.errores
}
self.game["TRIES"].append(dictry)
sinError = self.errores == 0 and not self.siAyuda
if siCompleta:
if sinError:
self.game["NOERROR"] += 1
noError = self.game["NOERROR"]
if self.modo == "sequential":
salto = 2**(noError + 1)
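# (clarifying note) each error-free repetition doubles the jump: the
# finished line is reinserted roughly 2**(noError+1) games back, so
# well-learned lines come up less and less often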
numGames = len(self.liGames)
for x in range(salto, numGames):
game = self.liGames[x]
if game["NOERROR"] != noError:
salto = x
break
liNuevo = self.liGames[1:salto]
liNuevo.append(self.game)
if numGames > salto:
liNuevo.extend(self.liGames[salto:])
self.training["LIGAMES_SEQUENTIAL"] = liNuevo
self.pantalla.ponToolBar((k_mainmenu, k_siguiente))
else:
self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
else:
self.game["NOERROR"] -= 1
self.pantalla.ponToolBar((k_mainmenu, k_reiniciar, k_configurar, k_utilidades))
else:
if not sinError:
self.game["NOERROR"] -= 1
self.game["NOERROR"] = max(0, self.game["NOERROR"])
self.dbop.setTraining(self.training)
self.estado = kFinJuego
self.calc_totalTiempo()
self.muestraInformacion()
def muestraAyuda(self):
pv = self.liPV[len(self.partida)]
self.tablero.creaFlechaMov(pv[:2], pv[2:4], "mt80")
fenM2 = self.partida.ultPosicion.fenM2()
for pv1 in self.dicFENm2[fenM2]:
if pv1 != pv:
self.tablero.creaFlechaMov(pv1[:2], pv1[2:4], "ms40")
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_reiniciar:
self.reiniciar()
elif clave == k_configurar:
self.configurar(siSonidos=True)
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.reinicio(self.dbop, self.modo, self.num_linea)
elif clave == k_ayuda:
self.ayuda()
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.tablero.restoreVisual()
self.procesador.inicio()
if self.modo == "static":
self.procesador.openingsTrainingStatic(self.pathFichero)
else:
self.procesador.openings()
return False
def reiniciar(self):
if len(self.partida) > 0 and self.estado != kFinJuego:
self.partidaTerminada(False)
self.reinicio(self.dbop, self.modo, self.num_linea)
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
siRival = siBlancas == self.siRivalConBlancas
numJugadas = len(self.partida)
if numJugadas >= self.numPV:
self.partidaTerminada(True)
return
pv = self.liPV[numJugadas]
if siRival:
self.desactivaTodas()
self.rmRival = XMotorRespuesta.RespuestaMotor("Apertura", self.siRivalConBlancas)
self.rmRival.desde = pv[:2]
self.rmRival.hasta = pv[2:4]
self.rmRival.coronacion = pv[4:]
self.mueveRival(self.rmRival)
self.siguienteJugada()
else:
self.activaColor(siBlancas)
self.siJuegaHumano = True
if self.siAyuda:
self.muestraAyuda()
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
pvSel = desde + hasta + coronacion
pvObj = self.liPV[len(self.partida)]
if pvSel != pvObj:
fenM2 = jg.posicionBase.fenM2()
li = self.dicFENm2.get(fenM2, [])
if pvSel in li:
mens = _("You have selected a correct move, but this line uses another one.")
QTUtil2.mensajeTemporal(self.pantalla, mens, 2, posicion="tb", background="#C3D6E8")
self.sigueHumano()
return False
self.errores += 1
mens = "%s: %d" % (_("Error"), self.errores)
QTUtil2.mensajeTemporal(self.pantalla, mens, 1.2, posicion="ad", background="#FF9B00", pmImagen=Iconos.pmError())
self.muestraInformacion()
self.sigueHumano()
return False
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.siguienteJugada()
return True
def masJugada(self, jg, siNuestra):
self.partida.append_jg(jg)
if self.partida.pendienteApertura:
self.partida.asignaApertura()
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
def mueveRival(self, respMotor):
desde = respMotor.desde
hasta = respMotor.hasta
coronacion = respMotor.coronacion
siBien, mens, jg = Jugada.dameJugada(self.partida.ultPosicion, desde, hasta, coronacion)
if siBien:
self.partida.ultPosicion = jg.posicion
self.masJugada(jg, False)
self.movimientosPiezas(jg.liMovs, True)
self.error = ""
return True
else:
self.error = mens
return False
class GestorOpeningLinesPositions(Gestor.Gestor):
def inicio(self, pathFichero):
self.pathFichero = pathFichero
dbop = OpeningLines.Opening(pathFichero)
self.reinicio(dbop)
def reinicio(self, dbop):
self.dbop = dbop
self.tipoJuego = kJugOpeningLines
self.training = self.dbop.training()
self.liTrainPositions = self.training["LITRAINPOSITIONS"]
self.trposition = self.liTrainPositions[0]
self.tm = 0
for game in self.liTrainPositions:
for tr in game["TRIES"]:
self.tm += tr["TIME"]
self.liMensBasic = [
"%s: %d" % (_("Moves"), len(self.liTrainPositions)),
]
self.siAyuda = False
self.siSaltoAutomatico = True
cp = ControlPosicion.ControlPosicion()
cp.leeFen(self.trposition["FENM2"] + " 0 1")
self.partida = Partida.Partida(iniPosicion=cp)
        self.ayudas = 9999  # So that analysis runs without problems
self.siJugamosConBlancas = self.training["COLOR"] == "WHITE"
self.siRivalConBlancas = not self.siJugamosConBlancas
self.pantalla.ponToolBar((k_mainmenu, k_ayuda, k_configurar))
self.pantalla.activaJuego(True, False, siAyudas=False)
self.ponMensajero(self.mueveHumano)
self.ponPosicion(cp)
self.mostrarIndicador(True)
self.quitaAyudas()
self.ponPiezasAbajo(self.siJugamosConBlancas)
self.pgnRefresh(True)
self.ponCapInfoPorDefecto()
self.estado = kJugando
self.ponPosicionDGT()
self.quitaInformacion()
self.errores = 0
self.ini_time = time.time()
self.muestraInformacion()
self.siguienteJugada()
def ayuda(self):
self.siAyuda = True
self.pantalla.ponToolBar((k_mainmenu, k_configurar))
self.muestraAyuda()
self.muestraInformacion()
def muestraInformacion(self):
li = []
li.append("%s: %d" %(_("Errors"), self.errores))
if self.siAyuda:
li.append(_("Help activated"))
self.ponRotulo1("\n".join(li))
tgm = 0
for tr in self.trposition["TRIES"]:
tgm += tr["TIME"]
mas = time.time() - self.ini_time
mens = "\n" + "\n".join(self.liMensBasic)
mens += "\n%s:\n %s %s\n %s %s" % (_("Working time"),
time.strftime("%H:%M:%S", time.gmtime(tgm+mas)), _("Current"),
time.strftime("%H:%M:%S", time.gmtime(self.tm+mas)), _("Total"))
self.ponRotulo2(mens)
def posicionTerminada(self):
tm = time.time() - self.ini_time
siSalta = self.siSaltoAutomatico and self.errores == 0 and self.siAyuda == False
if not siSalta:
li = [_("Finished.")]
if self.siAyuda:
li.append(_("Help activated"))
if self.errores > 0:
li.append("%s: %d" % (_("Errors"), self.errores))
QTUtil2.mensajeTemporal(self.pantalla, "\n".join(li), 1.2)
dictry = {
"DATE": Util.hoy(),
"TIME": tm,
"AYUDA": self.siAyuda,
"ERRORS": self.errores
}
self.trposition["TRIES"].append(dictry)
sinError = self.errores == 0 and not self.siAyuda
if sinError:
self.trposition["NOERROR"] += 1
else:
self.trposition["NOERROR"] = max(0, self.trposition["NOERROR"]-1)
noError = self.trposition["NOERROR"]
salto = 2**(noError + 1) + 1
numPosics = len(self.liTrainPositions)
for x in range(salto, numPosics):
posic = self.liTrainPositions[x]
if posic["NOERROR"] != noError:
salto = x
break
liNuevo = self.liTrainPositions[1:salto]
liNuevo.append(self.trposition)
if numPosics > salto:
liNuevo.extend(self.liTrainPositions[salto:])
self.training["LITRAINPOSITIONS"] = liNuevo
self.pantalla.ponToolBar((k_mainmenu, k_siguiente, k_configurar))
self.dbop.setTraining(self.training)
self.estado = kFinJuego
self.muestraInformacion()
if siSalta:
self.reinicio(self.dbop)
def muestraAyuda(self):
liMoves = self.trposition["MOVES"]
for pv in liMoves:
self.tablero.creaFlechaMov(pv[:2], pv[2:4], "mt80")
def procesarAccion(self, clave):
if clave == k_mainmenu:
self.finPartida()
elif clave == k_configurar:
base = _("What to do after solving")
if self.siSaltoAutomatico:
liMasOpciones = [("lmo_stop", "%s: %s" % (base, _("Stop")), Iconos.PuntoRojo())]
else:
liMasOpciones = [("lmo_jump", "%s: %s" % (base, _("Jump to the next")), Iconos.PuntoVerde())]
resp = self.configurar(siSonidos=True, siCambioTutor=False, liMasOpciones=liMasOpciones)
if resp in ("lmo_stop", "lmo_jump"):
self.siSaltoAutomatico = resp == "lmo_jump"
elif clave == k_utilidades:
self.utilidades()
elif clave == k_siguiente:
self.reinicio(self.dbop)
elif clave == k_ayuda:
self.ayuda()
else:
Gestor.Gestor.rutinaAccionDef(self, clave)
def finalX(self):
return self.finPartida()
def finPartida(self):
self.dbop.close()
self.procesador.inicio()
self.procesador.openings()
return False
def siguienteJugada(self):
self.muestraInformacion()
if self.estado == kFinJuego:
return
self.estado = kJugando
self.siJuegaHumano = False
self.ponVista()
siBlancas = self.partida.ultPosicion.siBlancas
self.ponIndicador(siBlancas)
self.refresh()
self.activaColor(siBlancas)
self.siJuegaHumano = True
if self.siAyuda:
self.muestraAyuda()
def mueveHumano(self, desde, hasta, coronacion=""):
jg = self.checkMueveHumano(desde, hasta, coronacion)
if not jg:
return False
pvSel = desde + hasta + coronacion
lipvObj = self.trposition["MOVES"]
if pvSel not in lipvObj:
self.errores += 1
mens = "%s: %d" % (_("Error"), self.errores)
QTUtil2.mensajeTemporal(self.pantalla, mens, 2, posicion="ad", background="#FF9B00")
self.muestraInformacion()
self.sigueHumano()
return False
self.movimientosPiezas(jg.liMovs)
self.masJugada(jg, True)
self.posicionTerminada()
return True
def masJugada(self, jg, siNuestra):
self.partida.append_jg(jg)
if self.partida.pendienteApertura:
self.partida.asignaApertura()
self.ponFlechaSC(jg.desde, jg.hasta)
self.beepExtendido(siNuestra)
self.pgnRefresh(self.partida.ultPosicion.siBlancas)
self.refresh()
self.ponPosicionDGT()
|
gpl-2.0
| 7,077,392,566,974,328,000
| 33.737855
| 154
| 0.555107
| false
| 3.215595
| true
| false
| false
|
Null01/detect-polygons-from-image
|
src/plot_edge_filter.py
|
1
|
2379
|
"""
==============
Edge operators
==============
Edge operators are used in image processing within edge detection algorithms.
They are discrete differentiation operators, computing an approximation of the
gradient of the image intensity function.
"""
import numpy as np
import matplotlib.pyplot as plt
from skimage.data import camera
from skimage.filters import roberts, sobel, scharr, prewitt
from scipy import misc
from skimage import color
from skimage import measure
from skimage.measure import find_contours, approximate_polygon
#image = camera()
file_tile = "tile_colorizer_05.png"
fimg = misc.imread("../web/img/"+file_tile)
image = color.colorconv.rgb2grey(fimg)
contours = measure.find_contours(image, 0.75)
coords = approximate_polygon(contours[0], tolerance=0.02)
edge_roberts = roberts(image)
print edge_roberts
edge_sobel = sobel(image)
fig, ax = plt.subplots(ncols=2, sharex=True, sharey=True,
figsize=(8, 4))
ax[0].imshow(edge_roberts, cmap=plt.cm.gray)
ax[0].set_title('Roberts Edge Detection')
ax[1].imshow(edge_sobel, cmap=plt.cm.gray)
ax[1].set_title('Sobel Edge Detection')
for a in ax:
a.axis('off')
plt.tight_layout()
plt.show()
######################################################################
# Different operators compute different finite-difference approximations of
# the gradient. For example, the Scharr filter results in a less rotational
# variance than the Sobel filter that is in turn better than the Prewitt
# filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters
# and the Scharr filter is illustrated below with an image that is the
# discretization of a rotation-invariant continuous function. The
# discrepancy between the Prewitt and Sobel filters, and the Scharr filter is
# stronger for regions of the image where the direction of the gradient is
# close to diagonal, and for regions with high spatial frequencies. For the
# example image the differences between the filter results are very small and
# the filter results are visually almost indistinguishable.
#
# .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators
#
# .. [2] B. Jaehne, H. Scharr, and S. Koerkel. Principles of filter design.
# In Handbook of Computer Vision and Applications. Academic Press,
# 1999.
#
# .. [3] https://en.wikipedia.org/wiki/Prewitt_operator
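######################################################################
# A minimal sketch (an illustrative addition, not part of the original
# script): quantify the filter discrepancies discussed above numerically.
edge_scharr = scharr(image)
edge_prewitt = prewitt(image)
print 'max |Scharr - Sobel| difference:', np.abs(edge_scharr - edge_sobel).max()
print 'max |Scharr - Prewitt| difference:', np.abs(edge_scharr - edge_prewitt).max()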
|
gpl-3.0
| 8,243,494,613,554,497,000
| 31.589041
| 78
| 0.720891
| false
| 3.45283
| false
| false
| false
|
seecr/weightless-core
|
weightless/core/__init__.py
|
1
|
3794
|
## begin license ##
#
# "Weightless" is a High Performance Asynchronous Networking Library. See http://weightless.io
#
# Copyright (C) 2006-2011 Seek You Too (CQ2) http://www.cq2.nl
# Copyright (C) 2011-2012, 2015, 2020-2021 Seecr (Seek You Too B.V.) https://seecr.nl
#
# This file is part of "Weightless"
#
# "Weightless" is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# "Weightless" is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with "Weightless"; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
#
## end license ##
VERSION='$Version: x.y.z$'[9:-1].strip() # Modified by package scripts
from functools import wraps
from types import GeneratorType, FunctionType
from os.path import dirname, abspath, isdir, join #DO_NOT_DISTRIBUTE
from sys import version_info #DO_NOT_DISTRIBUTE
pycmd = "python%s.%s" % version_info[:2] #DO_NOT_DISTRIBUTE
_mydir = abspath(dirname(__file__)) #DO_NOT_DISTRIBUTE
_projectdir = dirname(dirname(_mydir)) #DO_NOT_DISTRIBUTE
if isdir(join(_mydir, '.svn')) or isdir(join(_projectdir, '.git')): #DO_NOT_DISTRIBUTE
from os import system #DO_NOT_DISTRIBUTE
status = system( #DO_NOT_DISTRIBUTE
"cd %s/../..; %s setup.py build_ext --inplace" #DO_NOT_DISTRIBUTE
% (abspath(dirname(__file__)), pycmd)) #DO_NOT_DISTRIBUTE
if status > 0: #DO_NOT_DISTRIBUTE
import sys #DO_NOT_DISTRIBUTE
sys.exit(status) #DO_NOT_DISTRIBUTE
import platform
if hasattr(platform, 'python_implementation'):
cpython = platform.python_implementation() == "CPython"
elif hasattr(platform, 'system'):
cpython = platform.system() != "Java"
else:
cpython = False
try:
from os import getenv
if getenv('WEIGHTLESS_COMPOSE_TEST') == 'PYTHON':
raise ImportError('Python compose for testing purposes')
from .ext import compose as _compose, local, tostring, Yield, is_generator, DeclineMessage
cextension = True
ComposeType = _compose
except ImportError as e:
from warnings import warn
warn("Using Python version of compose(), local() and tostring()", stacklevel=2)
def is_generator(o):
return type(o) is GeneratorType
class DeclineMessage(Exception):
pass
from ._compose_py import compose as _compose, Yield
from ._local_py import local
from ._tostring_py import tostring
cextension = False
ComposeType = GeneratorType
def compose(X, *args, **kwargs):
if type(X) == FunctionType: # compose used as decorator
@wraps(X)
def helper(*args, **kwargs):
return _compose(X(*args, **kwargs))
return helper
elif is_generator(X):
return _compose(X, *args, **kwargs)
raise TypeError("compose() expects generator, got %s" % repr(X))
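# A minimal usage sketch (illustrative only, not part of the original module):
# compose() flattens nested generators, so yielding a generator delegates to it.
#
#     def _sub():
#         yield 'a'
#         yield 'b'
#
#     @compose
#     def _top():
#         yield _sub()   # the inner generator's values are yielded outward
#         yield 'c'
#
#     assert list(_top()) == ['a', 'b', 'c']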
#from compose import compose, local, tostring, Yield
from .utils import identify, autostart, retval, consume, asList, asString, asBytes, return_
from ._observable import Observable, Transparent, be, methodOrMethodPartialStr, NoneOfTheObserversRespond
|
gpl-2.0
| -8,486,436,413,945,769,000
| 43.116279
| 105
| 0.644439
| false
| 3.887295
| false
| false
| false
|
networkjanitor/faeriawikibot
|
gamepedia_rw_pages.py
|
1
|
13081
|
import configparser
import os
import sys
import gamepedia_client
class GamepediaPagesRW:
gc = None
'''
Create new instance of GamepediaClient (required for name attribution)
'''
def create_gamepedia_client(self, username=None, password=None):
global cfg_file
if username is None:
username = cfg_file['account']['username']
if password is None:
password = cfg_file['account']['password']
self.gc = gamepedia_client.GamepediaClient(username=username, password=password)
'''
Download and save page.
'''
def download(self, path, page):
if self.gc is None:
self.create_gamepedia_client()
res = self.gc.read(page)
with open(path, 'w') as f:
f.write(res)
'''
Write text from local file to page
'''
def upload(self, path, page):
if self.gc is None:
self.create_gamepedia_client()
with open(path, 'r') as f:
res = f.read()
self.gc.write(page, res)
'''
Backup selection of pages
'''
def backup(self):
self.backup_galleries_cards()
'''
Archivate selection of pages
'''
def archivate(self):
self.download('setup/Template/Card_stats', 'Template:Card_stats')
self.download('setup/Template/Cardlist', 'Template:Cardlist')
self.download('setup/Template/Card_nav', 'Template:Card_nav')
self.download('setup/Template/Codexcontentlist', 'Template:Codexcontentlist')
self.download('setup/Lore/The_world', 'The_world')
self.download('setup/Lore/Factions', 'Factions')
self.download('setup/Lore/The_player,_the_orbs,_the_memoria', 'The_player,_the_orbs,_the_memoria')
self.download('setup/Lore/The_Faëria', 'The_Faëria')
self.download('setup/Template/Lake', 'Template:Lake')
self.download('setup/Template/Mountain', 'Template:Mountain')
self.download('setup/Template/Forest', 'Template:Forest')
self.download('setup/Template/Desert', 'Template:Desert')
self.download('setup/Template/Dpl_lake', 'Template:dpl_lake')
self.download('setup/Template/Dpl_mountain', 'Template:dpl_mountain')
self.download('setup/Template/Dpl_forest', 'Template:dpl_forest')
self.download('setup/Template/Dpl_desert', 'Template:dpl_desert')
self.download('setup/Template/Dpl_life', 'Template:Lif')
self.download('setup/Template/Dpl_power', 'Template:Pow')
self.download('setup/Template/Dpl_name', 'Template:dpl_name')
self.download('setup/Template/Dpl_display', 'Template:dpl_display')
self.download('setup/Template/Rarity', 'Template:Rarity')
self.download('setup/Template/Common', 'Template:Common')
self.download('setup/Template/Rare', 'Template:Rare')
self.download('setup/Template/Epic', 'Template:Epic')
self.download('setup/Template/Legendary', 'Template:Legendary')
self.download('setup/List/List_of_Cards', 'List_of_Cards')
self.download('setup/List/List_of_Blue_cards', 'List_of_Blue_cards')
self.download('setup/List/List_of_Green_cards', 'List_of_Green_cards')
self.download('setup/List/List_of_Red_cards', 'List_of_Red_cards')
self.download('setup/List/List_of_Yellow_cards', 'List_of_Yellow_cards')
self.download('setup/List/List_of_Human_cards', 'List_of_Human_cards')
self.download('setup/List/List_of_Common_cards', 'List_of_Common_cards')
self.download('setup/List/List_of_Rare_cards', 'List_of_Rare_cards')
self.download('setup/List/List_of_Epic_cards', 'List_of_Epic_cards')
self.download('setup/List/List_of_Legendary_cards', 'List_of_Legendary_cards')
self.download('setup/List/List_of_Creature_cards', 'List_of_Creature_cards')
self.download('setup/List/List_of_Structure_cards', 'List_of_Structure_cards')
self.download('setup/List/List_of_Event_cards', 'List_of_Event_Cards')
self.download('setup/List/List_of_Charge_X_cards', 'List_of_Charge_X_cards')
self.download('setup/List/List_of_Faeria_X_cards', 'List_of_Faeria_X_cards')
self.download('setup/List/List_of_Options_cards', 'List_of_Options_cards')
self.download('setup/List/List_of_Ranged_cards', 'List_of_Ranged_cards')
self.download('setup/List/List_of_Production_cards', 'List_of_Production_cards')
self.download('setup/List/List_of_Combat_cards', 'List_of_Combat_cards')
self.download('setup/List/List_of_Protection_cards', 'List_of_Protection_cards')
self.download('setup/List/List_of_Taunt_cards', 'List_of_Taund_cards')
self.download('setup/List/List_of_Haste_cards', 'List_of_Haste_cards')
self.download('setup/List/List_of_Last_Words_cards', 'List_of_Last_Words_cards')
self.download('setup/List/List_of_Deathtouch_cards', 'List_of_Deathtouch_cards')
self.download('setup/List/List_of_Flying_cards', 'List_of_Flying_cards')
self.download('setup/List/List_of_Jump_cards', 'List_of_Jump_cards')
self.download('setup/List/List_of_Aquatic_cards', 'List_of_Aquatic_cards')
self.download('setup/List/List_of_Activate_cards', 'List_of_Activate_cards')
self.download('setup/List/List_of_Gift_cards', 'List_of_Gift_cards')
self.download('setup/Cards/By Color/Human', 'Human')
self.download('setup/Cards/By Color/Blue', 'Blue')
self.download('setup/Cards/By Color/Green', 'Green')
self.download('setup/Cards/By Color/Red', 'Red')
self.download('setup/Cards/By Color/Yellow', 'Yellow')
self.download('setup/Cards/By Type/Creature', 'Creature')
self.download('setup/Cards/By Type/Event', 'Event')
self.download('setup/Cards/By Type/Structure', 'Structure')
self.download('setup/Cards/By Rarity/Common', 'Common')
self.download('setup/Cards/By Rarity/Rare', 'Rare')
self.download('setup/Cards/By Rarity/Epic', 'Epic')
self.download('setup/Cards/By Rarity/Legendary', 'Legendary')
self.download('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
self.download('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
self.download('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
self.download('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
self.download('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
'''
Restore selection of default pages
'''
def restore(self):
self.restore_cards_by()
self.restore_galleries_cards()
'''
Restore Cards By-X
'''
def restore_cards_by(self):
self.upload('setup/Cards/By Color/Human', 'Human')
self.upload('setup/Cards/By Color/Blue', 'Blue')
self.upload('setup/Cards/By Color/Green', 'Green')
self.upload('setup/Cards/By Color/Red', 'Red')
self.upload('setup/Cards/By Color/Yellow', 'Yellow')
self.upload('setup/Cards/By Type/Creature', 'Creature')
self.upload('setup/Cards/By Type/Event', 'Event')
self.upload('setup/Cards/By Type/Structure', 'Structure')
self.upload('setup/Cards/By Rarity/Common', 'Common')
self.upload('setup/Cards/By Rarity/Rare', 'Rare')
self.upload('setup/Cards/By Rarity/Epic', 'Epic')
self.upload('setup/Cards/By Rarity/Legendary', 'Legendary')
'''
Restore Changelog Templates
'''
def restore_templates_changelog(self):
self.upload('setup/Template/Changelog/Cl_codexcode1', 'Template:Cl_codexcode1')
self.upload('setup/Template/Changelog/Cl_codexcode2', 'Template:Cl_codexcode2')
self.upload('setup/Template/Changelog/Cl_codexcode3', 'Template:Cl_codexcode3')
self.upload('setup/Template/Changelog/Cl_color', 'Template:Cl_color')
self.upload('setup/Template/Changelog/Cl_desc', 'Template:Cl_desc')
self.upload('setup/Template/Changelog/Cl_desert', 'Template:Cl_desert')
self.upload('setup/Template/Changelog/Cl_faeria', 'Template:Cl_faeria')
self.upload('setup/Template/Changelog/Cl_forest', 'Template:Cl_forest')
self.upload('setup/Template/Changelog/Cl_lake', 'Template:Cl_lake')
self.upload('setup/Template/Changelog/Cl_life', 'Template:Cl_life')
self.upload('setup/Template/Changelog/Cl_mountain', 'Template:Cl_mountain')
self.upload('setup/Template/Changelog/Cl_name', 'Template:Cl_name')
self.upload('setup/Template/Changelog/Cl_power', 'Template:Cl_power')
self.upload('setup/Template/Changelog/Cl_rarity', 'Template:Cl_rarity')
self.upload('setup/Template/Changelog/Cl_type', 'Template:Cl_type')
self.upload('setup/Template/Changelog/Cl_unknown', 'Template:Cl_unknown')
self.upload('setup/Template/Changelog/Cl_info', 'Template:Cl_info')
'''
Restore Card Galleries
'''
def restore_galleries_cards(self):
self.upload('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
self.upload('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
self.upload('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
self.upload('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
self.upload('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
self.upload('setup/Gallery/Gallery_of_Creature_cards', 'Gallery_of_Creature_cards')
self.upload('setup/Gallery/Gallery_of_Structure_cards', 'Gallery_of_Structure_cards')
self.upload('setup/Gallery/Gallery_of_Event_cards', 'Gallery_of_Event_cards')
self.upload('setup/Gallery/Gallery_of_Common_cards', 'Gallery_of_Common_cards')
self.upload('setup/Gallery/Gallery_of_Rare_cards', 'Gallery_of_Rare_cards')
self.upload('setup/Gallery/Gallery_of_Epic_cards', 'Gallery_of_Epic_cards')
self.upload('setup/Gallery/Gallery_of_Legendary_cards', 'Gallery_of_Legendary_cards')
'''
Restore Lists of (effect) cards
'''
def restore_lists_effects(self):
        self.upload('setup/List/List_of_Charge_X_cards', 'List_of_Charge_X_cards')
        self.upload('setup/List/List_of_Faeria_X_cards', 'List_of_Faeria_X_cards')
        self.upload('setup/List/List_of_Options_cards', 'List_of_Options_cards')
        self.upload('setup/List/List_of_Ranged_cards', 'List_of_Ranged_cards')
        self.upload('setup/List/List_of_Production_cards', 'List_of_Production_cards')
        self.upload('setup/List/List_of_Combat_cards', 'List_of_Combat_cards')
        self.upload('setup/List/List_of_Protection_cards', 'List_of_Protection_cards')
        self.upload('setup/List/List_of_Taunt_cards', 'List_of_Taund_cards')
        self.upload('setup/List/List_of_Haste_cards', 'List_of_Haste_cards')
        self.upload('setup/List/List_of_Last_Words_cards', 'List_of_Last_Words_cards')
        self.upload('setup/List/List_of_Deathtouch_cards', 'List_of_Deathtouch_cards')
        self.upload('setup/List/List_of_Flying_cards', 'List_of_Flying_cards')
        self.upload('setup/List/List_of_Jump_cards', 'List_of_Jump_cards')
        self.upload('setup/List/List_of_Aquatic_cards', 'List_of_Aquatic_cards')
        self.upload('setup/List/List_of_Activate_cards', 'List_of_Activate_cards')
        self.upload('setup/List/List_of_Gift_cards', 'List_of_Gift_cards')
        self.upload('setup/List/List_of_Random_cards', 'List_of_Random_cards')
'''
Backup Card Galleries
'''
def backup_galleries_cards(self):
self.download('setup/Gallery/Gallery_of_Blue_cards', 'Gallery_of_Blue_cards')
self.download('setup/Gallery/Gallery_of_Green_cards', 'Gallery_of_Green_cards')
self.download('setup/Gallery/Gallery_of_Human_cards', 'Gallery_of_Human_cards')
self.download('setup/Gallery/Gallery_of_Red_cards', 'Gallery_of_Red_cards')
self.download('setup/Gallery/Gallery_of_Yellow_cards', 'Gallery_of_Yellow_cards')
self.download('setup/Gallery/Gallery_of_Creature_cards', 'Gallery_of_Creature_cards')
self.download('setup/Gallery/Gallery_of_Structure_cards', 'Gallery_of_Structure_cards')
self.download('setup/Gallery/Gallery_of_Event_cards', 'Gallery_of_Event_cards')
self.download('setup/Gallery/Gallery_of_Common_cards', 'Gallery_of_Common_cards')
self.download('setup/Gallery/Gallery_of_Rare_cards', 'Gallery_of_Rare_cards')
self.download('setup/Gallery/Gallery_of_Epic_cards', 'Gallery_of_Epic_cards')
self.download('setup/Gallery/Gallery_of_Legendary_cards', 'Gallery_of_Legendary_cards')
if __name__ == '__main__':
gr = GamepediaPagesRW()
global cfg_file
cfg_file = configparser.ConfigParser()
path_to_cfg = os.path.abspath(os.path.dirname(sys.argv[0]))
path_to_cfg = os.path.join(path_to_cfg, 'faeriawikibot.conf')
cfg_file.read(path_to_cfg)
gr.restore()
|
mit
| -1,266,205,476,075,803,100
| 50.290196
| 106
| 0.666641
| false
| 3.1776
| false
| false
| false
|
Southpaw-TACTIC/Team
|
src/python/Lib/site-packages/pythonwin/pywin/idle/AutoExpand.py
|
1
|
2763
|
import string
import re
###$ event <<expand-word>>
###$ win <Alt-slash>
###$ unix <Alt-slash>
class AutoExpand:
keydefs = {
'<<expand-word>>': ['<Alt-slash>'],
}
unix_keydefs = {
'<<expand-word>>': ['<Meta-slash>'],
}
menudefs = [
('edit', [
('E_xpand word', '<<expand-word>>'),
]),
]
wordchars = string.letters + string.digits + "_"
def __init__(self, editwin):
self.text = editwin.text
self.text.wordlist = None # XXX what is this?
self.state = None
def expand_word_event(self, event):
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
if not self.state:
words = self.getwords()
index = 0
else:
words, index, insert, line = self.state
if insert != curinsert or line != curline:
words = self.getwords()
index = 0
if not words:
self.text.bell()
return "break"
word = self.getprevword()
self.text.delete("insert - %d chars" % len(word), "insert")
newword = words[index]
index = (index + 1) % len(words)
if index == 0:
self.text.bell() # Warn we cycled around
self.text.insert("insert", newword)
curinsert = self.text.index("insert")
curline = self.text.get("insert linestart", "insert lineend")
self.state = words, index, curinsert, curline
return "break"
def getwords(self):
word = self.getprevword()
if not word:
return []
before = self.text.get("1.0", "insert wordstart")
wbefore = re.findall(r"\b" + word + r"\w+\b", before)
del before
after = self.text.get("insert wordend", "end")
wafter = re.findall(r"\b" + word + r"\w+\b", after)
del after
if not wbefore and not wafter:
return []
words = []
dict = {}
# search backwards through words before
wbefore.reverse()
for w in wbefore:
if dict.get(w):
continue
words.append(w)
dict[w] = w
# search onwards through words after
for w in wafter:
if dict.get(w):
continue
words.append(w)
dict[w] = w
words.append(word)
return words
def getprevword(self):
line = self.text.get("insert linestart", "insert")
i = len(line)
while i > 0 and line[i-1] in self.wordchars:
i = i-1
return line[i:]
|
epl-1.0
| 187,856,678,564,993,900
| 28.032609
| 69
| 0.488961
| false
| 3.919149
| false
| false
| false
|
effigies/mne-python
|
examples/time_frequency/plot_source_power_spectrum.py
|
2
|
1929
|
"""
=========================================================
Compute power spectrum densities of the sources with dSPM
=========================================================
Returns an STC file containing the PSD (in dB) of each of the sources.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
print(__doc__)
import mne
from mne import io
from mne.datasets import sample
from mne.minimum_norm import read_inverse_operator, compute_source_psd
###############################################################################
# Set parameters
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
fname_label = data_path + '/MEG/sample/labels/Aud-lh.label'
# Setup for reading the raw data
raw = io.Raw(raw_fname, verbose=False)
events = mne.find_events(raw, stim_channel='STI 014')
inverse_operator = read_inverse_operator(fname_inv)
raw.info['bads'] = ['MEG 2443', 'EEG 053']
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg=True, eeg=False, eog=True,
stim=False, exclude='bads')
tmin, tmax = 0, 120 # use the first 120s of data
fmin, fmax = 4, 100 # look at frequencies between 4 and 100Hz
n_fft = 2048 # the FFT size (n_fft). Ideally a power of 2
label = mne.read_label(fname_label)
stc = compute_source_psd(raw, inverse_operator, lambda2=1. / 9., method="dSPM",
tmin=tmin, tmax=tmax, fmin=fmin, fmax=fmax,
pick_ori="normal", n_fft=n_fft, label=label)
stc.save('psd_dSPM')
###############################################################################
# View PSD of sources in label
import matplotlib.pyplot as plt
plt.plot(1e3 * stc.times, stc.data.T)
plt.xlabel('Frequency (Hz)')
plt.ylabel('PSD (dB)')
plt.title('Source Power Spectrum (PSD)')
plt.show()
|
bsd-3-clause
| -6,288,450,017,476,853,000
| 34.072727
| 79
| 0.589943
| false
| 3.269492
| false
| false
| false
|
acysos/odoo-addons
|
edicom/models/edicom_albaran.py
|
1
|
4074
|
# -*- coding: utf-8 -*-
# Copyright 2020 Ignacio Ibeas <ignacio@acysos.com>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import _, api, fields, models
from odoo.exceptions import UserError, ValidationError
import time
import logging
_logger = logging.getLogger(__name__)
class EdicomAlbaran(models.Model):
_name = "edicom.albaran"
_description = "Albaran Edicom"
picking_id = fields.Many2one(
comodel_name='stock.picking', string='Albaran', required=True)
cabalb_ids = fields.One2many(
comodel_name='edicom.cabalb', inverse_name='albaran_edicom_id',
string='Datos de CABALB')
linalb_ids = fields.One2many(
comodel_name='edicom.linalb', inverse_name='albaran_edicom_id',
string='Datos de LINALB')
embalb_ids = fields.One2many(
comodel_name='edicom.embalb', inverse_name='albaran_edicom_id',
string='Datos de EMBALB')
_rec_name = 'picking_id'
@api.multi
def procesar_albaran(self):
cabalb_pool = self.env['edicom.cabalb']
linalb_pool = self.env['edicom.linalb']
embalb_pool = self.env['edicom.embalb']
for albaran_edi in self:
_logger.info('Albaran EDI ' + str(albaran_edi.picking_id.name))
albaran = albaran_edi.picking_id
if not albaran:
raise UserError(
_('No se ha indicado la albaran para generar el fichero.'))
if not (albaran.company_id and albaran.company_id.partner_id and
albaran.company_id.partner_id.codigo_edi):
raise UserError(
_('No se ha indicado el codigo edi en la compañía del '
'albaran.'))
if not (albaran.partner_id and albaran.partner_id.codigo_edi):
raise UserError(
_('No se ha indicado el codigo edi en el cliente.'))
            # Generate the header (CABALB) - delete it first if it exists
cabalb_ids = cabalb_pool.search(
[('albaran_edicom_id', '=', albaran_edi.id)])
cabalb_ids.unlink()
cabalb_ids = cabalb_pool.generar(albaran_edi)
            # Generate the packaging records (EMBALB) - delete them first if they exist
embalb_ids = embalb_pool.search(
[('albaran_edicom_id', '=', albaran_edi.id)])
embalb_ids.unlink()
embalb_ids = embalb_pool.generar(albaran_edi)
            # Generate the lines (LINALB) - delete them first if they exist
linalb_ids = linalb_pool.search(
[('albaran_edicom_id', '=', albaran_edi.id)])
linalb_ids.unlink()
linalb_ids = linalb_pool.generar(albaran_edi)
return True
@api.multi
def generar_ficheros(self):
cabalb_pool = self.env['edicom.cabalb']
linalb_pool = self.env['edicom.linalb']
embalb_pool = self.env['edicom.embalb']
for albaran_edi in self:
if (albaran_edi.picking_id and albaran_edi.picking_id.company_id
and albaran_edi.picking_id.company_id.edi_path):
path = albaran_edi.picking_id.company_id.edi_path
else:
raise UserError(
_('No se ha indicado la ruta para generar el fichero en '
'la compañía de la albaran.'))
out_char_sep = albaran_edi.picking_id.company_id.out_char_separator
file_suffix = albaran_edi.picking_id.name.replace('/', '')
if albaran_edi.cabalb_ids:
cabalb_pool.exportar(
albaran_edi.cabalb_ids, path, file_suffix, out_char_sep)
if albaran_edi.linalb_ids:
linalb_pool.exportar(
albaran_edi.linalb_ids, path, file_suffix, out_char_sep)
if albaran_edi.embalb_ids:
embalb_pool.exportar(
albaran_edi.embalb_ids, path, file_suffix, out_char_sep)
alert_file = open(path + '/albaranespendientes.txt', 'w')
alert_file.close()
return True
|
agpl-3.0
| -6,508,898,318,081,650,000
| 36.685185
| 79
| 0.578133
| false
| 3.209779
| false
| false
| false
|
raymak/contextualfeaturerecommender
|
phase1/analysis/user_to_aggregates.py
|
1
|
8181
|
#!/usr/bin/python
# input: tab-separated stream, with each line corresponding to the data for a user
# output: tab-separated aggregate report per (arm, feature), printed to stdout
# assumes the input messages from a specific user are contiguous
import fileinput
import json
rev_inds = {}
FEATURE_NAMES = [
'closetabshortcut',
'newbookmark',
'newtabshortcut',
'newbookmarkshortcut',
'blushypage', 'facebook',
'amazon',
'youtube',
'download',
'gmail',
'reddit']
FEATURE_SUFFIXES = [
'_recommended',
'_recommended_seen',
'_secondary_used_after',
'_secondary_used_only_after',
'_secondary_used_after_to_seen',
'_secondary_used_after_seen',
'_secondary_used',
'_secondary_used_after_seen_to_seen',
'_secondary_used_before',
'_minor_used_after',
'_reaction_used',
'_reaction_used_after_seen',
'_reaction_used_after_seen_to_seen',
'_addon_ignored']
FEATURE_OFFERING_TYPES = {
'closetabshortcut': 'KEYSHORTCUT',
'newbookmark': 'ADDON',
'newtabshortcut': 'KEYSHORTCUT',
'newbookmarkshortcut': 'KEYSHORTCUT',
'blushypage': 'PRIVATEWINDOW',
'facebook': 'PINTAB',
'amazon': 'ADDON',
'youtube': 'ADDON',
'download': 'ADDON',
'gmail': 'ADDON',
'reddit': 'ADDON'
}
ARMS_ROWS_KEYS_ARR = [
'name',
'user_num',
'has_disabled',
'has_moved_button',
'median_num_of_extensions',
'median_total_recommendations'
] + [featureName + suffix for featureName in FEATURE_NAMES
for suffix in FEATURE_SUFFIXES]
ARMS_FEATURES_KEYS_ARR = [
'ARM_arm_name',
'ARM_basis',
'ARM_explanation',
'ARM_ui',
'ARM_user_num',
'ARM_has_disabled',
'ARM_has_moved_button',
'ARM_median_num_of_extensions',
'ARM_median_total_recommendations',
'FEATURE_feature_name',
'FEATURE_offering_type'
] + ['FEATURE' + suffix for suffix in FEATURE_SUFFIXES]
ARM_NAMES = ['explained-doorhanger-active',
'explained-doorhanger-passive',
'unexplained-doorhanger-active',
'unexplained-doorhanger-passive',
'control']
def main(headerLine, userLines):
table = parseCSVtoTable(headerLine, userLines)
table = basicFilter(table)
printTableToCSV(generateArmFeatureReport(table), ARMS_FEATURES_KEYS_ARR)
def basicFilter(table):
selected_indices = [i for i in range(len(table['userid']))
if table['experiment_ver'][i] == '2.0.0'
and table['num_of_extensions'][i] is not None
and not table['test_mode_enabled'][i]
and not table['browsertabsremote_enabled'][i]
]
new_table = {key: [table[key][i] for i in selected_indices] for key in table }
return new_table
def getTableByColumnValue(table, column_name, column_value):
selected_indices = [i for i in range(len(table[column_name])) if table[column_name][i] == column_value]
new_table = {key: [table[key][i] for i in selected_indices] for key in table}
return new_table
def appendRecordDictToTable(table, recordDict):
for col_name in table:
table[col_name].append(recordDict[col_name])
# mutates the given table
def generateArmFeatureReport(table):
armsFeaturesTable = {armsFeaturesKey: [] for armsFeaturesKey in ARMS_FEATURES_KEYS_ARR}
armsTables = {arm: {} for arm in ARM_NAMES}
for arm in armsTables:
armsTables[arm] = getTableByColumnValue(table, 'arm_name', arm)
recordDict = {}
for arm in ARM_NAMES:
userNum = len(armsTables[arm]['userid'])
recordDict['ARM_user_num'] = userNum
recordDict['ARM_arm_name'] = arm
recordDict['ARM_basis'] = armsTables[arm]['arm_basis'][0]
recordDict['ARM_explanation'] = armsTables[arm]['arm_explanation'][0]
recordDict['ARM_ui'] = armsTables[arm]['arm_ui'][0]
recordDict['ARM_has_disabled'] = armsTables[arm]['has_disabled'].count(True)
recordDict['ARM_has_moved_button'] = armsTables[arm]['has_moved_button'].count(True)
recordDict['ARM_median_num_of_extensions'] = sorted(armsTables[arm]['num_of_extensions'])[userNum // 2]
recordDict['ARM_median_total_recommendations'] = sorted(armsTables[arm]['total_recommendations'])[userNum //2]
for featureName in FEATURE_NAMES:
recordDict['FEATURE_feature_name'] = featureName
recordDict['FEATURE_offering_type'] = FEATURE_OFFERING_TYPES[featureName]
for featureSuffix in [
'_recommended',
'_recommended_seen',
'_secondary_used',
'_secondary_used_after',
'_secondary_used_before',
'_minor_used_after',
'_reaction_used',
'_addon_ignored']:
col_name = featureName + featureSuffix
recordDict['FEATURE' + featureSuffix] = armsTables[arm][col_name].count(True)
secondaryUsedAfter = recordDict['FEATURE' + '_secondary_used_after']
recommendedSeen = recordDict['FEATURE' + '_recommended_seen']
# 0 could mean real 0 or 0/0
recordDict['FEATURE' + '_secondary_used_after_to_seen'] = 0 if recommendedSeen == 0 else (100* secondaryUsedAfter) / recommendedSeen
recordDict['FEATURE' + '_secondary_used_only_after'] = [
armsTables[arm][featureName + '_secondary_used_after'][i]
and not armsTables[arm][featureName + '_secondary_used_before'][i]
for i in range(userNum)
].count(True)
recordDict['FEATURE' + '_secondary_used_after_seen'] = [
armsTables[arm][featureName + '_secondary_used_after'][i]
and armsTables[arm][featureName + '_recommended_seen'][i]
for i in range(userNum)
].count(True)
secondaryUsedAfterSeen = recordDict['FEATURE' + '_secondary_used_after_seen']
recordDict['FEATURE' + '_secondary_used_after_seen_to_seen'] = 0 if recommendedSeen == 0 else (100 * secondaryUsedAfterSeen) / recommendedSeen
recordDict['FEATURE' + '_reaction_used_after_seen'] = [
armsTables[arm][featureName + '_reaction_used'][i]
and armsTables[arm][featureName + '_recommended_seen'][i]
for i in range(userNum)
].count(True)
reactionUsedAfterSeen = recordDict['FEATURE' + '_reaction_used_after_seen']
recordDict['FEATURE' + '_reaction_used_after_seen_to_seen'] = 0 if recommendedSeen == 0 else (100 * reactionUsedAfterSeen) / recommendedSeen
appendRecordDictToTable(armsFeaturesTable, recordDict)
return armsFeaturesTable
def printTableToCSV(table, columnNamesArr):
printCSVTableHeader(columnNamesArr)
rowNum = len(table[columnNamesArr[0]])
for i in range(rowNum):
printTableRow(table, i, columnNamesArr)
def printTableRow(table, rowNum, columnNamesArr):
elms = [json.dumps(table[colName][rowNum])
for colName in columnNamesArr]
rowStr = '\t'.join(elms)
print rowStr
def printCSVTableHeader(keysArr):
print '\t'.join(keysArr)
def parseCSVtoTable(headerLine, rows):
table = {}
fields = headerLine.strip().split('\t')
for i in range(len(fields)):
table[fields[i]] = []
rev_inds[i] = fields[i]
for line in rows:
jsonrow = [json.loads(val) for val in line.strip().split('\t')]
for i in range(len(jsonrow)):
table[rev_inds[i]].append(jsonrow[i])
return table
if __name__ == "__main__":
lines = fileinput.input()
main(lines.next(), lines)
|
mpl-2.0
| 6,887,455,980,894,750,000
| 32.666667
| 154
| 0.577802
| false
| 3.828264
| false
| false
| false
|
lizbew/code-practice
|
03-weibo/base62.py
|
1
|
1460
|
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
def rsplit(s, count):
f = lambda x: x > 0 and x or 0
return [s[f(i - count):i] for i in range(len(s), 0, -count)]
def id2mid(id):
result = ''
for i in rsplit(id, 7):
str62 = base62_encode(int(i))
result = str62.zfill(4) + result
return result.lstrip('0')
def mid2id(mid):
result = ''
for i in rsplit(mid, 4):
str10 = str(base62_decode(i)).zfill(7)
result = str10 + result
return result.lstrip('0')
def base62_encode(num, alphabet=ALPHABET):
"""Encode a number in Base X
`num`: The number to encode
`alphabet`: The alphabet to use for encoding
"""
if (num == 0):
return alphabet[0]
arr = []
base = len(alphabet)
while num:
rem = num % base
num = num // base
arr.append(alphabet[rem])
arr.reverse()
return ''.join(arr)
def base62_decode(string, alphabet=ALPHABET):
"""Decode a Base X encoded string into the number
Arguments:
- `string`: The encoded string
- `alphabet`: The alphabet to use for encoding
"""
base = len(alphabet)
strlen = len(string)
num = 0
idx = 0
for char in string:
power = (strlen - (idx + 1))
num += alphabet.index(char) * (base ** power)
idx += 1
return num
if __name__ == '__main__':
print mid2id('CeaOU15IT')
print id2mid('3833781880260331')
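    # round-trip sanity checks (illustrative additions; the values are arbitrary):
    # encoding and decoding should be mutually inverse
    assert base62_decode(base62_encode(1234567)) == 1234567
    assert mid2id(id2mid('3833781880260331')) == '3833781880260331'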
|
apache-2.0
| -2,043,530,695,683,512,600
| 25.071429
| 75
| 0.586986
| false
| 3.387471
| false
| false
| false
|
LudditeLabs/query-reform
|
reform/utils/scrap.py
|
1
|
1277
|
import re
class Scrap(object):
"""
    Scrapes method and class names from API documentation pages (Java, JS, Python)
"""
def __init__(self, file_path, out_name):
self.file_path = file_path
self.out_name = out_name
self.java_method_re = re.compile('^([a-z]+.+)\(')
self.js_method_re = re.compile('^([a-z]+): ')
self.python_class_re = re.compile('^([A-z]+.+) \(class in')
self.python_method_re = re.compile('^([A-z]+.+)\(\)')
def scrap_java_methods(self):
self._scrap(self.java_method_re)
def scrap_js_methods(self):
self._scrap(self.js_method_re)
def scrap_python_classes(self):
self._scrap(self.python_class_re)
def scrap_python_methods(self):
self._scrap(self.python_method_re)
def _scrap(self, scrap_re):
res = set()
with open(self.file_path) as f:
for line in f:
match = scrap_re.findall(line.strip())
if match:
res.add(match[0])
print "Found %d methods" % len(res)
with open(self.out_name, 'w') as o:
for r in res:
o.write(r + '\n')
if __name__ == '__main__':
scrapper = Scrap('../../data/raw/js_methods.txt', 'jsapimethods.txt')
scrapper.scrap_js_methods()
|
apache-2.0
| -8,040,165,848,016,772,000
| 27.377778
| 73
| 0.530932
| false
| 3.184539
| false
| false
| false
|
varmarakesh/devops-toolbox
|
devops-toolbox/ftp/install.py
|
1
|
1910
|
__author__ = 'rakesh.varma'
from fabric.api import *
import os
import time
class install:
fuse_git_repo = 'https://github.com/s3fs-fuse/s3fs-fuse.git'
def __init__(self, host_ip, host_user, host_key_file):
env.host_string = host_ip
env.user = host_user
env.key_filename = host_key_file
def install_s3fs(self):
print env.host_string
print env.user
print env.key_filename
sudo('yum install automake fuse-devel gcc-c++ git libcurl-devel libxml2-devel make openssl-devel')
sudo('git clone {0}'.format(self.fuse_git_repo))
        sudo('cd /home/ec2-user/s3fs-fuse && ./autogen.sh && ./configure')
        sudo('cd /home/ec2-user/s3fs-fuse && make')
        sudo('cd /home/ec2-user/s3fs-fuse && make install')
def mount(self, access_key, secret_key):
sudo('touch /etc/passwd-s3fs && chmod 640 /etc/passwd-s3fs && echo "{0}:{1}" > /etc/passwd-s3fs'.format(access_key, secret_key))
sudo('/opt/bin/s3fs vcs-payment /home/vcsuser -o allow_other -o nonempty')
sudo('mount|grep s3fs')
def create_user(self, user, pwd):
print env.host_string
print env.user
print env.key_filename
sudo('hostname')
sudo('useradd -d /home/{0} {1}'.format(user, user))
sudo('echo -e "{0}\n{1}" | passwd {2}'.format(pwd, pwd, user))
sudo('chown -R {0} /home/{1}'.format(user, user))
def install_ftp(self, user):
sudo('yum install -y vsftpd')
sudo('chkconfig vsftpd on')
sudo('setsebool -P ftp_home_dir=1')
sudo('echo "{0}" > /etc/vsftpd/chroot_list'.format(user))
__location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
f = open(os.path.join(__location__, 'vsftpd.conf'))
vsftpd_config = f.read()
sudo('echo "{0}" > /etc/vsftpd/vsftpd.conf'.format(vsftpd_config))
sudo('service vsftpd restart')
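# A hypothetical usage sketch (host, key file and credentials are placeholders):
#   inst = install('203.0.113.10', 'ec2-user', '~/.ssh/mykey.pem')
#   inst.install_s3fs()
#   inst.create_user('vcsuser', 'changeme')
#   inst.install_ftp('vcsuser')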
|
isc
| -6,202,998,654,796,628,000
| 39.659574
| 136
| 0.604188
| false
| 3.07074
| false
| false
| false
|
qedsoftware/commcare-hq
|
custom/world_vision/sqldata/child_sqldata.py
|
1
|
33675
|
import calendar
from sqlagg import CountUniqueColumn
from sqlagg.columns import SimpleColumn
from sqlagg.filters import LT, LTE, AND, GTE, GT, EQ, NOTEQ, OR, IN
from corehq.apps.reports.datatables import DataTablesHeader, DataTablesColumn
from corehq.apps.reports.sqlreport import DatabaseColumn
from corehq.apps.reports.util import get_INFilter_bindparams
from custom.utils.utils import clean_IN_filter_value
from custom.world_vision.custom_queries import CustomMedianColumn, MeanColumnWithCasting
from custom.world_vision.sqldata import BaseSqlData
from custom.world_vision.sqldata.main_sqldata import ImmunizationOverview
from custom.world_vision.sqldata.mother_sqldata import MotherRegistrationDetails, DeliveryMothersIds
class ChildRegistrationDetails(MotherRegistrationDetails):
table_name = "fluff_WorldVisionChildFluff"
slug = 'child_registration_details'
title = 'Child Registration Details'
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
result = []
for column in self.columns:
result.append([{'sort_key': column.header, 'html': column.header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_registration_details'],
column.slug)},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]}])
return result
@property
def columns(self):
columns = [
DatabaseColumn("Total child registered ever", CountUniqueColumn('doc_id', alias="total"))
]
        if 'startdate' not in self.config:
columns.extend([
DatabaseColumn(
"Total open children cases", CountUniqueColumn(
'doc_id', alias="no_date_opened",
filters=self.filters + [EQ('closed_on', 'empty')]
)
),
DatabaseColumn(
"Total closed children cases", CountUniqueColumn(
'doc_id', alias="no_date_closed",
filters=self.filters + [NOTEQ('closed_on', 'empty')]
)
),
DatabaseColumn(
"New registrations during last 30 days", CountUniqueColumn(
'doc_id', alias="no_date_new_registrations",
filters=self.filters + [AND([GTE('opened_on', "last_month"), LTE('opened_on', "today")])]
)
)
])
else:
columns.extend([
DatabaseColumn(
"Children cases open at end period", CountUniqueColumn(
'doc_id', alias="opened",
filters=self.filters + [AND([LTE('opened_on', "stred"), OR([EQ('closed_on', 'empty'),
GT('closed_on', "stred")])])]
)
),
DatabaseColumn(
"Children cases closed during period", CountUniqueColumn(
'doc_id', alias="closed",
filters=self.filters + [AND([GTE('closed_on', "strsd"), LTE('closed_on', "stred")])]
)
),
DatabaseColumn(
"Total children followed during period", CountUniqueColumn(
'doc_id', alias="followed",
filters=self.filters + [AND([LTE('opened_on', "stred"), OR([EQ('closed_on', 'empty'),
GTE('closed_on', "strsd")])])]
)
),
DatabaseColumn(
"New registrations during period", CountUniqueColumn(
'doc_id', alias="new_registrations",
filters=self.filters + [AND([LTE('opened_on', "stred"), GTE('opened_on', "strsd")])]
)
)
])
return columns
class ClosedChildCasesBreakdown(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'closed_child_cases_breakdown'
title = 'Closed Child Cases Breakdown'
show_total = True
total_row_name = "Children cases closed during the time period"
chart_title = 'Closed Child Cases'
show_charts = True
chart_x_label = ''
chart_y_label = ''
chart_only = True
@property
def group_by(self):
return ['reason_for_child_closure']
@property
def rows(self):
from custom.world_vision import CLOSED_CHILD_CASES_BREAKDOWN
return self._get_rows(CLOSED_CHILD_CASES_BREAKDOWN, super(ClosedChildCasesBreakdown, self).rows)
@property
def filters(self):
filter = super(ClosedChildCasesBreakdown, self).filters[1:]
if 'strsd' in self.config:
filter.append(GTE('closed_on', 'strsd'))
if 'stred' in self.config:
filter.append(LTE('closed_on', 'stred'))
filter.append(NOTEQ('reason_for_child_closure', 'empty'))
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Reason for closure'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Reason for closure", SimpleColumn('reason_for_child_closure')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))
]
class ChildrenDeaths(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_deaths'
title = 'Children Death Details'
total_row_name = "Total Deaths"
show_total = True
show_charts = False
chart_x_label = ''
chart_y_label = ''
custom_total_calculate = True
accordion_start = True
accordion_end = False
table_only = True
def calculate_total_row(self, rows):
total_row = []
if len(rows) > 0:
num_cols = len(rows[0])
for i in range(num_cols):
colrows = [cr[i] for cr in rows[1:] if isinstance(cr[i], dict)]
columns = [r.get('sort_key') for r in colrows if isinstance(r.get('sort_key'), (int, long))]
if len(columns):
total_row.append(reduce(lambda x, y: x + y, columns, 0))
else:
total_row.append('')
return total_row
@property
def rows(self):
result = []
total = self.data['total_deaths']
for idx, column in enumerate(self.columns[:-1]):
if idx == 0:
percent = 'n/a'
else:
percent = self.percent_fn(total, self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]},
{'sort_key': 'percentage', 'html': percent}])
return result
@property
def filters(self):
filter = []
if 'start_date' in self.config:
filter.extend([AND([GTE('date_of_death', 'startdate'), LTE('date_of_death', 'enddate')])])
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Children Death Type'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
self.config['mother_ids'] = tuple(DeliveryMothersIds(config=self.config).data.keys()) + ('',)
return [
DatabaseColumn("Total births",
CountUniqueColumn('doc_id',
filters=[AND([IN('mother_id', get_INFilter_bindparams('mother_ids', self.config['mother_ids'])),
OR([EQ('gender', 'female'), EQ('gender', 'male')])])],
alias='total_births')),
DatabaseColumn("Newborn deaths (< 1 m)",
CountUniqueColumn('doc_id', filters=self.filters + [AND(
[EQ('reason_for_child_closure', 'death'),
EQ('type_of_child_death', 'newborn_death')])], alias='newborn_death')),
DatabaseColumn("Infant deaths (< 1 y)",
CountUniqueColumn('doc_id', filters=self.filters + [AND(
[EQ('reason_for_child_closure', 'death'),
EQ('type_of_child_death', 'infant_death')])], alias='infant_death')),
DatabaseColumn("Child deaths (2-5y)",
CountUniqueColumn('doc_id', filters=self.filters + [AND(
[EQ('reason_for_child_closure', 'death'),
EQ('type_of_child_death', 'child_death')])], alias='child_death')),
DatabaseColumn("Total deaths",
CountUniqueColumn('doc_id', filters=self.filters + [EQ('reason_for_child_closure',
'death')], alias='total_deaths'))
]
@property
def filter_values(self):
return clean_IN_filter_value(super(ChildrenDeaths, self).filter_values, 'mother_ids')
class ChildrenDeathDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_death_details'
title = ''
show_total = True
total_row_name = "Total Deaths"
chart_title = 'Child Deaths'
show_charts = True
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = False
@property
def group_by(self):
return ['cause_of_death_child']
@property
def rows(self):
from custom.world_vision import CHILD_CAUSE_OF_DEATH
return self._get_rows(CHILD_CAUSE_OF_DEATH, super(ChildrenDeathDetails, self).rows)
@property
def filters(self):
filter = []
if 'start_date' in self.config:
filter.extend([AND([GTE('date_of_death', 'startdate'), LTE('date_of_death', 'enddate')])])
filter.extend([EQ('reason_for_child_closure', 'death')])
return filter
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Cause of death'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def columns(self):
return [
DatabaseColumn("Cause of death", SimpleColumn('cause_of_death_child')),
DatabaseColumn("Number", CountUniqueColumn('doc_id')),
]
class ChildrenDeathsByMonth(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_death_by_month'
title = ''
show_charts = True
chart_title = 'Seasonal Variation of Child Deaths'
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = True
@property
def group_by(self):
return ['month_of_death', 'year_of_death']
@property
def filters(self):
filters = super(ChildrenDeathsByMonth, self).filters
filters.extend([NOTEQ('month_of_death', 'empty')])
return filters
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Month'), DataTablesColumn('Deaths'), DataTablesColumn('Percentage')])
@property
def rows(self):
rows = [[int(i), 0] for i in range(1, 13)]
sum_of_deaths = 0
for row in super(ChildrenDeathsByMonth, self).rows:
rows[int(row[0])][-1] += row[-1]['html']
sum_of_deaths += row[-1]['html']
for row in rows:
row[0] = calendar.month_name[row[0]]
row.append({'sort_key': self.percent_fn(sum_of_deaths, row[1]),
'html': self.percent_fn(sum_of_deaths, row[1])})
row[1] = {'sort_key': row[1], 'html': row[1]}
return rows
@property
def columns(self):
return [DatabaseColumn("Month", SimpleColumn('month_of_death')),
DatabaseColumn("Year", SimpleColumn('year_of_death')),
DatabaseColumn("Number", CountUniqueColumn('doc_id'))]
class NutritionMeanMedianBirthWeightDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_birth_weights_1'
title = 'Nutrition Details'
accordion_start = True
accordion_end = False
@property
def filters(self):
filters = super(NutritionMeanMedianBirthWeightDetails, self).filters
filters.append(NOTEQ('weight_birth', 'empty'))
return filters
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Mean'), DataTablesColumn('Median')])
@property
def columns(self):
return [
DatabaseColumn("Median Birth Weight",
MeanColumnWithCasting('weight_birth', alias='mean_birth_weight')
),
DatabaseColumn("Median Birth Weight",
CustomMedianColumn('weight_birth', alias='median_birth_weight')
)
]
@property
def rows(self):
return [['Birth Weight (kg)',
"%.2f" % (self.data['mean_birth_weight'] if self.data['mean_birth_weight'] else 0),
"%.2f" % (self.data['median_birth_weight'] if self.data['mean_birth_weight'] else 0)]
]
class NutritionBirthWeightDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_birth_details_2'
title = ''
show_charts = True
chart_title = 'Birth Weight'
chart_x_label = ''
chart_y_label = ''
accordion_start = False
accordion_end = False
chart_only = True
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'), DataTablesColumn('Percentage')])
@property
def rows(self):
result = []
for idx, column in enumerate(self.columns):
if idx == 0 or idx == 1:
percent = 'n/a'
else:
percent = self.percent_fn(self.data['total_birthweight_known'], self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug],
'color': 'red' if column.slug == 'total_birthweight_lt_25' else 'green'},
{'sort_key': 'percentage', 'html': percent}]
)
return result
@property
def columns(self):
self.config['mother_ids'] = tuple(DeliveryMothersIds(config=self.config).data.keys()) + ('',)
columns = [
DatabaseColumn("Total children with with birthweight known",
CountUniqueColumn('doc_id', alias="total_birthweight_known",
filters=self.filters + [NOTEQ('weight_birth', 'empty')])),
DatabaseColumn("Total births",
CountUniqueColumn('doc_id',
filters=[AND([IN('mother_id', get_INFilter_bindparams('mother_ids', self.config['mother_ids'])),
OR([EQ('gender', 'female'), EQ('gender', 'male')])])],
alias='total_births'))]
columns.extend([
DatabaseColumn("Birthweight < 2.5 kg",
CountUniqueColumn('doc_id',
alias="total_birthweight_lt_25",
filters=self.filters + [AND([LT('weight_birth', 'weight_birth_25'), NOTEQ('weight_birth', 'empty')])]
)
),
DatabaseColumn("Birthweight >= 2.5 kg",
CountUniqueColumn('doc_id',
alias="total_birthweight_gte_25",
filters=self.filters + [AND([GTE('weight_birth', 'weight_birth_25'), NOTEQ('weight_birth', 'empty')])]
)
)
])
return columns
@property
def filter_values(self):
return clean_IN_filter_value(super(NutritionBirthWeightDetails, self).filter_values, 'mother_ids')
class NutritionFeedingDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_feeding_details'
title = ''
accordion_start = False
accordion_end = True
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Feeding type'), DataTablesColumn('Number'), DataTablesColumn('Total Eligible'), DataTablesColumn('Percentage')])
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
result = []
for i in range(0,4):
result.append([{'sort_key': self.columns[2*i].header, 'html': self.columns[2*i].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['nutrition_details'], self.columns[2*i].slug)},
{'sort_key': self.data[self.columns[2*i].slug], 'html': self.data[self.columns[2*i].slug]},
{'sort_key': self.data[self.columns[2*i+1].slug], 'html': self.data[self.columns[2*i + 1].slug],
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['nutrition_details'], self.columns[2*i+1].slug)},
{'sort_key': self.percent_fn(self.data[self.columns[2*i + 1].slug], self.data[self.columns[2*i].slug]),
'html': self.percent_fn(self.data[self.columns[2*i + 1].slug], self.data[self.columns[2*i].slug])}
])
return result
@property
def columns(self):
return [
DatabaseColumn("Early initiation of breastfeeding",
CountUniqueColumn('doc_id', alias="colostrum_feeding",
filters=self.filters + [EQ('breastfeed_1_hour', 'yes')])),
DatabaseColumn("Early initiation of breastfeeding Total Eligible",
CountUniqueColumn('doc_id', alias="colostrum_feeding_total_eligible",
filters=self.filters + [NOTEQ('breastfeed_1_hour', 'empty')])),
DatabaseColumn("Exclusive breastfeeding",
CountUniqueColumn('doc_id', alias="exclusive_breastfeeding",
filters=self.filters + [AND([EQ('exclusive_breastfeeding', "yes"),
GTE('dob', "today_minus_183")])])),
DatabaseColumn("Exclusive Breastfeeding (EBF) Total Eligible",
CountUniqueColumn('doc_id', alias="exclusive_breastfeeding_total_eligible",
filters=self.filters + [GTE('dob', 'today_minus_183')])),
DatabaseColumn("Supplementary feeding",
CountUniqueColumn('doc_id', alias="supplementary_feeding",
filters=self.filters + [AND([EQ('supplementary_feeding_baby', 'yes'),
GTE('dob', 'today_minus_182')])])),
DatabaseColumn("Supplementary feeding Total Eligible",
CountUniqueColumn('doc_id', alias="supplementary_feeding_total_eligible",
filters=self.filters + [GTE('dob', 'today_minus_182')])),
DatabaseColumn("Complementary feeding",
CountUniqueColumn('doc_id', alias="complementary_feeding",
filters=self.filters + [AND([EQ('comp_breastfeeding', 'yes'),
LTE('dob', 'today_minus_183'),
GTE('dob', 'today_minus_730')])])),
DatabaseColumn("Complementary feeding Total Eligible",
CountUniqueColumn('doc_id', alias="complementary_feeding_total_eligible",
filters=self.filters + [AND([LTE('dob', 'today_minus_183'),
GTE('dob', 'today_minus_730')])]))
]
class ChildHealthIndicators(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'Child_health_indicators'
title = 'Child Health Indicators'
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
result = [[{'sort_key': self.columns[0].header, 'html': self.columns[0].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[0].slug)},
{'sort_key': self.data[self.columns[0].slug], 'html': self.data[self.columns[0].slug]}],
[{'sort_key': self.columns[1].header, 'html': self.columns[1].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[1].slug)},
{'sort_key': self.data[self.columns[1].slug], 'html': self.data[self.columns[1].slug]}],
[{'sort_key': self.columns[2].header, 'html': self.columns[2].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[2].slug)},
{'sort_key': self.data[self.columns[2].slug], 'html': self.data[self.columns[2].slug]}]]
for i in range(3, 5):
result.append([{'sort_key': self.columns[i].header, 'html': self.columns[i].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'],
self.columns[i].slug)},
{'sort_key': self.data[self.columns[i].slug], 'html': self.data[self.columns[i].slug]},
{'sort_key': self.percent_fn(self.data[self.columns[1].slug],
self.data[self.columns[i].slug]),
'html': self.percent_fn(self.data[self.columns[1].slug],
self.data[self.columns[i].slug])}])
return result
@property
def columns(self):
return [
DatabaseColumn("Total child ill",
CountUniqueColumn(
'doc_id', alias="total_child_ill",
filters=self.filters + [OR([EQ('pneumonia_since_last_visit', 'yes'),
EQ('has_diarrhea_since_last_visit', 'yes')])])),
DatabaseColumn("ARI (Pneumonia)",
CountUniqueColumn('doc_id', alias="ari_cases",
filters=self.filters + [EQ('pneumonia_since_last_visit', 'yes')])),
DatabaseColumn("Diarrhea",
CountUniqueColumn('doc_id', alias="diarrhea_cases",
filters=self.filters + [EQ('has_diarrhea_since_last_visit', 'yes')])),
DatabaseColumn("ORS given during diarrhea",
CountUniqueColumn('doc_id', alias="ors",
filters=self.filters + [EQ('dairrhea_treated_with_ors', 'yes')])),
DatabaseColumn("Zinc given during diarrhea",
CountUniqueColumn('doc_id', alias="zinc",
filters=self.filters + [EQ('dairrhea_treated_with_zinc', 'yes')]))
]
class ImmunizationDetailsFirstYear(ImmunizationOverview):
title = 'Immunization Overview (0 - 1 yrs)'
slug = 'immunization_first_year_overview'
@property
def columns(self):
columns = super(ImmunizationDetailsFirstYear, self).columns
del columns[6:8]
del columns[-2:]
cols1 = [
DatabaseColumn("OPV0",
CountUniqueColumn('doc_id', alias="opv0", filters=self.filters + [EQ('opv0', 'yes')])
),
DatabaseColumn("HEP0",
CountUniqueColumn('doc_id', alias="hep0", filters=self.filters + [EQ('hepb0', 'yes')])
),
DatabaseColumn("OPV1",
CountUniqueColumn('doc_id', alias="opv1", filters=self.filters + [EQ('opv1', 'yes')])
),
DatabaseColumn("HEP1",
CountUniqueColumn('doc_id', alias="hep1", filters=self.filters + [EQ('hepb1', 'yes')])
),
DatabaseColumn("DPT1",
CountUniqueColumn('doc_id', alias="dpt1", filters=self.filters + [EQ('dpt1', 'yes')])
),
DatabaseColumn("OPV2",
CountUniqueColumn('doc_id', alias="opv2", filters=self.filters + [EQ('opv2', 'yes')])
),
DatabaseColumn("HEP2",
CountUniqueColumn('doc_id', alias="hep2", filters=self.filters + [EQ('hepb2', 'yes')])
),
DatabaseColumn("DPT2",
CountUniqueColumn('doc_id', alias="dpt2", filters=self.filters + [EQ('dpt2', 'yes')])
),
]
cols2 = [
DatabaseColumn("OPV0 Total Eligible",
CountUniqueColumn('doc_id', alias="opv0_eligible", filters=self.filters)),
DatabaseColumn("HEP0 Total Eligible",
CountUniqueColumn('doc_id', alias="hep0_eligible", filters=self.filters)),
DatabaseColumn("OPV1 Total Eligible",
CountUniqueColumn('doc_id', alias="opv1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_40')])),
DatabaseColumn("HEP1 Total Eligible",
CountUniqueColumn('doc_id', alias="hep1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_40')])),
DatabaseColumn("DPT1 Total Eligible",
CountUniqueColumn('doc_id', alias="dpt1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_40')])),
DatabaseColumn("OPV2 Total Eligible",
CountUniqueColumn('doc_id', alias="opv2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_75')])),
DatabaseColumn("HEP2 Total Eligible",
CountUniqueColumn('doc_id', alias="hep2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_75')])),
DatabaseColumn("DPT2 Total Eligible",
CountUniqueColumn('doc_id', alias="dpt2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_75')]))
]
cols3 = [
DatabaseColumn("VitA1",
CountUniqueColumn('doc_id', alias="vita1", filters=self.filters + [EQ('vita1', 'yes')]))
]
cols4 = [
DatabaseColumn("VitA1 Total Eligible",
CountUniqueColumn('doc_id', alias="vita1_eligible",
filters=self.filters + [LTE('dob', 'today_minus_273')]))
]
return columns[:1] + cols1 + columns[1:5] + cols3 + columns[5:-5] \
+ cols2 + columns[-5:-1] + cols4 + columns[-1:]
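    # Added note (not in the original source): the splice above keeps the
    # inherited ImmunizationOverview layout (dose counts first, their
    # "Total Eligible" counterparts second) and threads the first-year
    # additions into the matching halves: cols1/cols3 join the counts,
    # cols2/cols4 join the eligibility columns. The inherited column names
    # themselves are defined on ImmunizationOverview elsewhere in this package.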
class ImmunizationDetailsSecondYear(ImmunizationOverview):
title = 'Immunization Overview (1 - 2 yrs)'
slug = 'immunization_second_year_overview'
@property
def columns(self):
return [
DatabaseColumn("VitA2", CountUniqueColumn('doc_id', alias="vita2",
filters=self.filters + [EQ('vita2', 'yes')])),
DatabaseColumn("DPT-OPT Booster",
CountUniqueColumn('doc_id', alias="dpt_opv_booster",
filters=self.filters + [EQ('dpt_opv_booster', 'yes')])),
DatabaseColumn("VitA3",
CountUniqueColumn('doc_id', alias="vita3",
filters=self.filters + [EQ('vita3', 'yes')])),
DatabaseColumn("VitA2 Total Eligible",
CountUniqueColumn('doc_id', alias="vita2_eligible",
filters=self.filters + [LTE('dob', 'today_minus_547')])),
DatabaseColumn("DPT-OPT Booster Total Eligible",
CountUniqueColumn('doc_id', alias="dpt_opv_booster_eligible",
filters=self.filters + [LTE('dob', 'today_minus_548')])),
DatabaseColumn("VitA3 Total Eligible",
CountUniqueColumn('doc_id', alias="vita3_eligible",
filters=self.filters + [LTE('dob', 'today_minus_700')]))
]
class ChildDeworming(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'children_deworming'
title = 'Child Deworming'
@property
def headers(self):
return DataTablesHeader(*[DataTablesColumn('Entity'), DataTablesColumn('Number'), DataTablesColumn('Total Eligible'), DataTablesColumn('Percentage')])
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
return [[{'sort_key': self.columns[0].header, 'html': self.columns[0].header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'], self.columns[0].slug)},
{'sort_key': self.data[self.columns[0].slug], 'html': self.data[self.columns[0].slug]},
{'sort_key': self.data[self.columns[1].slug], 'html': self.data[self.columns[1].slug],
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['child_health_indicators'], self.columns[1].slug)},
{'sort_key': self.percent_fn(self.data[self.columns[1].slug], self.data[self.columns[0].slug]),
'html': self.percent_fn(self.data[self.columns[1].slug], self.data[self.columns[0].slug])}
]]
@property
def columns(self):
return [
DatabaseColumn("Deworming dose in last 6 months",
CountUniqueColumn('doc_id',
alias="deworming",
filters=self.filters + [EQ('deworm', 'yes')]
)
),
DatabaseColumn("Deworming Total Eligible",
CountUniqueColumn('doc_id',
alias="deworming_total_eligible",
filters=self.filters + [LTE('dob', 'today_minus_365')]
)
),
]
class EBFStoppingDetails(BaseSqlData):
table_name = "fluff_WorldVisionChildFluff"
slug = 'ebf_stopping_details'
title = 'EBF Stopping Details'
show_total = True
total_row_name = "EBF stopped"
@property
def filters(self):
filters = super(EBFStoppingDetails, self).filters
filters.append(EQ('exclusive_breastfeeding', 'no'))
filters.append(LTE('dob', 'today_minus_183'))
filters.append(NOTEQ('ebf_stop_age_month', 'empty'))
return filters
@property
def rows(self):
from custom.world_vision import CHILD_INDICATOR_TOOLTIPS
total = sum(v for v in self.data.values())
result = []
for column in self.columns:
percent = self.percent_fn(total, self.data[column.slug])
result.append([{'sort_key': column.header, 'html': column.header,
'tooltip': self.get_tooltip(CHILD_INDICATOR_TOOLTIPS['ebf_stopping_details'], column.slug)},
{'sort_key': self.data[column.slug], 'html': self.data[column.slug]},
{'sort_key': 'percentage', 'html': percent}
])
return result
@property
def columns(self):
return [
DatabaseColumn("EBF stopped between 0-1 month",
CountUniqueColumn('doc_id', alias="stopped_0_1",
filters=self.filters + [LTE('ebf_stop_age_month', '1')])
),
DatabaseColumn("EBF stopped between 1-3 month",
CountUniqueColumn('doc_id', alias="stopped_1_3",
filters=self.filters + [AND([GT('ebf_stop_age_month', '1'), LTE('ebf_stop_age_month', '3')])])
),
DatabaseColumn("EBF stopped between 3-5 month",
CountUniqueColumn('doc_id', alias="stopped_3_5",
filters=self.filters + [AND([GT('ebf_stop_age_month', '3'), LTE('ebf_stop_age_month', '5')])])
),
DatabaseColumn("EBF stopped between 5-6 month",
CountUniqueColumn('doc_id', alias="stopped_5_6",
filters=self.filters + [AND([GT('ebf_stop_age_month', '5'), LTE('ebf_stop_age_month', '6')])])
)
]
|
bsd-3-clause
| -8,137,775,323,974,969,000
| 46.163866
| 164
| 0.529681
| false
| 4.07984
| true
| false
| false
|
OnroerendErfgoed/pyramid_urireferencer
|
pyramid_urireferencer/models.py
|
1
|
4161
|
# -*- coding: utf-8 -*-
import json
class RegistryResponse:
"""
Represents what the registry will send back to a client when asked if
a certain uri is used somewhere.
    :param string query_uri: Uri of the resource under survey.
:param boolean success: Were all the queries successful?
:param boolean has_references: Were any references found?
:param int count: How many references were found?
:param list applications: A list of application results.
"""
def __init__(self, query_uri, success, has_references, count, applications):
self.query_uri = query_uri
self.success = success
self.has_references = has_references
self.count = count
self.applications = applications
@staticmethod
def load_from_json(data):
"""
        Load a :class:`RegistryResponse` from a dictionary or a string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
applications = [
ApplicationResponse.load_from_json(a) for a in data['applications']
] if data['applications'] is not None else []
return RegistryResponse(
data['query_uri'], data['success'],
data['has_references'], data['count'], applications
)
def to_json(self):
return {
"query_uri": self.query_uri,
"success": self.success,
"has_references": self.has_references,
"count": self.count,
"applications": [app.to_json() for app in self.applications]
}
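# Illustrative round trip (added; not part of the original module). The uri is
# a made-up example value:
#
#   resp = RegistryResponse('http://id.example.org/foo/1', True, False, 0, [])
#   same = RegistryResponse.load_from_json(resp.to_json())
#   assert same.count == 0 and not same.has_references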
class ApplicationResponse:
"""
Represents what a certain application will send back to the registry when
asked if a certain uri is used by the application.
:param string title: Title of the application
:param string uri: A uri for the application, not guaranteed to be a http url.
:param string service_url: The url that answered the question
    :param boolean success: Was the query successful?
:param boolean has_references: Were any references found?
:param int count: How many references were found?
:param list items: A list of items that have a reference to the \
uri under survey. Limited to 5 items for performance reasons.
"""
def __init__(self, title, uri, service_url, success, has_references, count, items):
self.title = title
self.uri = uri
self.service_url = service_url
self.success = success
self.has_references = has_references
self.count = count
self.items = items
@staticmethod
def load_from_json(data):
"""
Load a :class:`ApplicationResponse` from a dictionary or string (that
will be parsed as json).
"""
if isinstance(data, str):
data = json.loads(data)
items = [Item.load_from_json(a) for a in data['items']] if data['items'] is not None else []
return ApplicationResponse(
data['title'], data['uri'], data['service_url'],
data['success'], data['has_references'], data['count'], items
)
def to_json(self):
return {
"title": self.title,
"uri": self.uri,
"service_url": self.service_url,
"success": self.success,
"has_references": self.has_references,
"count": self.count,
"items": [item.to_json() for item in self.items] if self.items else []
}
class Item:
"""
A single item that holds a reference to the queried uri.
:param string title: Title of the item.
:param string uri: Uri of the item.
"""
def __init__(self, title, uri):
self.title = title
self.uri = uri
@staticmethod
def load_from_json(data):
"""
        Load a :class:`Item` from a dictionary or string (that will be parsed
        as json).
"""
if isinstance(data, str):
data = json.loads(data)
return Item(data['title'], data['uri'])
def to_json(self):
return {
"title": self.title,
"uri": self.uri
}
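# Illustrative usage (added; not part of the original module), with made-up
# values:
#
#   item = Item.load_from_json(
#       '{"title": "Example item", "uri": "http://id.example.org/foo/1"}')
#   item.to_json()
#   # -> {'title': 'Example item', 'uri': 'http://id.example.org/foo/1'}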
|
mit
| -6,828,652,018,221,024,000
| 31.76378
| 100
| 0.596972
| false
| 4.267692
| false
| false
| false
|
googleads/google-ads-python
|
google/ads/googleads/v6/resources/types/currency_constant.py
|
1
|
1954
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.ads.googleads.v6.resources",
marshal="google.ads.googleads.v6",
manifest={"CurrencyConstant",},
)
class CurrencyConstant(proto.Message):
r"""A currency constant.
Attributes:
resource_name (str):
Output only. The resource name of the currency constant.
Currency constant resource names have the form:
``currencyConstants/{code}``
code (str):
Output only. ISO 4217 three-letter currency
code, e.g. "USD".
name (str):
Output only. Full English name of the
currency.
symbol (str):
Output only. Standard symbol for describing
this currency, e.g. '$' for US Dollars.
billable_unit_micros (int):
Output only. The billable unit for this
currency. Billed amounts should be multiples of
this value.
"""
resource_name = proto.Field(proto.STRING, number=1)
code = proto.Field(proto.STRING, number=6, optional=True)
name = proto.Field(proto.STRING, number=7, optional=True)
symbol = proto.Field(proto.STRING, number=8, optional=True)
billable_unit_micros = proto.Field(proto.INT64, number=9, optional=True)
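# Illustrative construction (added; not part of the generated file). proto-plus
# messages accept their fields as keyword arguments; the values below are
# example data only:
#
#   usd = CurrencyConstant(
#       resource_name="currencyConstants/USD",
#       code="USD",
#       name="US Dollar",
#       symbol="$",
#       billable_unit_micros=10000,
#   )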
__all__ = tuple(sorted(__protobuf__.manifest))
|
apache-2.0
| -3,993,784,063,970,901,500
| 32.118644
| 76
| 0.660184
| false
| 4.028866
| false
| false
| false
|
santoshghimire/IL-Jobcrawl
|
jobcrawl/dump_client_changes.py
|
1
|
4316
|
import warnings
warnings.filterwarnings("ignore")
import argparse
from datetime import datetime, timedelta
import pandas as pd
import pymysql
from jobcrawl import settings
DATE_FMT = "%d/%m/%Y"
def parse_dates(sd, ed):
if not ed:
ed = datetime.today().strftime(DATE_FMT)
return datetime.strptime(sd, DATE_FMT), datetime.strptime(ed, DATE_FMT)
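# Illustrative call (added; not part of the original script):
#   parse_dates("01/01/2020", None)
#   # -> (datetime(2020, 1, 1, 0, 0), today's date at midnight)
# A missing end date defaults to today; the round trip through DATE_FMT
# truncates it to day precision.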
def main(site, start_date, end_date):
start_date, end_date = parse_dates(start_date, end_date)
if start_date > end_date:
print("Start date is greater than end date")
return
print("\nGetting data from {} to {}\n".format(
start_date.strftime(DATE_FMT), end_date.strftime(DATE_FMT)))
conn = pymysql.connect(
host=settings.MYSQL_HOST, port=3306, user=settings.MYSQL_USER,
passwd=settings.MYSQL_PASSWORD, db=settings.MYSQL_DBNAME,
charset='utf8'
)
df_all = []
current_date = None
while True:
if current_date is None:
current_date = start_date - timedelta(days=1)
current_date_str = current_date.strftime(DATE_FMT)
sql = """SELECT distinct(Company) FROM sites_datas
WHERE Site='%s' and Crawl_Date='%s'""" % (site, current_date_str)
data_df = pd.read_sql(sql, conn)
print("Date: {}, Unique company size = {}".format(
current_date_str, data_df.shape[0]))
df_all.append((current_date_str, data_df))
if current_date >= end_date:
break
current_date += timedelta(days=1)
print("\nTotal df retrieved = {}".format(len(df_all)))
print("Dates of all dfs = {}\n".format([i[0] for i in df_all]))
yest_df = None
new_companies = pd.DataFrame.from_dict({'Company': [], 'Report Date': []})
removed_companies = pd.DataFrame.from_dict({'Company': [], 'Report Date': []})
for date_str, df in df_all:
if yest_df is None:
yest_df = df
continue
yest_list = yest_df['Company'].tolist()
# if None in yest_list:
# yest_list.remove(None)
today_list = df['Company'].tolist()
# if None in today_list:
# today_list.remove(None)
new = list(set(today_list) - set(yest_list))
removed = list(set(yest_list) - set(today_list))
new_temp = pd.DataFrame.from_dict({'Company': new,
'Report Date': [date_str] * len(new)})
removed_temp = pd.DataFrame.from_dict({'Company': removed,
'Report Date': [date_str] * len(removed)})
print("Report: Date {}: New={}, Removed={}".format(
date_str, new_temp.shape[0], removed_temp.shape[0]))
new_companies = new_companies.append(new_temp, ignore_index=True)
removed_companies = removed_companies.append(removed_temp, ignore_index=True)
print("Combined Report: Date {}: New={}, Removed={}".format(
date_str, new_companies.shape[0], removed_companies.shape[0]))
yest_df = df
prefix = "{}_to_{}".format(
start_date.strftime("%d-%m-%y"), end_date.strftime("%d-%m-%y"))
new_companies.to_csv("{}_{}".format(prefix, "new_company_report_dump.csv"),
index=False, encoding='utf-8')
removed_companies.to_csv("{}_{}".format(prefix, "removed_company_report_dump.csv"),
index=False, encoding='utf-8')
total_new = new_companies['Company'].tolist()
total_removed = removed_companies['Company'].tolist()
total_new_distinct = set(total_new)
total_removed_distinct = set(total_removed)
print("Distinct companies in New companies report = {}".format(
len(total_new_distinct)))
print("Distinct companies in Removed companies report = {}".format(
len(total_removed_distinct)))
print("\nDone")
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Dump Client Changes')
parser.add_argument('-s', '--site', help="Site", required=True)
parser.add_argument('-sd', '--start_date', help="Start Date (dd/mm/yyyy)",
required=True)
parser.add_argument('-ed', '--end_date', help="End Date (dd/mm/yyyy)",
required=False)
args = parser.parse_args()
main(args.site, args.start_date, args.end_date)
|
mit
| -7,969,661,471,214,422,000
| 38.236364
| 87
| 0.593605
| false
| 3.575808
| false
| false
| false
|
tchakravarty/PythonExamples
|
Code/Miscellaneous/FileReadWithNA.py
|
1
|
1710
|
__author__ = 'tirthankar'
import pandas as pd
import xlrd as xl
import numpy as np
# straight read
pdata = pd.read_csv(
"Code/Miscellaneous/Data/pwt71_11302012version/pwt71_wo_country_names_wo_g_vars.csv")
# passing a string
pdata2 = pd.read_csv("Code/Miscellaneous/Data/pwt71_11302012version/pwt71_wo_country_names_wo_g_vars.csv",
na_values = "AFG")
pdata2["isocode"]
# passing a list
pdata3 = pd.read_csv("Code/Miscellaneous/Data/pwt71_11302012version/pwt71_wo_country_names_wo_g_vars.csv",
na_values = ["AFG"])
pdata3["isocode"]
# read the file directly from Excel using xlrd
file_location = "Code/Miscellaneous/Data/pwt71_11302012version/pwt71_vars_forWeb.xls"
xlPWT = xl.open_workbook(file_location)
xlPWT1 = xlPWT.sheet_by_index(0)
xlPWT1.cell_value(3, 1)
xlPWT1.nrows
xlPWT1.ncols
# read file directly using pd.read_excel
pmetadata = pd.read_excel("Code/Miscellaneous/Data/pwt71_11302012version/pwt71_vars_forWeb.xls")
pd.read_csv("Code/Miscellaneous/Data/pwt.csv", na_values = ["na"])
textPWT = """
country ccode year Pop XRAT currency ppp t1
Afghanistan AFG 1950 8150.368 na na na na
Afghanistan AFG 1951 8284.473 na na na na
Afghanistan AFG 1952 8425.333 na na na na
Afghanistan AFG 1953 8573.217 na na na na
"""
liPWT = textPWT.split("\n")
liPWT = [x.split() for x in liPWT][1:6]
npPWT = np.array(liPWT)
pdPWT = pd.DataFrame(npPWT[1:, :], columns=npPWT[0, :])
pdPWT = pdPWT.replace('na', np.nan, regex=True)
pdPWT = pdPWT.convert_objects(convert_numeric=True)
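# Added note (not in the original script): convert_objects() is the pandas API
# of this script's era; the same per-column coercion could be written as, e.g.:
#   pdPWT['Pop'] = pd.to_numeric(pdPWT['Pop'], errors='coerce')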
|
apache-2.0
| -7,479,459,970,993,196,000
| 24.147059
| 106
| 0.645614
| false
| 2.676056
| false
| false
| false
|
CarterBain/AlephNull
|
alephnull/sources/data_frame_source.py
|
1
|
4637
|
#
# Copyright 2013 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools to generate data sources.
"""
import pandas as pd
from alephnull.gens.utils import hash_args
from alephnull.sources.data_source import DataSource
class DataFrameSource(DataSource):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : filter to remove the sids
"""
def __init__(self, data, **kwargs):
assert isinstance(data.index, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.columns)
self.start = kwargs.get('start', data.index[0])
self.end = kwargs.get('end', data.index[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
return {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt, series in self.data.iterrows():
for sid, price in series.iterkv():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
'price': price,
'volume': 1000,
}
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
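# Illustrative usage (added; not part of the original module), assuming a
# price DataFrame indexed by a DatetimeIndex with one column per sid:
#
#   import pandas as pd
#   prices = pd.DataFrame({'AAPL': [100.0, 101.5]},
#                         index=pd.date_range('2013-01-02', periods=2))
#   source = DataFrameSource(prices)
#   for event in source.raw_data:
#       print event['dt'], event['sid'], event['price']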
class DataPanelSource(DataSource):
"""
Yields all events in event_list that match the given sid_filter.
If no event_list is specified, generates an internal stream of events
to filter. Returns all events if filter is None.
Configuration options:
sids : list of values representing simulated internal sids
start : start date
delta : timedelta between internal events
filter : filter to remove the sids
"""
def __init__(self, data, **kwargs):
assert isinstance(data.major_axis, pd.tseries.index.DatetimeIndex)
self.data = data
# Unpack config dictionary with default values.
self.sids = kwargs.get('sids', data.items)
self.start = kwargs.get('start', data.major_axis[0])
self.end = kwargs.get('end', data.major_axis[-1])
# Hash_value for downstream sorting.
self.arg_string = hash_args(data, **kwargs)
self._raw_data = None
@property
def mapping(self):
mapping = {
'dt': (lambda x: x, 'dt'),
'sid': (lambda x: x, 'sid'),
'price': (float, 'price'),
'volume': (int, 'volume'),
}
# Add additional fields.
for field_name in self.data.minor_axis:
if field_name in ['price', 'volume', 'dt', 'sid']:
continue
mapping[field_name] = (lambda x: x, field_name)
return mapping
@property
def instance_hash(self):
return self.arg_string
def raw_data_gen(self):
for dt in self.data.major_axis:
df = self.data.major_xs(dt)
for sid, series in df.iterkv():
if sid in self.sids:
event = {
'dt': dt,
'sid': sid,
}
for field_name, value in series.iteritems():
event[field_name] = value
yield event
@property
def raw_data(self):
if not self._raw_data:
self._raw_data = self.raw_data_gen()
return self._raw_data
|
apache-2.0
| 4,802,013,538,821,213,000
| 29.11039
| 74
| 0.574941
| false
| 4.110816
| false
| false
| false
|
CMUSV-VisTrails/WorkflowRecommendation
|
vistrails/gui/modules/query_configuration.py
|
1
|
6088
|
###############################################################################
##
## Copyright (C) 2006-2011, University of Utah.
## All rights reserved.
## Contact: vistrails@sci.utah.edu
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the University of Utah nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from PyQt4 import QtCore, QtGui
from core.utils import any, expression
from core import system
from constant_configuration import StandardConstantWidget, ColorWidget
############################################################################
class QueryWidgetMixin(object):
def __init__(self, contents=None, query_method=None):
self._last_contents = contents
self._last_query_method = query_method
# updateMethod intercepts calls from a child widget like the
# contents_widget
def updateMethod(self):
self.update_parent()
def update_parent(self):
new_contents = self.contents()
new_query_method = self.query_method()
if (new_contents != self._last_contents or
new_query_method != self._last_query_method):
if self.parent() and hasattr(self.parent(), 'updateMethod'):
self.parent().updateMethod()
self._last_contents = new_contents
self._last_query_method = new_query_method
self.emit(QtCore.SIGNAL('contentsChanged'), (self,new_contents))
class BaseQueryWidget(QtGui.QWidget, QueryWidgetMixin):
def __init__(self, contents_klass, query_methods, param, parent=None):
QtGui.QWidget.__init__(self, parent)
QueryWidgetMixin.__init__(self, param.strValue, param.queryMethod)
contents = param.strValue
queryMethod = param.queryMethod
layout = QtGui.QHBoxLayout()
self.op_button = QtGui.QToolButton()
self.op_button.setPopupMode(QtGui.QToolButton.InstantPopup)
self.op_button.setArrowType(QtCore.Qt.NoArrow)
action_group = QtGui.QActionGroup(self.op_button)
actions = []
checked_exists = False
for method in query_methods:
action = QtGui.QAction(method, self)
action.setCheckable(True)
action_group.addAction(action)
if method == queryMethod:
action.setChecked(True)
checked_exists = True
actions.append(action)
if not checked_exists:
actions[0].setChecked(True)
self._last_query_method = str(actions[0].text())
menu = QtGui.QMenu(self.op_button)
menu.addActions(actions)
self.op_button.setMenu(menu)
self.op_button.setText(action_group.checkedAction().text())
self.contents_widget = contents_klass(param)
self.contents_widget.setContents(contents)
layout.setMargin(0)
layout.setSpacing(0)
layout.addWidget(self.op_button)
layout.addWidget(self.contents_widget)
self.setLayout(layout)
self.connect(self.op_button, QtCore.SIGNAL('triggered(QAction*)'),
self.update_action)
def contents(self):
return self.contents_widget.contents()
def setContents(self, strValue, silent=True):
self.contents_widget.setContents(strValue)
if not silent:
self.update_parent()
def update_action(self, action):
self.op_button.setText(action.text())
self.update_parent()
def query_method(self):
for action in self.op_button.menu().actions():
if action.isChecked():
return str(action.text())
class StandardQueryWidget(BaseQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, StandardConstantWidget, ["==", "!="],
param, parent)
class StringQueryWidget(StandardQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, StandardConstantWidget,
["*[]*", "==", "=~"],
param, parent)
class NumericQueryWidget(StandardQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, StandardConstantWidget,
["==", "<", ">", "<=", ">="],
param, parent)
class ColorQueryWidget(StandardQueryWidget):
def __init__(self, param, parent=None):
BaseQueryWidget.__init__(self, ColorWidget, ["2.3", "5", "10", "50"],
param, parent)
|
bsd-3-clause
| -6,465,648,025,933,796,000
| 40.69863
| 79
| 0.621058
| false
| 4.398844
| false
| false
| false
|
NicoVarg99/daf-recipes
|
ckan/ckan/ckan/ckan/tests/legacy/functional/api/model/test_ratings.py
|
2
|
4089
|
# encoding: utf-8
from nose.tools import assert_equal
from nose.plugins.skip import SkipTest
from ckan import model
from ckan.lib.create_test_data import CreateTestData
from ckan.tests.legacy.functional.api.base import BaseModelApiTestCase
from ckan.tests.legacy.functional.api.base import Api1TestCase as Version1TestCase
from ckan.tests.legacy.functional.api.base import Api2TestCase as Version2TestCase
class RatingsTestCase(BaseModelApiTestCase):
@classmethod
def setup_class(cls):
CreateTestData.create()
cls.testsysadmin = model.User.by_name(u'testsysadmin')
cls.comment = u'Comment umlaut: \xfc.'
cls.user_name = u'annafan' # created in CreateTestData
cls.init_extra_environ(cls.user_name)
@classmethod
def teardown_class(cls):
model.repo.rebuild_db()
def test_register_get(self):
raise SkipTest('"Rating register get" functionality is not implemented')
rating1 = model.Rating(user_ip_address='1.2.3.4',
package=self.anna,
rating=4.0)
rating2 = model.Rating(user=model.User.by_name(u'annafan'),
package=self.anna,
rating=2.0)
model.Session.add_all((rating1, rating2))
model.repo.commit_and_remove()
offset = self.rating_offset()
res = self.app.get(offset, status=[200])
def test_entity_get(self):
raise SkipTest('"Rating entity get" functionality is not implemented')
rating = model.Rating(user_ip_address='1.2.3.4',
package=self.anna,
rating=4.0)
model.Session.add(rating)
model.repo.commit_and_remove()
offset = self.rating_offset(self.anna.name)
res = self.app.get(offset, status=[200])
        assert_equal(res, rating.rating)
def test_register_post(self):
# Test Rating Register Post 200.
self.clear_all_tst_ratings()
offset = self.rating_offset()
rating_opts = {'package':u'warandpeace',
'rating':5}
pkg_name = rating_opts['package']
postparams = '%s=1' % self.dumps(rating_opts)
res = self.app.post(offset, params=postparams, status=[201],
extra_environ=self.extra_environ)
model.Session.remove()
pkg = self.get_package_by_name(pkg_name)
assert pkg
assert len(pkg.ratings) == 1
assert pkg.ratings[0].rating == rating_opts['rating'], pkg.ratings
# Get package to see rating
offset = self.package_offset(pkg_name)
res = self.app.get(offset, status=[200])
assert pkg_name in res, res
assert '"ratings_average": %s.0' % rating_opts['rating'] in res, res
assert '"ratings_count": 1' in res, res
model.Session.remove()
# Rerate package
offset = self.rating_offset()
postparams = '%s=1' % self.dumps(rating_opts)
res = self.app.post(offset, params=postparams, status=[201],
extra_environ=self.extra_environ)
model.Session.remove()
pkg = self.get_package_by_name(pkg_name)
assert pkg
assert len(pkg.ratings) == 1
assert pkg.ratings[0].rating == rating_opts['rating'], pkg.ratings
def test_entity_post_invalid(self):
self.clear_all_tst_ratings()
offset = self.rating_offset()
rating_opts = {'package':u'warandpeace',
'rating':0}
postparams = '%s=1' % self.dumps(rating_opts)
res = self.app.post(offset, params=postparams, status=[409],
extra_environ=self.extra_environ)
self.assert_json_response(res, 'rating')
model.Session.remove()
pkg = self.get_package_by_name(rating_opts['package'])
assert pkg
assert len(pkg.ratings) == 0
class TestRatingsVersion1(Version1TestCase, RatingsTestCase): pass
class TestRatingsVersion2(Version2TestCase, RatingsTestCase): pass
|
gpl-3.0
| 3,643,774,490,865,021,400
| 38.317308
| 83
| 0.60944
| false
| 3.73766
| true
| false
| false
|
snufiks/nmap2db
|
nmap2db/database.py
|
1
|
27070
|
#!/usr/bin/env python
#
# Copyright (c) 2014 Rafael Martinez Guerrero (PostgreSQL-es)
# rafael@postgresql.org.es / http://www.postgresql.org.es/
#
# This file is part of Nmap2db
# https://github.com/rafaelma/nmap2db
#
# Nmap2db is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Nmap2db is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Nmap2db. If not, see <http://www.gnu.org/licenses/>.
import sys
import psycopg2
import psycopg2.extensions
from psycopg2.extras import wait_select
from nmap2db.prettytable import *
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
#
# Class: nmap2db_db
#
# This class is used to interact with a postgreSQL database
# It is used to open and close connections to the database
# and to set/get some information for/of the connection.
#
class nmap2db_db():
"""This class is used to interact with a postgreSQL database"""
# ############################################
# Constructor
# ############################################
def __init__(self, dsn,logs,application):
""" The Constructor."""
self.dsn = dsn
self.logs = logs
self.application = application
self.conn = None
self.server_version = None
self.cur = None
self.output_format = 'table'
# ############################################
# Method pg_connect()
#
# A generic function to connect to PostgreSQL using Psycopg2
# We will define the application_name parameter if it is not
# defined in the DSN and the postgreSQL server version >= 9.0
# ############################################
def pg_connect(self):
"""A generic function to connect to PostgreSQL using Psycopg2"""
try:
self.conn = psycopg2.connect(self.dsn)
if self.conn:
self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
wait_select(self.conn)
self.cur = self.conn.cursor()
self.server_version = self.conn.server_version
if (self.server_version >= 90000 and 'application_name=' not in self.dsn):
try:
self.cur.execute('SET application_name TO %s',(self.application,))
self.conn.commit()
except psycopg2.Error as e:
self.logs.logger.error('Could not define the application_name parameter: - %s', e)
except psycopg2.Error as e:
raise e
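    # Illustrative usage (added; not part of the original module). The DSN and
    # application name below are placeholders:
    #
    #   db = nmap2db_db('host=127.0.0.1 dbname=nmap2db user=nmap2db',
    #                   logs, 'nmap2db-cli')
    #   db.pg_connect()
    #   ...
    #   db.pg_close()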
# ############################################
# Method pg_close()
# ############################################
def pg_close(self):
"""A generic function to close a postgreSQL connection using Psycopg2"""
if self.cur:
try:
self.cur.close()
except psycopg2.Error as e:
print "\n* ERROR - Could not close the cursor used in this connection: \n%s" % e
if self.conn:
try:
self.conn.close()
except psycopg2.Error as e:
print "\n* ERROR - Could not close the connection to the database: \n%s" % e
# ############################################
# Method
# ############################################
def show_network_definitions(self):
"""A function to get a list with the networks defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT * FROM show_network_definitions')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["Network","Remarks"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_scan_definitions(self):
"""A function to get a list with the scans defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT * FROM show_scan_definitions')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["ScanID","Remarks","Arguments"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_scan_jobs(self,network_cidr):
"""A function to get a list with the scans jobs defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
if network_cidr == 'ALL':
self.cur.execute('SELECT * FROM show_scan_jobs')
self.conn.commit()
else:
self.cur.execute('SELECT * FROM show_scan_jobs WHERE "Network" = %s',(network_cidr,))
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["ScanID","Remarks","Arguments"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_host_reports(self,host,from_timestamp,to_timestamp):
"""A function to get a list or scan reports for a host"""
try:
self.pg_connect()
if self.cur:
try:
if (host.replace('.','')).replace('/','').isdigit():
self.cur.execute('SELECT * FROM show_host_reports WHERE "IPaddress" = %s AND "Registered" >= %s AND "Registered" <= %s',(host,from_timestamp, to_timestamp))
else:
self.cur.execute('SELECT * FROM show_host_reports WHERE "Hostname" @> %s AND "Registered" >= %s AND "Registered" <= %s',([host],from_timestamp, to_timestamp))
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["ScanID","Finished","Duration","IPaddress","Hostname","State"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_host_details(self,report_id):
"""A function to get host details for a reportID"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT * FROM show_host_details WHERE "ReportID" = %s',(report_id,))
self.conn.commit()
x = PrettyTable([".",".."],header = False)
x.align["."] = "r"
x.align[".."] = "l"
x.padding_width = 1
for record in self.cur:
x.add_row(["ReportID:",record[0]])
x.add_row(["Registered:",str(record[1])])
x.add_row(["ScanID:",record[2]])
x.add_row(["",""])
x.add_row(["Network:",record[3]])
x.add_row(["Network info:",record[4]])
x.add_row(["",""])
x.add_row(["IPaddress:",record[5]])
x.add_row(["Addrtype:",record[6]])
x.add_row(["Hostname:",record[7]])
x.add_row(["Hostname type:",record[8]])
x.add_row(["",""])
x.add_row(["OStype:",record[9]])
x.add_row(["OSvendor:",record[10]])
x.add_row(["OSfamily:",record[11]])
x.add_row(["OSgen:",record[12]])
x.add_row(["OSname:",record[13]])
x.add_row(["",""])
x.add_row(["State:",record[14]])
x.add_row(["State reason:",record[15]])
print x
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_services_details(self,report_id):
"""A function to get a list of services found in a scan report"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT "Prot","Port","State","Reason","Service","Method","Product","Prod.ver","Prod.info" FROM show_services_details WHERE report_id = %s',(report_id,))
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["Port","State","Reason","Service","Method","Product","Prod.ver","Prod.info"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_ports(self,network_list,port_list,service_list,from_timestamp,to_timestamp):
"""A function to get a list of ports"""
try:
self.pg_connect()
if self.cur:
try:
if network_list != None:
network_sql = 'AND (FALSE '
for network in network_list:
network_sql = network_sql + 'OR "IPaddress" <<= \'' + network + '\' '
network_sql = network_sql + ') '
else:
network_sql = ''
if port_list != None:
port_sql = 'AND "Port" IN (' + ','.join(port_list) + ') '
else:
port_sql = ''
if service_list != None:
service_sql = 'AND (FALSE '
for service in service_list:
service_sql = service_sql + 'OR "Service" LIKE \'' + service + '\' '
service_sql = service_sql + ') '
else:
service_sql = ''
self.cur.execute('WITH port_list AS(' +
'SELECT DISTINCT ON ("Port","Prot","IPaddress") ' +
'"IPaddress",' +
'"Port",' +
'"Prot",' +
'"State",' +
'"Service",' +
'"Product",' +
'"Prod.ver",' +
'"Prod.info" ' +
'FROM show_ports ' +
'WHERE registered >= \'' + str(from_timestamp) + '\' AND registered <= \'' + str(to_timestamp) + '\' ' +
network_sql +
port_sql +
service_sql + ')' +
'SELECT DISTINCT ON ("Port","Prot","IPaddress") ' +
'a."IPaddress",' +
'array_to_string(b.hostname,\' \') AS "Hostname",' +
'a."Port",' +
'a."Prot",' +
'a."State",' +
'a."Service",' +
'a."Product",' +
'a."Prod.ver",' +
'a."Prod.info" ' +
'FROM port_list a ' +
'JOIN host_info b ON a."IPaddress" = b.hostaddr ' +
'WHERE b.registered >= \'' + str(from_timestamp) + '\' AND b.registered <= \'' + str(to_timestamp) + '\' '
)
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IPaddress","Hostname","Port","Prot","State","Service","Product","Prod.ver","Prod.info"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_os(self,network_list,os_list,from_timestamp,to_timestamp):
"""A function to get a list og hostnames running an OS"""
try:
self.pg_connect()
if self.cur:
try:
if network_list != None:
network_sql = 'AND (FALSE '
for network in network_list:
network_sql = network_sql + 'OR "Network" <<= \'' + network + '\' '
network_sql = network_sql + ') '
else:
network_sql = ''
if os_list != None:
os_sql = 'AND (FALSE '
for osname in os_list:
os_sql = os_sql + 'OR "OSname" LIKE \'' + osname + '\' '
os_sql = os_sql + ') '
else:
os_sql = ''
                    fullstmt = ('SELECT DISTINCT ON ("IPaddress") "Registered", "IPaddress", "Hostname",' +
                                '"OSname" ' +
                                'FROM show_host_details ' +
                                'WHERE "Registered" >= \'' + str(from_timestamp) + '\' AND "Registered" <= \'' + str(to_timestamp) + '\' ' +
                                network_sql +
                                os_sql +
                                'ORDER BY "IPaddress"')
self.logs.logger.info(fullstmt)
self.cur.execute(fullstmt)
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IPaddress","Hostname","OSname"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def show_host_without_hostname(self):
"""A function to get a list of host without a hostname"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT "IPaddress","State","Last registration" FROM show_host_without_hostname')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IPaddress","State","Last registration"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def register_network(self,network_cidr,remarks):
"""A method to register a network_cidr"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT register_network(%s,%s)',(network_cidr,remarks))
self.conn.commit()
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def register_scan_job(self,network_cidr,scan_id,execution_interval,is_active):
"""A method to register a scan job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT register_scan_job(%s,%s,%s,%s)',(network_cidr,scan_id,execution_interval,is_active))
self.conn.commit()
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def get_next_scan_job(self):
"""A method to get the next scan job to run"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_next_scan_job()')
self.conn.commit()
scan_job_id = self.cur.fetchone()[0]
return scan_job_id
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def get_scan_job_args(self,scan_job_id):
"""A method to get the arguments for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_args(%s)',(scan_job_id,))
self.conn.commit()
scan_job_args = self.cur.fetchone()[0]
return scan_job_args
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method get_scan_job_command
# ############################################
def get_scan_job_command(self,scan_job_id):
"""A method to get the scan executable for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_command(%s)',(scan_job_id,))
self.conn.commit()
scan_job_command = self.cur.fetchone()[0]
return scan_job_command
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method get_scan_job_parsemethod
# ############################################
def get_scan_job_parsemethod(self,scan_job_id):
"""A method to get the parse method for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_parsemethod(%s)',(scan_job_id,))
self.conn.commit()
scan_job_parsemethod = self.cur.fetchone()[0]
return scan_job_parsemethod
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def get_scan_job_network(self,scan_job_id):
"""A method to get the network for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT get_scan_job_network_addr(%s)',(scan_job_id,))
self.conn.commit()
scan_job_network = self.cur.fetchone()[0]
return scan_job_network
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method get_scan_job_scope
# ############################################
def get_scan_job_scope(self,scan_job_id):
"""A method to get the scan scope for a scan_job"""
try:
self.pg_connect()
if self.cur:
try:
query = "SELECT scan_scope FROM scan_job WHERE id=%s"
self.logs.logger.info("SELECT scan_scope FROM scan_job WHERE id=%d",scan_job_id)
self.cur.execute(query, [scan_job_id])
self.conn.commit()
scan_job_scope = self.cur.fetchone()[0]
return scan_job_scope
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def save_scan_report(self,scan_job_id, report, scan_type):
"""A method to save a scan report"""
# self.logs.logger.info("save_scan_report PRE _%s_", scan_type)
sqlstmt = False
if scan_type == 'nmap_default':
# self.logs.logger.info("save_scan_report (nmap_default) _%s_", scan_type)
sqlstmt = 'SELECT save_scan_report_xml(%s,%s)'
elif scan_type == 'testssl':
# self.logs.logger.info("save_scan_report (testssl) _%s_", scan_type)
sqlstmt = 'SELECT save_ssl_report_json(%s,%s)'
else:
self.logs.logger.info("save_scan_report ELSE _%s_", scan_type)
# self.logs.logger.info("save_scan_report (report) _%s_", report)
# self.logs.logger.info("save_scan_report (scan_job_id) _%s_", scan_job_id)
try:
self.pg_connect()
if self.cur and sqlstmt:
try:
# self.logs.logger.info("save_scan_report (sqlstmt) _%s_", sqlstmt)
self.cur.execute(sqlstmt, (scan_job_id, report))
self.conn.commit()
return True
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def expand_network(self,scan_job_network):
"""A method to get all IPs in a network"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT expand_network(%s)',(scan_job_network,))
self.conn.commit()
return self.cur
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
# ############################################
# Method
# ############################################
def print_results(self,cur,colnames,left_columns):
'''A function to print a table with sql results'''
if self.output_format == 'table':
x = PrettyTable(colnames)
x.padding_width = 1
for column in left_columns:
x.align[column] = "l"
for records in cur:
columns = []
for index in range(len(colnames)):
columns.append(records[index])
x.add_row(columns)
print x.get_string()
print
elif self.output_format == 'csv':
for records in cur:
columns = []
for index in range(len(colnames)):
columns.append(str(records[index]))
print ','.join(columns)
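    # Added note (not in the original module): with output_format == 'table'
    # the cursor rows are rendered through PrettyTable; with 'csv' each row is
    # printed as one comma-joined line instead, e.g. (placeholder values):
    #   10.0.0.5,host.example.org,443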
# ############################################
# Method
# ############################################
def show_hosts(self):
"""A function to get a list with the scans defined in the system"""
try:
self.pg_connect()
if self.cur:
try:
self.cur.execute('SELECT "IP-address","Hostname","Last registration", "First registration" FROM show_hosts')
self.conn.commit()
colnames = [desc[0] for desc in self.cur.description]
self.print_results(self.cur,colnames,["IP-address","Hostname","Last registration", "First registration"])
except psycopg2.Error as e:
raise e
self.pg_close()
except psycopg2.Error as e:
raise e
|
gpl-3.0
| -4,005,357,262,043,700,700
| 32.669154
| 305
| 0.412893
| false
| 4.848648
| false
| false
| false
|
TunnelBlanket/Spirit
|
Spirit/Data/User.py
|
1
|
1753
|
# coding: utf-8
from sqlalchemy import Column, Integer, String, Boolean, Text, text
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
metadata = Base.metadata
class User(Base):
__tablename__ = 'users'
Id = Column(Integer, primary_key=True)
Username = Column(String(12, u'utf8mb4_unicode_ci'), nullable=False, unique=True)
Password = Column(String(128, u'utf8mb4_unicode_ci'), nullable=False)
Swid = Column(String(39, u'utf8mb4_unicode_ci'), nullable=False)
LoginKey = Column(String(32, u'utf8mb4_unicode_ci'))
ConfirmationHash = Column(String(128, u'utf8mb4_unicode_ci'))
Avatar = Column(Integer, nullable=False)
AvatarAttributes = Column(String(98, u'utf8mb4_unicode_ci'), nullable=False,server_default=text(
"""'{"spriteScale":100,"spriteSpeed":100,"ignoresBlockLayer":false,"invisible":false,"floating":false}'"""))
Coins = Column(Integer, nullable=False, server_default=text("'10000'"))
Moderator = Column(Boolean, nullable=False, default=False)
Inventory = Column(Text(collation=u'utf8mb4_unicode_ci'), nullable=False)
Color = Column(Integer, nullable=False, server_default=text("'1'"))
Head = Column(Integer, nullable=False, server_default=text("'0'"))
Face = Column(Integer, nullable=False, server_default=text("'0'"))
Neck = Column(Integer, nullable=False, server_default=text("'0'"))
Body = Column(Integer, nullable=False, server_default=text("'0'"))
Hands = Column(Integer, nullable=False, server_default=text("'0'"))
Feet = Column(Integer, nullable=False, server_default=text("'0'"))
Photo = Column(Integer, nullable=False, server_default=text("'0'"))
Pin = Column(Integer, nullable=False, server_default=text("'0'"))
|
gpl-3.0
| 4,836,467,860,719,150,000
| 55.548387
| 113
| 0.705077
| false
| 3.38417
| false
| false
| false
|
qguv/config
|
weechat/plugins/python/imap_status.py
|
1
|
5857
|
# -*- coding: utf-8 -*-
# Copyright (c) 2009-2015 by xt <xt@bash.no>
# (this script requires WeeChat 0.4.2 or newer)
#
# History:
# 2019-01-26, nils_2@freenode
# version 0.9: make script python3 compatible
# : remove option "message_color" and "separator_color"
# 2016-05-07, Sebastien Helleu <flashcode@flashtux.org>:
# version 0.8: add options "mailbox_color", "separator", "separator_color",
# remove extra colon in bar item content, use hook_process
# to prevent any freeze in WeeChat >= 1.5
# 2015-01-09, nils_2
# version 0.7: use eval_expression()
# 2010-07-12, TenOfTen
# version 0.6: beautify notification area
# 2010-03-17, xt
# version 0.5: fix caching of return message
# 2010-01-19, xt
# version 0.4: only run check when timer expired
# 2009-11-03, xt
# version 0.3: multiple mailbox support
# 2009-11-02, xt
# version 0.2: remove the imap "client" buffer, just do the unread count
# 2009-06-18, xt <xt@bash.no>
# version 0.1: initial release.
#
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
'''
Usage: put [imap] in your status bar items. (Or any other bar to your liking)
"/set weechat.bar.status.items".
'''
import imaplib as i
import re
import weechat as w
SCRIPT_NAME = "imap_status"
SCRIPT_AUTHOR = "xt <xt@bash.no>"
SCRIPT_VERSION = "0.9"
SCRIPT_LICENSE = "GPL3"
SCRIPT_DESC = "Bar item with unread imap messages count"
WEECHAT_VERSION = 0
IMAP_UNREAD = ''
# script options
settings = {
'username': '',
'password': '',
'hostname': '', # gmail uses imap.gmail.com
'port': '993',
'mailboxes': 'INBOX', # comma separated list of mailboxes (gmail: "Inbox")
'message': '${color:default}Mail: ',
'mailbox_color': 'default',
'separator': '${color:default}, ',
'count_color': 'default',
'interval': '5',
}
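# Example configuration (added; illustrative values only):
#   /set plugins.var.python.imap_status.hostname imap.example.com
#   /set plugins.var.python.imap_status.username me@example.com
#   /set plugins.var.python.imap_status.mailboxes "INBOX,Lists"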
def string_eval_expression(text):
return w.string_eval_expression(text, {}, {}, {})
class Imap(object):
"""Simple helper class for interfacing with IMAP server."""
iRe = re.compile(br"UNSEEN (\d+)")
conn = False
def __init__(self):
'''Connect and login.'''
username = string_eval_expression(w.config_get_plugin('username'))
password = string_eval_expression(w.config_get_plugin('password'))
hostname = string_eval_expression(w.config_get_plugin('hostname'))
port = int(w.config_get_plugin('port'))
if username and password and hostname and port:
M = i.IMAP4_SSL(hostname, port)
M.login(username, password)
self.conn = M
def unreadCount(self, mailbox='INBOX'):
if self.conn:
unreadCount = int(
self.iRe.search(
self.conn.status(mailbox, "(UNSEEN)")[1][0]).group(1))
return unreadCount
else:
w.prnt('', 'Problem with IMAP connection. Please check settings.')
return 0
def logout(self):
if not self.conn:
return
try:
self.conn.close()
except Exception:
self.conn.logout()
def imap_get_unread(data):
"""Return the unread count."""
imap = Imap()
if not w.config_get_plugin('message'):
output = ""
else:
output = '%s' % (
string_eval_expression(w.config_get_plugin('message')))
any_with_unread = False
mailboxes = w.config_get_plugin('mailboxes').split(',')
count = []
for mailbox in mailboxes:
mailbox = mailbox.strip()
unreadCount = imap.unreadCount(mailbox)
if unreadCount > 0:
any_with_unread = True
count.append('%s%s: %s%s' % (
w.color(w.config_get_plugin('mailbox_color')),
mailbox,
w.color(w.config_get_plugin('count_color')),
unreadCount))
imap.logout()
sep = '%s' % (
string_eval_expression(w.config_get_plugin('separator')))
output = output + sep.join(count) + w.color('reset')
return output if any_with_unread else ''
def imap_item_cb(data, item, window):
return IMAP_UNREAD
def imap_update_content(content):
global IMAP_UNREAD
if content != IMAP_UNREAD:
IMAP_UNREAD = content
w.bar_item_update('imap')
def imap_process_cb(data, command, rc, out, err):
if rc == 0:
imap_update_content(out)
return w.WEECHAT_RC_OK
def imap_timer_cb(data, remaining_calls):
"""Timer callback to update imap bar item."""
if WEECHAT_VERSION >= 0x01050000:
w.hook_process('func:imap_get_unread', 30 * 1000,
'imap_process_cb', '')
else:
imap_update_content(imap_get_unread(None)) # this can block WeeChat!
return w.WEECHAT_RC_OK
if w.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION, SCRIPT_LICENSE,
SCRIPT_DESC, '', ''):
for option, default_value in settings.items():
if not w.config_is_set_plugin(option):
w.config_set_plugin(option, default_value)
WEECHAT_VERSION = int(w.info_get("version_number", "") or 0)
w.bar_item_new('imap', 'imap_item_cb', '')
imap_timer_cb(None, None)
w.hook_timer(
int(w.config_get_plugin('interval'))*1000*60,
0,
0,
'imap_timer_cb',
'')
|
gpl-3.0
| 1,555,020,943,178,162,000
| 30.320856
| 79
| 0.616869
| false
| 3.435191
| true
| false
| false
|
google/deepvariant
|
third_party/nucleus/util/sequence_utils.py
|
1
|
4197
|
# Copyright 2018 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Utility functions for manipulating DNA sequences."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Error(Exception):
"""Base error class."""
def _add_lowercase(d):
"""Returns a dictionary with the lowercase keys and values entered."""
retval = d.copy()
retval.update({k.lower(): v.lower() for k, v in d.items()})
return retval
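# Illustrative example (editor's note): _add_lowercase({'A': 'T'}) returns
# {'A': 'T', 'a': 't'}; the mixed-case complement tables below are built from
# the upper-case ones this way.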
STRICT_DNA_COMPLEMENT_UPPER = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C'}
DNA_COMPLEMENT_UPPER = {'A': 'T', 'T': 'A', 'C': 'G', 'G': 'C', 'N': 'N'}
IUPAC_DNA_COMPLEMENT_UPPER = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C',
'R': 'Y', # R is A/G
'Y': 'R', # Y is C/T
'S': 'S', # S is C/G
'W': 'W', # W is A/T
'K': 'M', # K is G/T
'M': 'K', # M is A/C
'B': 'V', # B is C/G/T
'V': 'B', # V is A/C/G
'D': 'H', # D is A/G/T
'H': 'D', # H is A/C/T
'N': 'N', # N is any base
}
IUPAC_TO_CANONICAL_BASES_UPPER = {
'A': ['A'],
'T': ['T'],
'C': ['C'],
'G': ['G'],
'R': ['A', 'G'],
'Y': ['C', 'T'],
'S': ['C', 'G'],
'W': ['A', 'T'],
'K': ['G', 'T'],
'M': ['A', 'C'],
'B': ['C', 'G', 'T'],
'V': ['A', 'C', 'G'],
'D': ['A', 'G', 'T'],
'H': ['A', 'C', 'T'],
'N': ['A', 'C', 'G', 'T'],
}
STRICT_DNA_COMPLEMENT = _add_lowercase(STRICT_DNA_COMPLEMENT_UPPER)
DNA_COMPLEMENT = _add_lowercase(DNA_COMPLEMENT_UPPER)
IUPAC_DNA_COMPLEMENT = _add_lowercase(IUPAC_DNA_COMPLEMENT_UPPER)
STRICT_DNA_BASES_UPPER = frozenset(['A', 'C', 'G', 'T'])
STRICT_DNA_BASES = frozenset(['a', 'c', 'g', 't', 'A', 'C', 'G', 'T'])
DNA_BASES_UPPER = frozenset(['A', 'C', 'G', 'T', 'N'])
DNA_BASES = frozenset(['a', 'c', 'g', 't', 'n', 'A', 'C', 'G', 'T', 'N'])
def reverse_complement(sequence, complement_dict=None):
"""Returns the reverse complement of a DNA sequence.
By default this will successfully reverse complement sequences comprised
solely of A, C, G, and T letters. Other complement dictionaries can be
passed in for more permissive matching.
Args:
sequence: str. The input sequence to reverse complement.
complement_dict: dict[str, str]. The lookup dictionary holding the
complement base pairs.
Returns:
The reverse complement DNA sequence.
Raises:
Error: The sequence contains letters not present in complement_dict.
"""
if complement_dict is None:
complement_dict = STRICT_DNA_COMPLEMENT_UPPER
try:
return ''.join(complement_dict[nt] for nt in reversed(sequence))
except KeyError:
raise Error('Unknown base in {}, cannot reverse complement using {}'.format(
sequence, str(complement_dict)))
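# Illustrative usage (editor's sketch, not part of the original module):
if __name__ == '__main__':
    # Strict matching: only A, C, G and T are accepted by default.
    print(reverse_complement('AACGT'))  # -> 'ACGTT'
    # Permissive matching: pass a table that also knows about N.
    print(reverse_complement('AACGTN', DNA_COMPLEMENT_UPPER))  # -> 'NACGTT'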
|
bsd-3-clause
| -3,947,212,933,746,330,000
| 33.68595
| 80
| 0.635692
| false
| 3.235929
| false
| false
| false
|
faircloth-lab/sh_t
|
sh_t/core.py
|
1
|
3611
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
(c) 2014 Brant Faircloth || http://faircloth-lab.org/
All rights reserved.
This code is distributed under a 3-clause BSD license. Please see
LICENSE.txt for more information.
Created on 21 April 2014 20:54 PDT (-0700)
"""
import os
import sys
import glob
import shutil
import argparse
import subprocess
from Bio import AlignIO
import pdb
class FullPaths(argparse.Action):
"""Expand user- and relative-paths"""
def __call__(self, parser, namespace, values, option_string=None):
setattr(namespace, self.dest, os.path.abspath(os.path.expanduser(values)))
class CreateDir(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
# get the full path
d = os.path.abspath(os.path.expanduser(values))
# check to see if directory exists
if os.path.exists(d):
answer = raw_input("[WARNING] Output directory exists, REMOVE [Y/n]? ")
if answer == "Y":
shutil.rmtree(d)
else:
print "[QUIT]"
sys.exit()
# create the new directory
os.makedirs(d)
# return the full path
setattr(namespace, self.dest, d)
class GroupError(Exception):
def __init__(self, message, group, alignment):
# Call the base class constructor with the parameters it needs
Exception.__init__(self, message)
# Now for your custom code...
self.group = group
self.alignment = alignment
def is_dir(dirname):
if not os.path.isdir(dirname):
msg = "{0} is not a directory".format(dirname)
raise argparse.ArgumentTypeError(msg)
else:
return dirname
def is_file(filename):
    # The original checked the function object itself ("if not os.path.isfile"),
    # which is always truthy, so the error branch could never run.
    if not os.path.isfile(filename):
        msg = "{0} is not a file".format(filename)
        raise argparse.ArgumentTypeError(msg)
    else:
        return filename
def which(prog):
cmd = ["which", prog]
proc = subprocess.Popen(
cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE
)
stdout, stderr = proc.communicate()
if stderr:
        raise EnvironmentError("Program {} does not appear to be installed".format(prog))
else:
return stdout.strip()
def get_alignments(alignment_dir):
alignments = []
for ftype in ('.phylip', '.phy'):
alignments.extend(glob.glob(os.path.join(alignment_dir, "*{}".format(ftype))))
return alignments
def satisfy_one_taxon_group(taxa_in_align, taxon_group):
    # isinstance() returns a bool and never raises, so test its result
    # directly; the original try/except around it could never trigger.
    if not isinstance(taxon_group, list):
        raise AssertionError("Taxon group is not a list.")
group_set = set(taxon_group)
# ensure there is at least one member in each group
if len(taxa_in_align.intersection(group_set)) >= 1:
return True
else:
return False
def get_taxa_in_alignment(alignment):
aln = AlignIO.read(alignment, "phylip-relaxed")
taxa_in_align = set([taxon.id for taxon in aln])
return taxa_in_align
def satisfy_all_taxon_groups(alignment, taxon_groups):
"""given an input alignment, see if any taxa in list are in file"""
taxa_in_align = get_taxa_in_alignment(alignment)
taxa_present = []
for group_name, taxon_group in taxon_groups.iteritems():
if satisfy_one_taxon_group(taxa_in_align, taxon_group):
taxa_present.append(True)
else:
taxa_present.append(False)
if all(taxa_present):
return True
else:
raise GroupError(
"Not all taxa present in Group",
group_name,
os.path.basename(alignment),
)
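# Illustrative usage (editor's sketch; the alignment path and taxon names are
# hypothetical, not from the original project):
if __name__ == '__main__':
    groups = {"ingroup": ["taxon_a", "taxon_b"], "outgroup": ["taxon_c"]}
    print satisfy_all_taxon_groups("example.phylip", groups)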
|
bsd-3-clause
| -438,796,830,149,700,200
| 26.356061
| 86
| 0.626696
| false
| 3.73423
| false
| false
| false
|
ciudadanointeligente/votainteligente-portal-electoral
|
proposal_subscriptions/migrations/0001_initial.py
|
1
|
1395
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.7 on 2017-06-27 21:20
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import picklefield.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='SearchSubscription',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('keyword_args', picklefield.fields.PickledObjectField(editable=False)),
('search_params', picklefield.fields.PickledObjectField(editable=False)),
('filter_class_module', models.CharField(max_length=254)),
('filter_class_name', models.CharField(max_length=254)),
('oftenity', models.DurationField()),
('created', models.DateTimeField(auto_now_add=True, null=True)),
('updated', models.DateTimeField(auto_now=True, null=True)),
('last_run', models.DateTimeField(blank=True, null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
|
gpl-3.0
| -2,690,039,399,188,640,000
| 38.857143
| 118
| 0.62724
| false
| 4.201807
| false
| false
| false
|
JoshData/django-annotator-store
|
annotator/views.py
|
1
|
4909
|
from django.core.exceptions import PermissionDenied, ObjectDoesNotExist
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseServerError, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseForbidden
from django.views.generic import View
from django.views.generic.base import TemplateView
from django.shortcuts import get_object_or_404
from django.contrib.auth.decorators import login_required
from django.contrib.auth.decorators import permission_required
from django.conf import settings
import json, re
from annotator.models import Document, Annotation
class BaseStorageView(View):
def dispatch(self, request, *args, **kwargs):
# All PUT/POST requests must contain a JSON body. We decode that here and
# interpolate the value into the view argument list.
if request.method in ('PUT', 'POST'):
if not re.match("application/json(; charset=UTF-8)?", request.META['CONTENT_TYPE'], re.I):
return HttpResponseBadRequest("Request must have application/json content type.")
try:
body = json.loads(request.body.decode("utf8"))
except:
return HttpResponseBadRequest("Request body is not JSON.")
if not isinstance(body, dict):
return HttpResponseBadRequest("Request body is not a JSON object.")
# Interpolate the parsed JSON body into the arg list.
args = [body] + list(args)
# All requets return JSON on success, or some other HttpResponse.
try:
ret = super(BaseStorageView, self).dispatch(request, *args, **kwargs)
if isinstance(ret, HttpResponse):
return ret
# DELETE requests, when successful, return a 204 NO CONTENT.
if request.method == 'DELETE':
return HttpResponse(status=204)
ret = json.dumps(ret)
resp = HttpResponse(ret, mimetype="application/json")
resp["Content-Length"] = len(ret)
return resp
except ValueError as e:
return HttpResponseBadRequest(str(e))
except PermissionDenied as e:
return HttpResponseForbidden(str(e))
except ObjectDoesNotExist as e:
return HttpResponseNotFound(str(e))
except Exception as e:
if settings.DEBUG: raise # when debugging, don't trap
return HttpResponseServerError(str(e))
return ret
class Root(BaseStorageView):
http_method_names = ['get']
def get(self, request):
return {
"name": "Django Annotator Store",
"version": "0.0.1",
}
class Index(BaseStorageView):
http_method_names = ['get', 'post']
def get(self, request):
# index. Returns ALL annotation objects. Seems kind of not scalable.
return Annotation.as_list()
def post(self, request, client_data):
# create. Creates an annotation object and returns a 303.
obj = Annotation()
obj.owner = request.user if request.user.is_authenticated() else None
try:
obj.document = Document.objects.get(id=client_data.get("document"))
except:
raise ValueError("Invalid or missing 'document' value passed in annotation data.")
obj.set_guid()
obj.data = "{ }"
obj.update_from_json(client_data)
obj.save()
return obj.as_json(request.user) # Spec wants redirect but warns of browser bugs, so return the object.
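# Editor's note (illustrative request, not from the original project): a
# create call posts a JSON object whose fields are consumed by
# Annotation.update_from_json (defined on the model, not shown here), e.g.
#   POST <storage root>/annotations   Content-Type: application/json
#   {"document": 1, "text": "a note", "ranges": []}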
class Annot(BaseStorageView):
http_method_names = ['get', 'put', 'delete']
def get(self, request, guid):
# read. Returns the annotation.
obj = Annotation.objects.get(guid=guid) # exception caught by base view
return obj.as_json(request.user)
def put(self, request, client_data, guid):
# update. Updates the annotation.
obj = Annotation.objects.get(guid=guid) # exception caught by base view
if not obj.can_edit(request.user):
raise PermissionDenied("You do not have permission to modify someone else's annotation.")
obj.update_from_json(client_data)
obj.save()
return obj.as_json(request.user) # Spec wants redirect but warns of browser bugs, so return the object.
def delete(self, request, guid):
obj = Annotation.objects.get(guid=guid) # exception caught by base view
if not obj.can_edit(request.user):
raise PermissionDenied("You do not have permission to delete someone else's annotation.")
obj.delete()
return None # response handled by the base view
class Search(BaseStorageView):
http_method_names = ['get']
def get(self, request):
try:
document = Document.objects.get(id=request.GET.get("document"))
except:
raise ValueError("Invalid or missing 'document' value passed in the query string.")
qs = Annotation.objects.filter(document=document)
return {
"total": qs.count(),
"rows": Annotation.as_list(qs=qs, user=request.user)
}
class EditorView(TemplateView):
template_name = 'annotator/editor.html'
def get_context_data(self, **kwargs):
context = super(EditorView, self).get_context_data(**kwargs)
context['storage_api_base_url'] = reverse('annotator.root')[0:-1] # chop off trailing slash
context['document'] = get_object_or_404(Document, id=kwargs['doc_id'])
return context
|
unlicense
| -49,917,196,215,574,370
| 34.316547
| 130
| 0.727032
| false
| 3.622878
| false
| false
| false
|
bbxyard/bbxyard
|
yard/skills/36-spider/spider-so/stackoverflow/spiders/stackoverflow_spider.py
|
1
|
2417
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
import scrapy
from stackoverflow.spiders.items import StackoverflowItem
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger('monitor')
logger.setLevel(logging.INFO)
fh = logging.FileHandler('monitor.log')
fh.setLevel(logging.INFO)
fh.setFormatter(formatter)
logger.addHandler(fh)
class StackoverflowSpider(scrapy.Spider):
name = "stackoverflow"
    def __init__(self, *args, **kwargs):
        # forward scrapy's arguments so from_crawler keeps working
        super(StackoverflowSpider, self).__init__(*args, **kwargs)
        self.count = 1
def start_requests(self):
_url = 'https://stackoverflow.com/questions?page={page}&sort=votes&pagesize=50'
urls = [_url.format(page=page) for page in range(1, 100001)]
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
def parse(self, response):
nodeList = response.xpath('//*[@id="questions"]/*[@class="question-summary"]')
for sel in nodeList:
self.count += 1
if self.count % 100 == 0:
logger.info(self.count)
item = StackoverflowItem()
item['votes'] = sel.xpath('./*/div[@class="stats"]/div[@class="vote"]/div[@class="votes"]/span/strong/text()').extract()[0]
item['answers'] = sel.xpath('./*/div[@class="stats"]/div[2]/strong/text()').extract()[0]
item['views'] = sel.xpath('./*/div[@class="views supernova"]/@title').extract()[0].split()[0].replace(',','')
item['questions'] = sel.xpath('./div[@class="summary"]/*/a[@class="question-hyperlink"]/text()').extract()[0]
item['links'] = sel.xpath('./div[@class="summary"]/*/a[@class="question-hyperlink"]/@href').extract()[0]
item['tags'] = sel.xpath('./div[@class="summary"]/div[2]/a/text()').extract()
yield item
# item[''] = sel.xpath('//div[@class="votes"]/span/strong/text()').extract()
# for index in range(1, 51):
# sel = response.xpath('//*[@id="questions"]/div[{index}]'.format(index=index))
# item = StackoverflowItem()
# item['votes'] = sel.xpath(
# 'div[1]/div[2]/div[1]/div[1]/span/strong/text()').extract()
# item['answers'] = sel.xpath(
# 'div[1]/div[2]/div[2]/strong/text()').extract()
# item['links'] = "".join(
# sel.xpath('div[2]/h3/a/@href').extract()).split("/")[2]
|
apache-2.0
| 7,355,543,177,940,404,000
| 36.184615
| 135
| 0.570542
| false
| 3.477698
| false
| false
| false
|
iJebus/CITS4406-Assignment2
|
data.py
|
1
|
8727
|
"""Reads CSV file for information, provides basic cleaning of data and then
runs analysis on said data."""
import csv
import re
from collections import Counter
from statistics import mean, mode, median_low, median, median_high, \
StatisticsError, Decimal
# Config
threshold = 0.9
invalid_values = ['-', '*', '_']
re_float = re.compile('^\d*?\.\d+$')
re_int = re.compile('^[1-9]\d*$')
class Analyser(object):
"""Base analysis class object. Initiate the object, and assigns the
statistical mode, if any.
Class variables:
mode -- Returns the mode of the column analysed.
Child Classes and associated variables:
StringAnalyser -- String column analysis.
EnumAnalyser -- Enumerated column analysis.
NumericalAnalyser - String/Float column analysis.
min -- Minimum value in column values.
max -- Maximum value in column values.
mean -- Mean value in column values.
median_low -- Low median for column values.
median -- Median value for column values.
median_high -- High median for column values.
"""
def __init__(self, values):
try:
self.mode = mode(values)
except StatisticsError:
self.mode = 'N/A'
class StringAnalyser(Analyser):
"""Run string analysis."""
def __init__(self, values):
super().__init__(values)
# TODO Implement some string exclusive statistics.
class EnumAnalyser(Analyser):
"""Run enumeration analysis."""
def __init__(self, values):
super().__init__(values)
# TODO Implement some enum exclusive statistics.
class NumericalAnalyser(Analyser):
"""Runs numeric analysis."""
def __init__(self, values):
        # values arrive as strings; eval coerces '3' -> 3 and '3.5' -> 3.5
        # (the column was already typed as Integer/Float before analysis)
        values = [eval(i) for i in values]
super().__init__(values)
self.min = min(values)
self.max = max(values)
self.mean = Decimal(mean(values)).quantize(Decimal('.00000'))
self.median_low = median_low(values)
self.median = median(values)
self.median_high = median_high(values)
class Column(object):
"""Object to hold data from each column within the provided CSV file.
Methods:
change_misc_values -- Removes misc/unclear values from column
values.
drop_greater_than -- Removes '<', '>' from column values.
define_most_common -- Sets object variable to hold 15 most common values
for that column.
define_type -- Sets object variable to type (e.g., String) according
to column values.
Variables:
most_common -- <= 15 most common results within the column values.
empty -- Boolean value of whether the column holds values or not.
header -- Column header/title.
type -- The type of data in column, e.g., String, Float, Integer,
Enumerated.
values -- List of CSV values for the column.
analysis -- Analysis object associated with this column.
outliers -- List of values in column but outside threshold of column type.
"""
def __init__(self, header=''):
self.most_common = []
self.empty = False
self.header = header
self.type = ''
self.values = []
self.analysis = None
self.outliers = []
# Todo: Does initialising as None even make sense?
def change_misc_values(self):
"""
        Replaces identified values of unclear meaning or inexact value (e.g.,
        '-', '*' or '_') with an agreed value.
"""
for index, value in enumerate(self.values):
if value in invalid_values:
self.values[index] = ''
def drop_greater_than(self):
pass
# Todo: Implement method to handle (strip?) '<', '>'.
def define_most_common(self):
"""Set 15 most common results to class variable, and set object variable
empty if appropriate.
"""
self.most_common = Counter(self.values).most_common(15)
if self.most_common[0][0] == '' \
and self.most_common[0][1] / len(self.values) >= threshold:
self.empty = True
def define_type(self):
"""Run column data against regex filters and assign object variable type
as appropriate.
"""
float_count = 0
int_count = 0
boolean = ['true', 'false']
# Todo: Define date type.
for value in self.values:
if re_float.match(value):
float_count += 1
elif re_int.match(value):
int_count += 1
if float_count / len(self.values) >= threshold:
self.type = 'Float'
elif int_count / len(self.values) >= threshold:
self.type = 'Integer'
elif len(self.most_common) <= 2:
if self.most_common[0][0].lower() in boolean:
self.type = 'Bool'
elif len(self.most_common) < 10:
self.type = 'Enum'
else:
self.type = 'String'
def define_outliers(self):
if self.type == 'Float':
for value in self.values:
if not re_float.match(value):
self.outliers.append(value)
elif self.type == 'Integer':
for value in self.values:
if not re_int.match(value):
self.outliers.append(value)
class Data(object):
"""Main store for CSV data, reading the data from the CSV file and then
assigning out to relevant variables.
Methods:
read -- Reads the CSV file and outputs to raw_data variable.
remove_invalid -- Reads from raw_data variable and assigns rows to
valid_rows or invalid_rows according to their length.
create_columns -- Creates column object according to valid_rows, assigning
column header and column values.
clean -- Calls column cleaning methods to run 'cleaning' on all columns.
analyse -- Calls column analysis methods to run 'analysis' on all columns.
Variables:
columns -- List of column objects.
headers -- List of column headers.
invalid_rows -- List of invalid rows (i.e., more or less columns than
number of headers).
raw_data -- List of raw CSV data as rows.
valid_rows -- List of valid rows (i.e., same number of columns as headers).
"""
def __init__(self, csv_file):
self.columns = []
self.headers = []
self.invalid_rows = []
self.raw_data = []
self.valid_rows = []
self.read(csv_file)
self.remove_invalid()
self.create_columns()
def read(self, csv_file):
"""Opens and reads the CSV file, line by line, to raw_data variable."""
f = csv.reader(open(csv_file))
for row in f:
self.raw_data.append(row)
def remove_invalid(self):
"""For each row in raw_data variable, checks row length and appends to
valid_rows variable if same length as headers, else appends to
invalid_rows variable.
"""
for index, row in enumerate(self.raw_data):
if len(row) != len(self.raw_data[0]):
self.invalid_rows.append([index + 1, row])
else:
self.valid_rows.append(row)
def create_columns(self):
"""For each row in raw_data variable, assigns the first value to the
headers variable and creates a Column object with that header provided.
Then removes header row from valid_rows. (Todo: Maybe can read straight
from valid rows? Why/Why not?). Then for each row in valid_rows,
populates relevant column object with row data.
"""
for value in self.raw_data[0]:
self.columns.append(Column(header=value))
self.headers.append(value)
self.valid_rows.pop(0)
for row in self.valid_rows:
for index, value in enumerate(row):
self.columns[index].values.append(value)
def clean(self):
"""Calls cleaning methods on all columns."""
for column in self.columns:
column.change_misc_values()
column.drop_greater_than()
def analyse(self):
"""Calls analysis methods on all columns, checking if they are empty
first.
"""
analysers = {'String': StringAnalyser, 'Integer': NumericalAnalyser,
'Float': NumericalAnalyser, 'Enum': EnumAnalyser}
for column in self.columns:
column.define_most_common()
if not column.empty:
column.define_type()
column.define_outliers()
if column.type in analysers:
column.analysis = analysers[column.type](column.values)
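# Illustrative driver (editor's sketch, not part of the original module);
# 'example.csv' is a hypothetical input file.
if __name__ == '__main__':
    data = Data('example.csv')
    data.clean()
    data.analyse()
    for column in data.columns:
        print(column.header, column.type, column.most_common[:3])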
|
mit
| -7,077,307,649,676,576,000
| 34.620408
| 81
| 0.595623
| false
| 4.273751
| false
| false
| false
|
septag/termite
|
scripts/texture-tools/etc2pack.py
|
1
|
9820
|
import os
import sys
import subprocess
import shutil
import optparse
import lz4.block
import json
import hashlib
import traceback
import timeit
import tempfile
from PIL import Image
ARG_InputFile = ''
ARG_ListFile = ''
ARG_OutputDir = '.'
ARG_Encoder = 'etc2_alpha'
ARG_Quality = 'normal'
ARG_FixImageSizeModulo = 4
C_TexturePackerPath = 'TexturePacker'
C_EtcToolPath = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'EtcTool')
gFileHashes = {} # key: filepath, value: sha1
gProcessedFileCount = 0
def readListFile():
global ARG_ListFile
with open(ARG_ListFile) as f:
lines = f.readlines()
f.close()
return tuple([l.strip() for l in lines])
def readHashFile():
global ARG_ListFile
global gFileHashes
hashFilepath = ARG_ListFile + '.sha1'
if not os.path.isfile(hashFilepath):
return
with open(hashFilepath) as f:
lines = f.readlines()
stripLines = [l.strip() for l in lines]
for l in stripLines:
key, value = l.split(';', 1)
gFileHashes[key] = value
f.close()
def writeHashFile():
global ARG_ListFile
global gFileHashes
with open(ARG_ListFile + '.sha1', 'w') as f:
for key, value in gFileHashes.items():
f.write(key + ';' + value + '\n')
f.close()
def compressLz4(filepath):
with open(filepath, 'rb') as f:
srcData = f.read()
srcDataLen = len(srcData)
f.close()
compressed = lz4.block.compress(srcData, mode='high_compression', compression=9, store_size=True)
os.remove(filepath)
with open(filepath + '.lz4', 'wb') as f:
f.write(compressed)
f.close()
compressedLen = len(compressed)
print('\tLZ4 compressed (%dkb -> %dkb), Ratio: %.1f' % (srcDataLen/1024, compressedLen/1024,
srcDataLen/compressedLen))
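# Editor's sketch (not part of the original tool): the read-back counterpart
# of compressLz4(); store_size=True above embeds the original size, so
# lz4.block.decompress() needs no explicit size hint.
def readLz4(filepath):
    with open(filepath, 'rb') as f:
        return lz4.block.decompress(f.read())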
def encodeEtc2(filepath):
global ARG_OutputDir, ARG_Quality, ARG_Encoder, ARG_ListFile, ARG_FixImageSizeModulo
global C_EtcToolPath
global gFileHashes, gProcessedFileCount
if not os.path.isfile(filepath):
print("Image file '%s' does not exist" % filepath)
return False
filedir = os.path.dirname(filepath)
destdir = os.path.join(ARG_OutputDir, filedir)
if not os.path.isdir(destdir):
os.makedirs(destdir, exist_ok=True)
# Check source file hash with the data we cached
# If file didn't change, return immediately
if ARG_ListFile:
sha1 = hashlib.sha1()
sha1.update(open(filepath, 'rb').read())
hashVal = sha1.hexdigest()
if filepath in gFileHashes and gFileHashes[filepath] == hashVal:
return True
tpFmt = ''
if ARG_Encoder == 'etc2':
tpFmt = 'RGB8'
elif ARG_Encoder == 'etc2_alpha':
tpFmt = 'RGBA8'
tpQuality = ''
if ARG_Quality == 'low':
tpQuality = ['-effort', '30']
elif ARG_Quality == 'normal':
tpQuality = ['-effort', '60']
elif ARG_Quality == 'high':
tpQuality = ['-effort', '100']
filename, fileext = os.path.splitext(filepath)
outputFilepath = os.path.join(destdir, os.path.basename(filename)) + '.ktx'
print(filepath + ' -> ' + os.path.relpath(outputFilepath, ARG_OutputDir))
modifiedFilepath = filepath
# check if have a json file with the same name (TexturePacker spritesheet)
# then change it's size in the json too, or just copy the file to target path
spritesheet_filepath = filename + '.json'
if (os.path.isfile(spritesheet_filepath)):
jdata = json.load(open(spritesheet_filepath))
else:
jdata = None
# Open the image file, check the size to be a modulo of the argument
if (ARG_FixImageSizeModulo != 0):
img = Image.open(filepath)
width, height = img.size
if (width % ARG_FixImageSizeModulo != 0 or height % ARG_FixImageSizeModulo != 0):
prevWidth = width
prevHeight = height
if (width % ARG_FixImageSizeModulo != 0):
width = width + (ARG_FixImageSizeModulo - (width % ARG_FixImageSizeModulo))
if (height % ARG_FixImageSizeModulo != 0):
height = height + (ARG_FixImageSizeModulo - (height % ARG_FixImageSizeModulo))
print('\tFixing size (%d, %d) -> (%d, %d)' % (prevWidth, prevHeight, width, height))
tmpImageFilepath = os.path.join(tempfile.gettempdir(), os.path.basename(filename)) + fileext
newImage = Image.new('RGBA', (width, height))
newImage.paste(img)
            # PIL expects 'JPEG' (not 'jpg') as a format name, and RGBA data
            # cannot be written as JPEG, so normalise both before saving.
            fmt = 'PNG' if fileext.lower() == '.png' else 'JPEG'
            if fmt == 'JPEG':
                newImage = newImage.convert('RGB')
            newImage.save(tmpImageFilepath, fmt)
modifiedFilepath = tmpImageFilepath
# modify image size inside the spritesheet 'meta' tag
if jdata:
jdata['meta']['size']['w'] = width
jdata['meta']['size']['h'] = height
# trim/modify spritesheet json data for the image, and put them into target
if jdata:
json_filepath = os.path.join(destdir, os.path.basename(filename)) + '.json'
with open(json_filepath, 'w', encoding='utf8') as f:
f.write(json.dumps(jdata, sort_keys=False))
f.close()
print('\t' + spritesheet_filepath + ' -> ' + os.path.relpath(json_filepath, ARG_OutputDir))
# ETC2 convert the file
args = [C_EtcToolPath, modifiedFilepath, '-j', '4']
if tpFmt:
args.extend(['-format', tpFmt])
if tpQuality:
args.extend(tpQuality)
args.extend(['-errormetric', 'rec709'])
#args.extend(['-m', '2'])
args.extend(['-output', outputFilepath])
r = subprocess.call(args)
if r == 0:
compressLz4(outputFilepath)
if ARG_ListFile:
gFileHashes[filepath] = hashVal
gProcessedFileCount = gProcessedFileCount + 1
if modifiedFilepath != filepath:
os.remove(modifiedFilepath)
return (r == 0)
def encodeWithTexturePacker(filepath):
global ARG_OutputDir
global C_TexturePackerPath
filename, fileext = os.path.splitext(filepath)
outputFilepath = os.path.join(ARG_OutputDir, os.path.basename(filename)) + '.json'
args = [C_TexturePackerPath, '--data', outputFilepath, filepath]
r = subprocess.call(args)
if r == 0:
# read json and extract output file
jdata = json.load(open(outputFilepath))
imgfile = jdata['meta']['image']
imgdir = os.path.dirname(outputFilepath)
imgFilepath = os.path.join(imgdir, imgfile)
res = encodeEtc2(imgFilepath)
os.remove(imgFilepath)
return res
else:
return False
def encodeFile(filepath):
# determine the file type (TexturePacker or plain image)
filename, fileext = os.path.splitext(filepath)
if fileext == '.tps':
return encodeWithTexturePacker(filepath)
if fileext == '.png' or fileext == '.jpg':
return encodeEtc2(filepath)
else:
return False
def main():
global ARG_ListFile, ARG_Quality, ARG_Encoder, ARG_OutputDir, ARG_InputFile, ARG_FixImageSizeModulo
global gProcessedFileCount
cmdParser = optparse.OptionParser()
cmdParser.add_option('--file', action='store', type='string', dest='ARG_InputFile',
help = 'Input image file', default=ARG_InputFile)
cmdParser.add_option('--listfile', action='store', type='string', dest='ARG_ListFile',
help = 'Text file which lists input image files', default=ARG_ListFile)
cmdParser.add_option('--outdir', action='store', type='string', dest='ARG_OutputDir',
help = 'Output file(s) directory', default=ARG_OutputDir)
cmdParser.add_option('--enc', action='store', type='choice', dest='ARG_Encoder',
choices=['etc2', 'etc2_alpha'], help = 'Choose encoder', default=ARG_Encoder)
cmdParser.add_option('--quality', action='store', type='choice', dest='ARG_Quality',
choices = ['low', 'normal', 'high'], help = '', default=ARG_Quality)
cmdParser.add_option('--msize', action='store', type='int', dest='ARG_FixImageSizeModulo',
default=4, help='Fix output image size to be a multiply of specified argument')
cmdParser.add_option('--exclude-hd', action='store_true', default=False, dest='ARG_ExcludeHD')
(options, args) = cmdParser.parse_args()
if options.ARG_InputFile:
ARG_InputFile = os.path.abspath(options.ARG_InputFile)
if options.ARG_ListFile:
ARG_ListFile = os.path.abspath(options.ARG_ListFile)
ARG_OutputDir = os.path.abspath(options.ARG_OutputDir)
ARG_Encoder = options.ARG_Encoder
ARG_Quality = options.ARG_Quality
ARG_FixImageSizeModulo = options.ARG_FixImageSizeModulo
if not ARG_InputFile and not ARG_ListFile:
raise Exception('Must provide either --file or --listfile arguments. See --help')
if not os.path.isdir(ARG_OutputDir):
raise Exception(ARG_OutputDir + ' is not a valid directory')
startTm = timeit.default_timer()
if ARG_ListFile:
readHashFile()
files = readListFile()
# Remove all files that have -sd versions
if (options.ARG_ExcludeHD):
for f in files:
(first_part, ext) = os.path.splitext(f)
sd_version = first_part + '-sd' + ext
if not os.path.isfile(sd_version):
encodeFile(os.path.normpath(f))
else:
for f in files:
encodeFile(os.path.normpath(f))
writeHashFile()
elif ARG_InputFile:
encodeFile(ARG_InputFile)
print('Total %d file(s) processed' % gProcessedFileCount)
print('Took %.3f secs' % (timeit.default_timer() - startTm))
if __name__ == '__main__':
try:
main()
except Exception as e:
print('Error:')
print(e)
print('CallStack:')
traceback.print_exc(file=sys.stdout)
except:
raise
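# Editor's note (illustrative invocation, paths hypothetical):
#   python etc2pack.py --listfile assets/textures.txt --outdir build \
#       --enc etc2_alpha --quality normal --msize 4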
|
bsd-2-clause
| -2,274,651,020,613,943,800
| 34.839416
| 104
| 0.626782
| false
| 3.561843
| false
| false
| false
|
igraph/python-igraph
|
tests/test_edgeseq.py
|
1
|
16063
|
# vim:ts=4 sw=4 sts=4:
import unittest
from igraph import *
from .utils import is_pypy
try:
import numpy as np
except ImportError:
np = None
class EdgeTests(unittest.TestCase):
def setUp(self):
self.g = Graph.Full(10)
def testHash(self):
data = {}
n = self.g.ecount()
for i in range(n):
code1 = hash(self.g.es[i])
code2 = hash(self.g.es[i])
self.assertEqual(code1, code2)
data[self.g.es[i]] = i
for i in range(n):
self.assertEqual(i, data[self.g.es[i]])
def testRichCompare(self):
idxs = [2, 5, 9, 13, 42]
g2 = Graph.Full(10)
for i in idxs:
for j in idxs:
self.assertEqual(i == j, self.g.es[i] == self.g.es[j])
self.assertEqual(i != j, self.g.es[i] != self.g.es[j])
self.assertEqual(i < j, self.g.es[i] < self.g.es[j])
self.assertEqual(i > j, self.g.es[i] > self.g.es[j])
self.assertEqual(i <= j, self.g.es[i] <= self.g.es[j])
self.assertEqual(i >= j, self.g.es[i] >= self.g.es[j])
self.assertFalse(self.g.es[i] == g2.es[j])
self.assertFalse(self.g.es[i] != g2.es[j])
self.assertFalse(self.g.es[i] < g2.es[j])
self.assertFalse(self.g.es[i] > g2.es[j])
self.assertFalse(self.g.es[i] <= g2.es[j])
self.assertFalse(self.g.es[i] >= g2.es[j])
self.assertFalse(self.g.es[2] == self.g.vs[2])
def testRepr(self):
output = repr(self.g.es[0])
self.assertEqual(output, "igraph.Edge(%r, 0, {})" % self.g)
self.g.es["weight"] = list(range(10, 0, -1))
output = repr(self.g.es[3])
self.assertEqual(output, "igraph.Edge(%r, 3, {'weight': 7})" % self.g)
def testUpdateAttributes(self):
e = self.g.es[0]
e.update_attributes(a=2)
self.assertEqual(e["a"], 2)
e.update_attributes([("a", 3), ("b", 4)], c=5, d=6)
self.assertEqual(e.attributes(), dict(a=3, b=4, c=5, d=6))
e.update_attributes(dict(b=44, c=55))
self.assertEqual(e.attributes(), dict(a=3, b=44, c=55, d=6))
def testPhantomEdge(self):
e = self.g.es[self.g.ecount() - 1]
e.delete()
        # e is now a phantom edge; try to freak igraph out now :)
self.assertRaises(ValueError, e.update_attributes, a=2)
self.assertRaises(ValueError, e.__getitem__, "a")
self.assertRaises(ValueError, e.__setitem__, "a", 4)
self.assertRaises(ValueError, e.__delitem__, "a")
self.assertRaises(ValueError, e.attributes)
self.assertRaises(ValueError, getattr, e, "source")
self.assertRaises(ValueError, getattr, e, "source_vertex")
self.assertRaises(ValueError, getattr, e, "target")
self.assertRaises(ValueError, getattr, e, "target_vertex")
self.assertRaises(ValueError, getattr, e, "tuple")
self.assertRaises(ValueError, getattr, e, "vertex_tuple")
@unittest.skipIf(is_pypy, "skipped on PyPy because we do not have access to docstrings")
def testProxyMethods(self):
g = Graph.GRG(10, 0.5)
e = g.es[0]
# - delete() is ignored because it mutates the graph
ignore = "delete"
ignore = set(ignore.split())
# Methods not listed here are expected to return an int or a float
return_types = {}
for name in Edge.__dict__:
if name in ignore:
continue
func = getattr(e, name)
docstr = func.__doc__
if not docstr.startswith("Proxy method"):
continue
result = func()
self.assertEqual(
getattr(g, name)(e.index),
result,
msg=("Edge.%s proxy method misbehaved" % name),
)
return_type = return_types.get(name, (int, float))
self.assertTrue(
isinstance(result, return_type),
msg=("Edge.%s proxy method did not return %s" % (name, return_type)),
)
class EdgeSeqTests(unittest.TestCase):
def assert_edges_unique_in(self, es):
pairs = sorted(e.tuple for e in es)
self.assertEqual(pairs, sorted(set(pairs)))
def setUp(self):
self.g = Graph.Full(10)
self.g.es["test"] = list(range(45))
def testCreation(self):
self.assertTrue(len(EdgeSeq(self.g)) == 45)
self.assertTrue(len(EdgeSeq(self.g, 2)) == 1)
self.assertTrue(len(EdgeSeq(self.g, [1, 2, 3])) == 3)
self.assertTrue(EdgeSeq(self.g, [1, 2, 3]).indices == [1, 2, 3])
self.assertRaises(ValueError, EdgeSeq, self.g, 112)
self.assertRaises(ValueError, EdgeSeq, self.g, [112])
self.assertTrue(self.g.es.graph == self.g)
def testIndexing(self):
n = self.g.ecount()
for i in range(n):
self.assertEqual(i, self.g.es[i].index)
self.assertEqual(n - i - 1, self.g.es[-i - 1].index)
self.assertRaises(IndexError, self.g.es.__getitem__, n)
self.assertRaises(IndexError, self.g.es.__getitem__, -n - 1)
self.assertRaises(TypeError, self.g.es.__getitem__, 1.5)
@unittest.skipIf(np is None, "test case depends on NumPy")
def testNumPyIndexing(self):
n = self.g.ecount()
for i in range(n):
arr = np.array([i])
self.assertEqual(i, self.g.es[arr[0]].index)
arr = np.array([n])
self.assertRaises(IndexError, self.g.es.__getitem__, arr[0])
arr = np.array([-n - 1])
self.assertRaises(IndexError, self.g.es.__getitem__, arr[0])
arr = np.array([1.5])
self.assertRaises(TypeError, self.g.es.__getitem__, arr[0])
ind = [1, 3, 5, 8, 3, 2]
arr = np.array(ind)
self.assertEqual(ind, [edge.index for edge in self.g.es[arr.tolist()]])
self.assertEqual(ind, [edge.index for edge in self.g.es[list(arr)]])
def testPartialAttributeAssignment(self):
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
only_even["test"] = [0] * len(only_even)
expected = [[0, i][i % 2] for i in range(self.g.ecount())]
self.assertTrue(self.g.es["test"] == expected)
only_even["test2"] = list(range(23))
expected = [[i // 2, None][i % 2] for i in range(self.g.ecount())]
self.assertTrue(self.g.es["test2"] == expected)
def testSequenceReusing(self):
if "test" in self.g.edge_attributes():
del self.g.es["test"]
self.g.es["test"] = ["A", "B", "C"]
self.assertTrue(self.g.es["test"] == ["A", "B", "C"] * 15)
self.g.es["test"] = "ABC"
self.assertTrue(self.g.es["test"] == ["ABC"] * 45)
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
only_even["test"] = ["D", "E"]
expected = ["D", "ABC", "E", "ABC"] * 12
expected = expected[0:45]
self.assertTrue(self.g.es["test"] == expected)
del self.g.es["test"]
only_even["test"] = ["D", "E"]
expected = ["D", None, "E", None] * 12
expected = expected[0:45]
self.assertTrue(self.g.es["test"] == expected)
def testAllSequence(self):
self.assertTrue(len(self.g.es) == 45)
self.assertTrue(self.g.es["test"] == list(range(45)))
def testEmptySequence(self):
empty_es = self.g.es.select(None)
self.assertTrue(len(empty_es) == 0)
self.assertRaises(IndexError, empty_es.__getitem__, 0)
self.assertRaises(KeyError, empty_es.__getitem__, "nonexistent")
self.assertTrue(empty_es["test"] == [])
empty_es = self.g.es[[]]
self.assertTrue(len(empty_es) == 0)
empty_es = self.g.es[()]
self.assertTrue(len(empty_es) == 0)
def testCallableFilteringFind(self):
edge = self.g.es.find(lambda e: (e.index % 2 == 1))
self.assertTrue(edge.index == 1)
self.assertRaises(IndexError, self.g.es.find, lambda e: (e.index % 2 == 3))
def testCallableFilteringSelect(self):
only_even = self.g.es.select(lambda e: (e.index % 2 == 0))
self.assertTrue(len(only_even) == 23)
self.assertRaises(KeyError, only_even.__getitem__, "nonexistent")
self.assertTrue(only_even["test"] == [i * 2 for i in range(23)])
def testChainedCallableFilteringSelect(self):
only_div_six = self.g.es.select(
lambda e: (e.index % 2 == 0), lambda e: (e.index % 3 == 0)
)
self.assertTrue(len(only_div_six) == 8)
self.assertTrue(only_div_six["test"] == [0, 6, 12, 18, 24, 30, 36, 42])
only_div_six = self.g.es.select(lambda e: (e.index % 2 == 0)).select(
lambda e: (e.index % 3 == 0)
)
self.assertTrue(len(only_div_six) == 8)
self.assertTrue(only_div_six["test"] == [0, 6, 12, 18, 24, 30, 36, 42])
def testIntegerFilteringFind(self):
self.assertEqual(self.g.es.find(3).index, 3)
self.assertEqual(self.g.es.select(2, 3, 4, 2).find(3).index, 2)
self.assertRaises(IndexError, self.g.es.find, 178)
def testIntegerFilteringSelect(self):
subset = self.g.es.select(2, 3, 4, 2)
self.assertTrue(len(subset) == 4)
self.assertTrue(subset["test"] == [2, 3, 4, 2])
self.assertRaises(TypeError, self.g.es.select, 2, 3, 4, 2, None)
subset = self.g.es[2, 3, 4, 2]
self.assertTrue(len(subset) == 4)
self.assertTrue(subset["test"] == [2, 3, 4, 2])
def testIterableFilteringSelect(self):
subset = self.g.es.select(list(range(5, 8)))
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [5, 6, 7])
def testSliceFilteringSelect(self):
subset = self.g.es.select(slice(5, 8))
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [5, 6, 7])
subset = self.g.es[40:56:2]
self.assertTrue(len(subset) == 3)
self.assertTrue(subset["test"] == [40, 42, 44])
def testKeywordFilteringSelect(self):
g = Graph.Barabasi(1000, 2)
g.es["betweenness"] = g.edge_betweenness()
g.es["parity"] = [i % 2 for i in range(g.ecount())]
self.assertTrue(len(g.es(betweenness_gt=10)) < 2000)
self.assertTrue(len(g.es(betweenness_gt=10, parity=0)) < 2000)
def testSourceTargetFiltering(self):
g = Graph.Barabasi(1000, 2, directed=True)
es1 = set(e.source for e in g.es.select(_target_in=[2, 4]))
es2 = set(v1 for v1, v2 in g.get_edgelist() if v2 in [2, 4])
self.assertTrue(es1 == es2)
def testWithinFiltering(self):
g = Graph.Lattice([10, 10])
vs = [0, 1, 2, 10, 11, 12, 20, 21, 22]
vs2 = (0, 1, 10, 11)
es1 = g.es.select(_within=vs)
es2 = g.es.select(_within=VertexSeq(g, vs))
for es in [es1, es2]:
self.assertTrue(len(es) == 12)
self.assertTrue(all(e.source in vs and e.target in vs for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_within=vs2)
self.assertTrue(len(es_filtered) == 4)
self.assertTrue(
all(e.source in vs2 and e.target in vs2 for e in es_filtered)
)
self.assert_edges_unique_in(es_filtered)
def testBetweenFiltering(self):
g = Graph.Lattice([10, 10])
vs1, vs2 = [10, 11, 12], [20, 21, 22]
es1 = g.es.select(_between=(vs1, vs2))
es2 = g.es.select(_between=(VertexSeq(g, vs1), VertexSeq(g, vs2)))
for es in [es1, es2]:
self.assertTrue(len(es) == 3)
self.assertTrue(
all(
(e.source in vs1 and e.target in vs2)
or (e.target in vs1 and e.source in vs2)
for e in es
)
)
self.assert_edges_unique_in(es)
def testIncidentFiltering(self):
g = Graph.Lattice([10, 10], circular=False)
vs = (0, 1, 10, 11)
vs2 = (11, 0, 24)
vs3 = sorted(set(vs).intersection(set(vs2)))
es = g.es.select(_incident=vs)
self.assertEqual(8, len(es))
self.assertTrue(all((e.source in vs or e.target in vs) for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_incident=vs2)
self.assertEqual(6, len(es_filtered))
self.assertTrue(all((e.source in vs3 or e.target in vs3) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
def testIncidentFilteringByNames(self):
g = Graph.Lattice([10, 10], circular=False)
vs = (0, 1, 10, 11)
g.vs[vs]["name"] = ["A", "B", "C", "D"]
vs2 = (11, 0, 24)
g.vs[24]["name"] = "X"
vs3 = sorted(set(vs).intersection(set(vs2)))
es = g.es.select(_incident=("A", "B", "C", "D"))
self.assertEqual(8, len(es))
self.assertTrue(all((e.source in vs or e.target in vs) for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_incident=("D", "A", "X"))
self.assertEqual(6, len(es_filtered))
self.assertTrue(all((e.source in vs3 or e.target in vs3) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
es_filtered = es_filtered.select(_from="A")
self.assertEqual(2, len(es_filtered))
self.assertTrue(all((e.source == 0 or e.target == 0) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
def testSourceAndTargetFilteringForUndirectedGraphs(self):
g = Graph.Lattice([10, 10], circular=False)
vs = (0, 1, 10, 11)
vs2 = (11, 0, 24)
vs3 = sorted(set(vs).intersection(set(vs2)))
es = g.es.select(_from=vs)
self.assertEqual(8, len(es))
self.assertTrue(all((e.source in vs or e.target in vs) for e in es))
self.assert_edges_unique_in(es)
es_filtered = es.select(_to_in=vs2)
self.assertEqual(6, len(es_filtered))
self.assertTrue(all((e.source in vs3 or e.target in vs3) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
es_filtered = es_filtered.select(_from_eq=0)
self.assertEqual(2, len(es_filtered))
self.assertTrue(all((e.source == 0 or e.target == 0) for e in es_filtered))
self.assert_edges_unique_in(es_filtered)
def testIndexOutOfBoundsSelect(self):
g = Graph.Full(3)
self.assertRaises(ValueError, g.es.select, 4)
self.assertRaises(ValueError, g.es.select, 4, 5)
self.assertRaises(ValueError, g.es.select, (4, 5))
self.assertRaises(ValueError, g.es.select, 2, -1)
self.assertRaises(ValueError, g.es.select, (2, -1))
self.assertRaises(ValueError, g.es.__getitem__, (0, 1000000))
def testIndexAndKeywordFilteringFind(self):
self.assertRaises(ValueError, self.g.es.find, 2, test=4)
self.assertTrue(self.g.es.find(2, test=2) == self.g.es[2])
def testGraphMethodProxying(self):
idxs = [1, 3, 5, 7, 9]
g = Graph.Barabasi(100)
es = g.es(*idxs)
ebs = g.edge_betweenness()
self.assertEqual([ebs[i] for i in idxs], es.edge_betweenness())
idxs = [1, 3]
g = Graph([(0, 1), (1, 2), (2, 0), (1, 0)], directed=True)
es = g.es(*idxs)
mutual = g.is_mutual(es)
self.assertEqual(mutual, es.is_mutual())
for e, m in zip(es, mutual):
self.assertEqual(e.is_mutual(), m)
def testIsAll(self):
g = Graph.Full(5)
self.assertTrue(g.es.is_all())
self.assertFalse(g.es.select(1, 2, 3).is_all())
self.assertFalse(g.es.select(_within=[1, 2, 3]).is_all())
def suite():
edge_suite = unittest.makeSuite(EdgeTests)
es_suite = unittest.makeSuite(EdgeSeqTests)
return unittest.TestSuite([edge_suite, es_suite])
def test():
runner = unittest.TextTestRunner()
runner.run(suite())
if __name__ == "__main__":
test()
|
gpl-2.0
| -759,501,945,597,758,200
| 36.355814
| 92
| 0.560481
| false
| 3.190268
| true
| false
| false
|
DailyActie/Surrogate-Model
|
surrogate/crossover/tests/test_cxUniform.py
|
1
|
1730
|
# MIT License
#
# Copyright (c) 2016 Daily Actie
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Author: Quan Pan <quanpan302@hotmail.com>
# License: MIT License
# Create: 2016-12-02
import numpy as np
from surrogate.crossover import cxUniform
print '\nTest.crossover.cxUniform: cxUniform'
ind1 = np.array(range(0, 10))
ind2 = np.array(range(10, 20))
# ind2 = np.array(range(9,-1,-1))
print '\tInput: ind1_desVar=\t' + '\t'.join(map(str, ind1)) + ''
print '\tInput: ind2_desVar=\t' + '\t'.join(map(str, ind2)) + ''
[out1, out2] = cxUniform(var1=ind1.tolist(), var2=ind2.tolist())
print '\tOutput: out1_desVar=\t' + '\t'.join(map(str, out1)) + ''
print '\tOutput: out2_desVar=\t' + '\t'.join(map(str, out2)) + ''
|
mit
| -7,992,576,133,490,520,000
| 43.358974
| 80
| 0.731792
| false
| 3.418972
| false
| false
| false
|
atomman/nmrglue
|
examples/jbnmr_examples/s7-s9_s3e_processing/convert.py
|
4
|
1126
|
import nmrglue as ng
# read in the sum data set
dic, data = ng.varian.read('.', fid_file='fid_sum', as_2d=True)
# set the spectral parameters
udic = ng.varian.guess_udic(dic, data)
udic[1]['size'] = 1500 ; udic[0]['size'] = 256
udic[1]['complex'] = True ; udic[0]['complex'] = True
udic[1]['encoding'] = 'direct' ; udic[0]['encoding'] = 'states'
udic[1]['sw'] = 50000.000 ; udic[0]['sw'] = 5000.0
udic[1]['obs'] = 125.690 ; udic[0]['obs'] = 50.648
udic[1]['car'] = 174.538 * 125.690; udic[0]['car'] = 119.727 * 50.648
udic[1]['label'] = 'C13' ; udic[0]['label'] = 'N15'
# convert to NMRPipe format
C = ng.convert.converter()
C.from_varian(dic, data, udic)
pdic, pdata = C.to_pipe()
# write out the NMRPipe file
ng.pipe.write("test_sum.fid", pdic, pdata, overwrite=True)
# repeat for the difference data set
dic, data = ng.varian.read('.', fid_file='fid_dif', as_2d=True)
C = ng.convert.converter()
C.from_varian(dic, data, udic)
pdic, pdata = C.to_pipe()
ng.pipe.write("test_dif.fid", pdic, pdata, overwrite=True)
|
bsd-3-clause
| 718,106,159,891,570,200
| 37.827586
| 79
| 0.581705
| false
| 2.600462
| false
| false
| false
|
YannThorimbert/ThePhantomRacer
|
levelgen.py
|
1
|
1323
|
import random
import parameters
import track
import obstacle
class LevelGenerator:
def __init__(self, zfinish, nx, ny):
self.zfinish = zfinish
self.nx = nx
self.ny = ny
self.track = track.Track(zfinish,nx,ny)
parameters.scene.track = self.track
def add_static_obstacles(self, density, zmin, zmax, objects):
"""Density: average number of obstacles per 100 m"""
n = density * self.zfinish / 100.
done = set([])
i = 0
while i < n:
x = random.randint(0,self.nx-1)
y = random.randint(0,self.ny-1)
z = random.randint(zmin,zmax)
if (x,y,z) not in done:
done.add((x,y,z))
obj = random.choice(objects).get_copy()
damage = 1
obstacle.Obstacle(damage,x,y,z,obj)
i += 1
def random_gen(self, nparts, objects, min_density=0.1, max_density=1.8):
zpart = self.zfinish // nparts
for i in range(nparts):
density = random.random()*(max_density-min_density) + min_density
print("random gen", density)
if i == 0:
begin = 50
else:
begin = i*zpart
self.add_static_obstacles(density, begin, (i+1)*zpart, objects)
|
mit
| -7,094,742,456,078,290,000
| 30.5
| 77
| 0.530612
| false
| 3.509284
| false
| false
| false
|
boldprogressives/trac-GitolitePlugin
|
trac_gitolite/repo_manager.py
|
1
|
2805
|
import getpass
import pkg_resources
from trac.admin import IAdminPanelProvider
from trac.core import *
from trac.config import Option, BoolOption
from trac.perm import IPermissionRequestor
from trac.util.translation import _
from trac.web.chrome import ITemplateProvider
from trac.web.chrome import add_notice, add_warning
from trac_gitolite import utils
class GitoliteRepositoryManager(Component):
implements(IPermissionRequestor, IAdminPanelProvider, ITemplateProvider)
gitolite_admin_reponame = Option('trac-gitolite', 'admin_reponame',
default="gitolite-admin")
gitolite_admin_ssh_path = Option('trac-gitolite', 'admin_ssh_path',
default="git@localhost:gitolite-admin.git")
def read_config(self):
node = utils.get_repo_node(self.env, self.gitolite_admin_reponame,
"conf/gitolite.conf")
fp = node.get_content()
return utils.read_config(fp)
## IPermissionRequestor methods
def get_permission_actions(self):
return [('VERSIONCONTROL_ADMIN', ['REPOSITORY_CREATE']),
'REPOSITORY_CREATE']
## IAdminPanelProvider methods
def get_admin_panels(self, req):
if 'REPOSITORY_CREATE' in req.perm:
yield ('versioncontrol', _('Version Control'), 'gitolite',
_('Gitolite Repositories'))
def render_admin_panel(self, req, category, page, path_info):
req.perm.require('REPOSITORY_CREATE')
if req.method == 'POST':
repo_name = req.args['name']
perms = self.read_config()
if repo_name in perms:
add_warning(req, _('A repository named %s already exists; maybe you just need to tell Trac about it using the Repositories panel?'))
req.redirect(req.href.admin(category, page))
perms[repo_name] = repo_perms = {}
trac_user = getpass.getuser()
for perm in ['R', 'W', '+']:
repo_perms[perm] = [trac_user]
utils.save_file(self.gitolite_admin_ssh_path, 'conf/gitolite.conf',
utils.to_string(perms),
_('Adding new repository %s' % repo_name))
add_notice(req, _('Repository "%s" has been created. Now you should give some users permissions on it using the Version Control Permissions panel.' % repo_name))
req.redirect(req.href.admin(category, page))
data = {'repos': sorted(self.read_config())}
return 'admin_repository_gitolite.html', data
# ITemplateProvider methods
def get_htdocs_dirs(self):
return []
def get_templates_dirs(self):
return [pkg_resources.resource_filename('trac_gitolite', 'templates')]
|
bsd-3-clause
| 6,899,371,258,026,229,000
| 39.652174
| 174
| 0.62139
| false
| 3.934081
| false
| false
| false
|
DaveBackus/Data_Bootcamp
|
Code/Lab/SPF_forecasts.py
|
1
|
1126
|
"""
Survey of Professional Forecasters
The Philly Fed has been polling forecasters for years and posting both
summary statistics (mean forecasts, for example) and individual numbers
(suitably anonymized). We take a look at the recent data, see what's there.
Link
* https://www.philadelphiafed.org/research-and-data/real-time-center/survey-of-professional-forecasters/
Prepared for Data Bootcamp course at NYU
* http://databootcamp.nyuecon.com/
* https://github.com/DaveBackus/Data_Bootcamp/Code/Lab
Written by Dave Backus and Chase Coleman, March 2016
Created with Python 3.5
"""
"""
import packages, check versions
"""
import sys
import pandas as pd
#import numpy as np
#import matplotlib.pyplot as plt
print('\nPython version: ', sys.version)
print('Pandas version: ', pd.__version__, '\n')
#%%
"""
read data
"""
url1 = 'https://www.philadelphiafed.org/-/media/research-and-data/'
url2 = 'real-time-center/survey-of-professional-forecasters/'
url3 = 'historical-data/micro5.xls'
url = url1 + url2 + url3
spf = pd.read_excel(url)
print('Dimensions:', spf.shape)
print('\nData types:\n', spf.dtypes, sep='')
#%%
#%%
|
mit
| 8,940,016,088,585,971,000
| 24.590909
| 104
| 0.737123
| false
| 2.994681
| false
| false
| false
|
parksandwildlife/wastd
|
wastd/observations/migrations/0021_auto_20200622_1218.py
|
1
|
1991
|
# Generated by Django 2.2.10 on 2020-06-22 04:18
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('observations', '0020_auto_20200622_1045'),
]
operations = [
migrations.AddField(
model_name='encounter',
name='location_accuracy_m',
field=models.FloatField(blank=True, help_text='The accuracy of the supplied location in metres, if given.', null=True, verbose_name='Location accuracy (m)'),
),
migrations.AlterField(
model_name='encounter',
name='location_accuracy',
field=models.CharField(choices=[('10', 'GPS reading at exact location (10 m)'), ('1000', 'Site centroid or place name (1 km)'), ('10000', 'Rough estimate (10 km)')], default='1000', help_text='The source of the supplied location implies a rough location accuracy.', max_length=300, verbose_name='Location accuracy class (m)'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='light_sources_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Light sources present during emergence'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceobservation',
name='outlier_tracks_present',
field=models.CharField(choices=[('na', 'NA'), ('absent', 'Confirmed absent'), ('present', 'Confirmed present')], default='na', help_text='', max_length=300, verbose_name='Outlier tracks present'),
),
migrations.AlterField(
model_name='turtlehatchlingemergenceoutlierobservation',
name='outlier_group_size',
field=models.PositiveIntegerField(blank=True, help_text='', null=True, verbose_name='Number of tracks in outlier group'),
),
]
|
mit
| -4,406,606,444,078,129,700
| 51.394737
| 338
| 0.637368
| false
| 4.23617
| false
| false
| false
|
maartenbreddels/vaex
|
packages/vaex-jupyter/vaex/jupyter/ipyleaflet.py
|
1
|
1541
|
import ipyleaflet as ll
import traitlets
import ipywidgets as widgets
import vaex.image
class IpyleafletImage(traitlets.HasTraits):
x_min = traitlets.CFloat()
x_max = traitlets.CFloat()
y_min = traitlets.CFloat(None, allow_none=True)
y_max = traitlets.CFloat(None, allow_none=True)
x_label = traitlets.Unicode()
y_label = traitlets.Unicode()
tool = traitlets.Unicode(None, allow_none=True)
def __init__(self, output, presenter, map=None, zoom=12, **kwargs):
super().__init__(**kwargs)
self.output = output
self.presenter = presenter
self.map = map
self._zoom = zoom
self.last_image_layer = None
center = self.x_min + (self.x_max - self.x_min) / 2, self.y_min + (self.y_max - self.y_min) / 2
center = center[1], center[0]
self.map = ll.Map(center=center, zoom=self._zoom)
widgets.dlink((self.map, 'west'), (self, 'x_min'))
widgets.dlink((self.map, 'east'), (self, 'x_max'))
widgets.dlink((self.map, 'north'), (self, 'y_min'))
widgets.dlink((self.map, 'south'), (self, 'y_max'))
self.widget = self.map
def set_rgb_image(self, rgb_image):
with self.output:
if self.last_image_layer:
self.map.remove_layer(self.last_image_layer)
url = vaex.image.rgba_to_url(rgb_image[::-1, ::].copy())
image = ll.ImageOverlay(url=url, bounds=list(self.map.bounds))
self.map.add_layer(image)
self.last_image_layer = image
|
mit
| 3,515,014,371,890,492,400
| 34.837209
| 103
| 0.598313
| false
| 3.125761
| false
| false
| false
|
jokajak/itweb
|
data/env/lib/python2.6/site-packages/transaction-1.1.1-py2.6.egg/transaction/tests/test_register_compat.py
|
1
|
4007
|
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
"""Test backwards compatibility for resource managers using register().
The transaction package supports several different APIs for resource
managers. The original ZODB3 API was implemented by ZODB.Connection.
The Connection passed persistent objects to a Transaction's register()
method. It's possible that third-party code also used this API, hence
these tests that the code that adapts the old interface to the current
API works.
These tests use a TestConnection object that implements the old API.
They check that the right methods are called and in roughly the right
order.
Common cases
------------
First, check that a basic transaction commit works.
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(Object())
>>> cn.register(Object())
>>> transaction.commit()
>>> len(cn.committed)
3
>>> len(cn.aborted)
0
>>> cn.calls
['begin', 'vote', 'finish']
Second, check that a basic transaction abort works. If the
application calls abort(), then the transaction never gets into the
two-phase commit. It just aborts each object.
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(Object())
>>> cn.register(Object())
>>> transaction.abort()
>>> len(cn.committed)
0
>>> len(cn.aborted)
3
>>> cn.calls
[]
Error handling
--------------
The tricky part of the implementation is recovering from an error that
occurs during the two-phase commit. We override the commit() and
abort() methods of Object to cause errors during commit.
Note that the implementation uses lists internally, so that objects
are committed in the order they are registered. (In the presence of
multiple resource managers, objects from a single resource manager are
committed in order. I'm not sure if this is an accident of the
implementation or a feature that should be supported by any
implementation.)
The order of resource managers depends on sortKey().
>>> cn = TestConnection()
>>> cn.register(Object())
>>> cn.register(CommitError())
>>> cn.register(Object())
>>> transaction.commit()
Traceback (most recent call last):
...
RuntimeError: commit
>>> len(cn.committed)
1
>>> len(cn.aborted)
3
Clean up:
>>> transaction.abort()
"""
import doctest
import transaction
class Object(object):
def commit(self):
pass
def abort(self):
pass
class CommitError(Object):
def commit(self):
raise RuntimeError("commit")
class AbortError(Object):
def abort(self):
raise RuntimeError("abort")
class BothError(CommitError, AbortError):
pass
class TestConnection:
def __init__(self):
self.committed = []
self.aborted = []
self.calls = []
def register(self, obj):
obj._p_jar = self
transaction.get().register(obj)
def sortKey(self):
return str(id(self))
def tpc_begin(self, txn):
self.calls.append("begin")
def tpc_vote(self, txn):
self.calls.append("vote")
def tpc_finish(self, txn):
self.calls.append("finish")
def tpc_abort(self, txn):
self.calls.append("abort")
def commit(self, obj, txn):
obj.commit()
self.committed.append(obj)
def abort(self, obj, txn):
obj.abort()
self.aborted.append(obj)
def test_suite():
return doctest.DocTestSuite()
# additional_tests is for setuptools "setup.py test" support
additional_tests = test_suite
|
gpl-3.0
| 2,778,489,074,686,703,000
| 24.685897
| 78
| 0.670826
| false
| 4.031187
| true
| false
| false
|
intuition-io/insights
|
insights/plugins/hipchat.py
|
1
|
2386
|
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
'''
hipchat Bot
-----------
:copyright (c) 2014 Xavier Bruhiere.
:license: Apache 2.0, see LICENSE for more details.
'''
import os
import requests
import dna.logging
log = dna.logging.logger(__name__)
class Bot(object):
'''
Hipchat api client that sends notifications to a specified room
Doc: https://www.hipchat.com/docs/api
'''
api_key = os.environ.get('HIPCHAT_API')
api_url = 'https://api.hipchat.com/v1'
name = 'Intuition Bot'
bg_color = 'green'
intro = 'Hey guys, I detected an opportunity'
def __init__(self, room_id, name=None, api_key=None):
self.room_id = room_id
if api_key:
self.api_key = api_key
if name:
self.name = name
def _test_token(self):
''' TODO '''
pass
def _api_call(self, path, data={}, http_method=requests.get):
''' Process an http call against the hipchat api '''
log.info('performing api request', path=path)
response = http_method('/'.join([self.api_url, path]),
params={'auth_token': self.api_key},
data=data)
log.debug('{} remaining calls'.format(
response.headers['x-ratelimit-remaining']))
return response.json()
def message(self, body, room_id, style='text'):
''' Send a message to the given room '''
# TODO Automatically detect body format ?
path = 'rooms/message'
data = {
'room_id': room_id,
'message': body,
'from': self.name,
'notify': 1,
'message_format': style,
'color': self.bg_color
}
log.info('sending message to hipchat', message=body, room=room_id)
feedback = self._api_call(path, data, requests.post)
log.debug(feedback)
return feedback
def notify(self, datetime, orderbook):
# TODO Same flood security as mobile
if orderbook:
body = '<strong>{} - {}</strong><ul><li>{}</li></ul>'.format(
datetime,
self.intro,
'</li><li>'.join(
['{}: {}'.format(sid, quantity)
for sid, quantity in orderbook.iteritems()])
)
self.message(body, self.room_id, style='html')
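# Minimal usage sketch (hypothetical room id and token; in practice the
# HIPCHAT_API environment variable can supply the key instead):
if __name__ == '__main__':
    bot = Bot(room_id='42', api_key='not-a-real-token')
    bot.message('Hello from the intuition bot', bot.room_id)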
|
apache-2.0
| -3,800,319,069,206,980,000
| 28.825
| 74
| 0.532272
| false
| 3.787302
| false
| false
| false
|
Valeureux/wezer-exchange
|
__unreviewed__/community_project/community_project.py
|
1
|
2428
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Author: Yannick Buron and Valeureux Copyright Valeureux.org
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import orm
class GroupsView(orm.Model):
"""
Add group in user simplified form
"""
_inherit = 'res.groups'
def get_simplified_groups_by_application(self, cr, uid, context=None):
""" return all groups classified by application (module category),
as a list of pairs: [(app, kind, [group, ...]), ...],
where app and group are browse records, and kind is either 'boolean'
or 'selection'. Applications are given in sequence order. If kind is
'selection', the groups are given in reverse implication order.
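        For example (hypothetical browse records):
            [(<category 'Project Management'>, 'selection',
              [<group 'Project / User'>, <group 'Project / Manager'>])]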
"""
model = self.pool.get('ir.model.data')
res = super(GroupsView, self).get_simplified_groups_by_application(
cr, uid, context=context
)
# We need to catch the exception for the community module installation,
# the records are not created at this point
try:
category = model.get_object(
cr, uid, 'base', 'module_category_project_management'
)
group_project_user = model.get_object(
cr, uid, 'project', 'group_project_user'
)
group_project_manager = model.get_object(
cr, uid, 'project', 'group_project_manager'
)
res.append((
category, 'selection',
[group_project_user, group_project_manager]
))
except ValueError:
pass
return res
|
agpl-3.0
| 9,197,470,633,125,033,000
| 36.353846
| 79
| 0.583196
| false
| 4.504638
| false
| false
| false
|
tensorflow/federated
|
tensorflow_federated/experimental/python/learning/jax_components.py
|
1
|
5080
|
# Copyright 2021, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Experimental federated learning components for JAX."""
import collections
import jax
import numpy as np
from tensorflow_federated.experimental.python.core.api import computations as experimental_computations
from tensorflow_federated.python.common_libs import py_typecheck
from tensorflow_federated.python.common_libs import structure
from tensorflow_federated.python.core.api import computations
from tensorflow_federated.python.core.impl.federated_context import intrinsics
from tensorflow_federated.python.core.impl.types import computation_types
from tensorflow_federated.python.core.impl.types import placements
from tensorflow_federated.python.core.impl.types import type_conversions
from tensorflow_federated.python.core.templates import iterative_process
# TODO(b/175888145): Evolve this to reach parity with TensorFlow-specific helper
# and eventually unify the two.
def build_jax_federated_averaging_process(batch_type, model_type, loss_fn,
step_size):
"""Constructs an iterative process that implements simple federated averaging.
Args:
batch_type: An instance of `tff.Type` that represents the type of a single
batch of data to use for training. This type should be constructed with
standard Python containers (such as `collections.OrderedDict`) of the sort
that are expected as parameters to `loss_fn`.
model_type: An instance of `tff.Type` that represents the type of the model.
Similarly to `batch_size`, this type should be constructed with standard
Python containers (such as `collections.OrderedDict`) of the sort that are
expected as parameters to `loss_fn`.
loss_fn: A loss function for the model. Must be a Python function that takes
two parameters, one of them being the model, and the other being a single
batch of data (with types matching `batch_type` and `model_type`).
step_size: The step size to use during training (an `np.float32`).
Returns:
An instance of `tff.templates.IterativeProcess` that implements federated
training in JAX.
"""
batch_type = computation_types.to_type(batch_type)
model_type = computation_types.to_type(model_type)
py_typecheck.check_type(batch_type, computation_types.Type)
py_typecheck.check_type(model_type, computation_types.Type)
py_typecheck.check_callable(loss_fn)
py_typecheck.check_type(step_size, np.float)
def _tensor_zeros(tensor_type):
return jax.numpy.zeros(
tensor_type.shape.dims, dtype=tensor_type.dtype.as_numpy_dtype)
@experimental_computations.jax_computation
def _create_zero_model():
model_zeros = structure.map_structure(_tensor_zeros, model_type)
return type_conversions.type_to_py_container(model_zeros, model_type)
@computations.federated_computation
def _create_zero_model_on_server():
return intrinsics.federated_eval(_create_zero_model, placements.SERVER)
def _apply_update(model_param, param_delta):
return model_param - step_size * param_delta
@experimental_computations.jax_computation(model_type, batch_type)
def _train_on_one_batch(model, batch):
params = structure.flatten(structure.from_container(model, recursive=True))
grads = structure.flatten(
structure.from_container(jax.api.grad(loss_fn)(model, batch)))
updated_params = [_apply_update(x, y) for (x, y) in zip(params, grads)]
trained_model = structure.pack_sequence_as(model_type, updated_params)
return type_conversions.type_to_py_container(trained_model, model_type)
local_dataset_type = computation_types.SequenceType(batch_type)
@computations.federated_computation(model_type, local_dataset_type)
def _train_on_one_client(model, batches):
return intrinsics.sequence_reduce(batches, model, _train_on_one_batch)
@computations.federated_computation(
computation_types.FederatedType(model_type, placements.SERVER),
computation_types.FederatedType(local_dataset_type, placements.CLIENTS))
def _train_one_round(model, federated_data):
locally_trained_models = intrinsics.federated_map(
_train_on_one_client,
collections.OrderedDict([('model',
intrinsics.federated_broadcast(model)),
('batches', federated_data)]))
return intrinsics.federated_mean(locally_trained_models)
return iterative_process.IterativeProcess(
initialize_fn=_create_zero_model_on_server, next_fn=_train_one_round)
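# A minimal usage sketch (hypothetical shapes: a 784-feature linear model with
# 10 output classes; `my_loss_fn` and `federated_data` must be supplied by the
# caller):
#
#   batch_type = collections.OrderedDict(
#       pixels=computation_types.TensorType(np.float32, (50, 784)),
#       labels=computation_types.TensorType(np.int32, (50,)))
#   model_type = collections.OrderedDict(
#       weights=computation_types.TensorType(np.float32, (784, 10)),
#       bias=computation_types.TensorType(np.float32, (10,)))
#   trainer = build_jax_federated_averaging_process(
#       batch_type, model_type, my_loss_fn, step_size=0.001)
#   state = trainer.initialize()
#   state = trainer.next(state, federated_data)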
|
apache-2.0
| 357,904,846,135,631,740
| 46.037037
| 103
| 0.745276
| false
| 3.73255
| false
| false
| false
|
kcl-ddh/chopin-online
|
ocve/imagetools.py
|
1
|
5457
|
import re
import urllib
from ocve.models import PageLegacy, SourceLegacy
from ocve.models import PageImage
from django.conf import settings
from django.utils.html import escape
import logging
__author__ = 'Elliot'
logger = logging.getLogger(__name__)
def buildOldPath(pi):
p = pi.page
pl = PageLegacy.objects.get(pageimage=pi)
oldPath = 'ERRor'
if pl.cfeoKey > 0:
path = re.search("(.*?)_(.*)", pl.filename)
if path is not None:
oldPath = path.group(1) + "/" + path.group(1) + "_" + path.group(2) + ".jp2"
elif pl.storageStructure is not None:
path = re.search("(\d+)\/.*?\/(.*)", pl.storageStructure)
if path is not None:
sl = SourceLegacy.objects.get(source=p.sourcecomponent.source)
oldPath = path.group(1) + "/" + str(sl.witnessKey) + "/" + path.group(2) + ".jp2"
return oldPath
# Use the IIP server to get the width/height of an image.
# Param: the full URL to the image in IIP format.
def getImageDimensions(fullurl):
meta = urllib.urlopen(fullurl+ '&obj=IIP,1.0&obj=Max-size&obj=Tile-size&obj=Resolution-number')
dimensions={'width':0,'height':0}
for line in meta.readlines():
m = re.search("Max-size:\s*(\d+)\s*(\d+)", line)
if m is not None:
width = int(m.group(1))
height = int(m.group(2))
dimensions['width']=width
dimensions['height']=height
if dimensions['width'] == 0:
logger.error('Image at '+fullurl+' not found')
return dimensions
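# Example (hypothetical IIP endpoint; on failure the function logs an error
# and returns {'width': 0, 'height': 0}):
#   dims = getImageDimensions('http://images.example.org/iip/iipsrv.fcgi?FIF=jp2/foo.jp2')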
# Uses the IIP server to make sure the dimensions stored in the db are correct.
# pi: the PageImage to check
def verifyImageDimensions(pi, oldPath):
found=0
try:
fullurl = settings.IMAGE_SERVER_URL + '?FIF='
fullurl = fullurl + oldPath
dimensions=getImageDimensions(fullurl)
if dimensions['width'] >0:
if pi.width != dimensions['width'] or pi.height != dimensions['height']:
pi.width = dimensions['width']
pi.height= dimensions['height']
pi.permissionnote = ''
pl=PageLegacy.objects.filter(pageimage=pi)
if pl.count() >0:
if pl[0].jp2 == 'UNVERIFIED':
pl[0].jp2=oldPath
pl[0].save()
pi.save()
found=1
except IOError:
print("Could not contact server at "+fullurl)
return found
# Request image information from the IIP server
# to verify that image paths are correct
#http://ocve2-stg.cch.kcl.ac.uk/iip/iipsrv.fcgi?FIF=jp2/ocvejp2-proc/20/1/01TP/20-1-BH_GBLbl_p01TP.jp2&obj=IIP,1.0&obj=Max-size&obj=Tile-size&obj=Resolution-number
#iipsrv.fcgi?FIF=jp2/ocvejp2-proc/20/1/01TP/20-1-BH_GBLbl_p01TP.jp2&obj=IIP,1.0&obj=Max-size&obj=Tile-size&obj=Resolution-number
#jp2/ocvejp2-proc/20/1/02B/20-1-BH_GBLbl_p02B.jp2
def verifyImagesViaIIP():
log = '<html><head>IMAGE REPORT</head><body><ul>'
fullurl = settings.IMAGE_SERVER_URL + '?FIF=jp2/' #'http://ocve2-stg.cch.kcl.ac.uk/iip/iipsrv.fcgi?FIF=jp2/'
allpages = PageImage.objects.filter(pagelegacy__jp2='UNVERIFIED')
count=0
for pi in allpages:
#build old path
oldPath = buildOldPath(pi)
fullurl = settings.IMAGE_SERVER_URL + '?FIF=jp2/' #'http://ocve2-stg.cch.kcl.ac.uk/iip/iipsrv.fcgi?FIF=jp2/'
        # Request image information from IIP
pl = PageLegacy.objects.get(pageimage=pi)
if pl.cfeoKey > 0:
fullurl = 'jp2/cfeojp2-proc/' + oldPath + '&obj=IIP,1.0&obj=Max-size'
else:
fullurl = 'jp2/ocvejp2-proc/' + oldPath + '&obj=IIP,1.0&obj=Max-size'
meta = urllib.urlopen(fullurl)
# found=0
# for line in meta.readlines():
# m = re.search("Max-size:\s*(\d+)\s*(\d+)", line)
# if m is not None:
# found=1
        found = verifyImageDimensions(pi, oldPath)
        if found == 0:
if pl.cfeoKey > 0:
#Check the _loose directory, they might be in there
pi.width=0
verifyImageDimensions(pi,'/_loose/'+pl.filename+'.jp2')
if pi.width>0:
pl.jp2='cfeojp2-proc/_loose/'+pl.filename+'.jp2'
if pl.storageStructure is None:
pl.storageStructure=''
pl.save()
found=1
#log=log+'<li>FOUND IN _loose: '+s.label+': '+pi.page.label+' key:'+str(pi.id)+' at path '+oldPath+':'+pl.filename+'</li>'
            if found == 0:
#Image not found, write to log
s=pi.page.sourcecomponent.source
print str(pi.id)+' not found'
try:
log=log+'<li>'+s.label+': '+pi.page.label+' key:'+str(pi.id)+' at path '+oldPath+':'+pl.filename+'</li>'
except TypeError:
log=log+'<li> key:'+str(pi.id)+' </li>'
count+=1
else:
#Record correct path in pagelegacy.jp2
if pl.cfeoKey > 0:
pl.jp2='cfeojp2-proc/' + oldPath
else:
pl.jp2='ocvejp2-proc/' + oldPath
if pl.storageStructure is None:
pl.storageStructure=''
pl.save()
return log + '</ul><h2>Total: ' + str(count) + '</h2></body>'
|
gpl-3.0
| -8,405,203,046,656,316,000
| 40.302326
| 163
| 0.550852
| false
| 3.309278
| false
| false
| false
|
bhupennewalkar1337/erpnext
|
erpnext/utilities/transaction_base.py
|
1
|
5809
|
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
import frappe.share
from frappe import _
from frappe.utils import cstr, now_datetime, cint, flt
from erpnext.controllers.status_updater import StatusUpdater
class UOMMustBeIntegerError(frappe.ValidationError): pass
class TransactionBase(StatusUpdater):
def load_notification_message(self):
dt = self.doctype.lower().replace(" ", "_")
if int(frappe.db.get_value("Notification Control", None, dt) or 0):
self.set("__notification_message",
frappe.db.get_value("Notification Control", None, dt + "_message"))
def validate_posting_time(self):
if not self.posting_time:
self.posting_time = now_datetime().strftime('%H:%M:%S')
def add_calendar_event(self, opts, force=False):
if cstr(self.contact_by) != cstr(self._prev.contact_by) or \
cstr(self.contact_date) != cstr(self._prev.contact_date) or force:
self.delete_events()
self._add_calendar_event(opts)
def delete_events(self):
events = frappe.db.sql_list("""select name from `tabEvent`
where ref_type=%s and ref_name=%s""", (self.doctype, self.name))
if events:
frappe.db.sql("delete from `tabEvent` where name in (%s)"
.format(", ".join(['%s']*len(events))), tuple(events))
frappe.db.sql("delete from `tabEvent Role` where parent in (%s)"
.format(", ".join(['%s']*len(events))), tuple(events))
def _add_calendar_event(self, opts):
opts = frappe._dict(opts)
if self.contact_date:
event = frappe.get_doc({
"doctype": "Event",
"owner": opts.owner or self.owner,
"subject": opts.subject,
"description": opts.description,
"starts_on": self.contact_date,
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
})
event.insert(ignore_permissions=True)
if frappe.db.exists("User", self.contact_by):
frappe.share.add("Event", event.name, self.contact_by,
flags={"ignore_share_permission": True})
def validate_uom_is_integer(self, uom_field, qty_fields):
validate_uom_is_integer(self, uom_field, qty_fields)
def validate_with_previous_doc(self, ref):
for key, val in ref.items():
is_child = val.get("is_child_table")
ref_doc = {}
item_ref_dn = []
for d in self.get_all_children(self.doctype + " Item"):
ref_dn = d.get(val["ref_dn_field"])
if ref_dn:
if is_child:
self.compare_values({key: [ref_dn]}, val["compare_fields"], d)
if ref_dn not in item_ref_dn:
item_ref_dn.append(ref_dn)
elif not val.get("allow_duplicate_prev_row_id"):
frappe.throw(_("Duplicate row {0} with same {1}").format(d.idx, key))
elif ref_dn:
ref_doc.setdefault(key, [])
if ref_dn not in ref_doc[key]:
ref_doc[key].append(ref_dn)
if ref_doc:
self.compare_values(ref_doc, val["compare_fields"])
def compare_values(self, ref_doc, fields, doc=None):
for reference_doctype, ref_dn_list in ref_doc.items():
for reference_name in ref_dn_list:
prevdoc_values = frappe.db.get_value(reference_doctype, reference_name,
[d[0] for d in fields], as_dict=1)
if not prevdoc_values:
frappe.throw(_("Invalid reference {0} {1}").format(reference_doctype, reference_name))
for field, condition in fields:
if prevdoc_values[field] is not None:
self.validate_value(field, condition, prevdoc_values[field], doc)
def validate_rate_with_reference_doc(self, ref_details):
for ref_dt, ref_dn_field, ref_link_field in ref_details:
for d in self.get("items"):
if d.get(ref_link_field):
ref_rate = frappe.db.get_value(ref_dt + " Item", d.get(ref_link_field), "rate")
if abs(flt(d.rate - ref_rate, d.precision("rate"))) >= .01:
frappe.throw(_("Row #{0}: Rate must be same as {1}: {2} ({3} / {4}) ")
.format(d.idx, ref_dt, d.get(ref_dn_field), d.rate, ref_rate))
def get_link_filters(self, for_doctype):
if hasattr(self, "prev_link_mapper") and self.prev_link_mapper.get(for_doctype):
fieldname = self.prev_link_mapper[for_doctype]["fieldname"]
values = filter(None, tuple([item.as_dict()[fieldname] for item in self.items]))
if values:
ret = {
for_doctype : {
"filters": [[for_doctype, "name", "in", values]]
}
}
else:
ret = None
else:
ret = None
return ret
def delink_advance_entries(self, linked_doc_name):
total_allocated_amount = 0
for adv in self.advances:
consider_for_total_advance = True
if adv.reference_name == linked_doc_name:
frappe.db.sql("""delete from `tab{0} Advance`
where name = %s""".format(self.doctype), adv.name)
consider_for_total_advance = False
if consider_for_total_advance:
total_allocated_amount += flt(adv.allocated_amount, adv.precision("allocated_amount"))
frappe.db.set_value(self.doctype, self.name, "total_advance", total_allocated_amount, update_modified=False)
def delete_events(ref_type, ref_name):
frappe.delete_doc("Event", frappe.db.sql_list("""select name from `tabEvent`
where ref_type=%s and ref_name=%s""", (ref_type, ref_name)), for_reload=True)
def validate_uom_is_integer(doc, uom_field, qty_fields, child_dt=None):
if isinstance(qty_fields, basestring):
qty_fields = [qty_fields]
distinct_uoms = list(set([d.get(uom_field) for d in doc.get_all_children()]))
integer_uoms = filter(lambda uom: frappe.db.get_value("UOM", uom,
"must_be_whole_number") or None, distinct_uoms)
if not integer_uoms:
return
for d in doc.get_all_children(parenttype=child_dt):
if d.get(uom_field) in integer_uoms:
for f in qty_fields:
if d.get(f):
if cint(d.get(f))!=d.get(f):
frappe.throw(_("Quantity cannot be a fraction in row {0}").format(d.idx), UOMMustBeIntegerError)
|
gpl-3.0
| -7,810,579,147,257,779,000
| 34.638037
| 110
| 0.667413
| false
| 2.968319
| false
| false
| false
|
Patola/patolascripts
|
cam.py
|
1
|
7929
|
#!/usr/bin/python
from gi.repository import Gtk
import sys,re,os,time
import urllib2
import subprocess,signal
bitRates={
"QVGA":(1,[128,256,384,512]),
"VGA":(0,[128,256,384,512,640,768,896,1024]),
"720P":(3,[128,256,384,512,640,768,896,1024,1280,1536,1792,2048,2560,3072,3584,4096])
}
frameRates=range(1,31)
# default values
wCamDefaultPort=81
wCamDefaultMode="VGA"
wCamDefaultBitRate=1024
wCamDefaultFrameRate=15
wCamDefaultUser="admin"
wCamDefaultPassWord="888888"
wCamPort = wCamDefaultPort
wCamMode=wCamDefaultMode
wCamBitRate=wCamDefaultBitRate
wCamFrameRate=wCamDefaultFrameRate
wCamAddress=None # wCamAddress is mandatory
wCamUser=wCamDefaultUser
wCamPassWord=wCamDefaultPassWord
wCamTitle=None
mplayerPid=None
def usage():
print "Usage : %s <OPTIONS>" % (sys.argv[0])
print "-h, --help show this help"
print "-u, --user=<user> set user ( default is [%s] )" % (wCamDefaultUser)
print "-x, --password=<password> set password ( default is [%s] )" % (wCamDefaultPassWord)
print "-a, --address=<webcam address> set webcam address e.g -i 192.168.0.253 or -i=starcam.myhome.lan ( mandatory )"
print "-p, --port=<webcam ip address> set webcam port e.g. -p 81 (default is [%s])" % (wCamDefaultPort)
print "-m, --mode=<mode> set output resolution: allowed values: QVGA, VGA, 720P e.g. -m VGA (default is [%s])" % (wCamDefaultMode)
print "-b, --bitrate=<bitrate> set bitrate: allowed values depends from mode: (default is [%s])" % (wCamDefaultBitRate)
for mode,rates in bitRates.iteritems():
print " for %s: %s" % (mode,rates[1])
print "-f, --framerate=<fps> set framerate: allowed values %s e.g -f 25 (default is [%s])" % (frameRates,wCamDefaultFrameRate)
sys.exit(1)
def kill_child_processes(parent_pid,sig=signal.SIGTERM):
cmd="ps -o pid --ppid %d --noheaders" % (parent_pid)
print "cmd [%s]" % (cmd)
ps_command = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
ps_output = ps_command.stdout.read()
retcode = ps_command.wait()
if retcode == 0:
for pid_str in ps_output.split("\n")[:-1]:
kill_child_processes (int(pid_str))
print "child kill pid %s" % (pid_str)
try:
os.kill(int(pid_str), sig)
except:
pass
else:
try:
os.kill(parent_pid, sig)
except:
pass
# http://starcam/camera_control.cgi?loginuse=admin&loginpas=888888&param=13&value=512&140094356 38360.6156135550700128&_=140094356 3838
def httpGet(uri,params):
import random
import time
# params="%s&%f" % (params,time.time()*1000+random.random())
url="http://%s:%s/%s?loginuse=%s&loginpas=%s&%s" % (wCamAddress,wCamPort,uri,wCamUser,wCamPassWord,params)
print url
sock=urllib2.urlopen (url,None,4)
response = sock.read()
sock.close()
print response
class CamWindow(Gtk.Window):
def __init__(self):
Gtk.Window.__init__(self, title="CAM control")
self.set_border_width(10)
self.set_title(wCamTitle)
# http://python-gtk-3-tutorial.readthedocs.org/en/latest/layout.html#grid
grid = Gtk.Grid()
self.add(grid)
top = Gtk.Button("Up")
top.connect("pressed", self._pressed,"decoder_control.cgi","command=0&onestep=0")
top.connect("released", self._released,"decoder_control.cgi","command=1&onestep=0")
grid.attach(top, 1, 0, 1, 1)
left = Gtk.Button("Left")
left.connect("pressed", self._pressed,"decoder_control.cgi","command=4&onestep=0")
left.connect("released", self._released,"decoder_control.cgi","command=5&onestep=0")
grid.attach(left, 0, 1, 1, 1)
right = Gtk.Button("Right")
right.connect("pressed", self._pressed,"decoder_control.cgi","command=6&onestep=0")
right.connect("released", self._released,"decoder_control.cgi","command=7&onestep=0")
grid.attach(right, 2, 1, 1, 1)
bottom = Gtk.Button("Down")
bottom.connect("pressed", self._pressed,"decoder_control.cgi","command=2&onestep=0")
bottom.connect("released", self._released,"decoder_control.cgi","command=3&onestep=0")
grid.attach(bottom, 1, 2, 1, 1)
zoomout = Gtk.Button("Zoom Out")
zoomout.connect("pressed", self._pressed,"camera_control.cgi","param=17&value=1")
zoomout.connect("released", self._released,"camera_control.cgi","param=17&value=0")
grid.attach(zoomout, 3, 2, 1, 1)
zoomin = Gtk.Button("Zoom In")
zoomin.connect("pressed", self._pressed,"camera_control.cgi","param=18&value=1")
zoomin.connect("released", self._released,"camera_control.cgi","param=18&value=0")
grid.attach(zoomin, 3, 0, 1, 1)
def _pressed(self, button,uri,params):
print("press")
httpGet (uri,params)
def _released(self, button,uri,params):
print("release")
httpGet (uri,params)
def on_close_clicked(self, button):
print("Closing application")
Gtk.main_quit()
def go():
win = CamWindow()
win.connect("delete-event", Gtk.main_quit)
win.show_all()
Gtk.main()
if __name__ == '__main__':
import getopt
try:
        opts, args = getopt.getopt(sys.argv[1:], "u:x:a:p:m:b:f:vh", ["help", "user=", "password=", "address=", "port=", "mode=", "bitrate=", "framerate=", "verbose"])
except getopt.GetoptError:
# print help information and exit:
usage()
sys.exit(2)
verbose = False
for o, a in opts:
if o in ( "-u","--user"):
wCamUser = a
if o in ( "-x","--password"):
wCamPassWord = a
if o in ( "-a","--address"):
wCamAddress = a
if o in ( "-p","--port"):
wCamPort = a
if o in ( "-m","--mode"):
wCamMode = a
if o in ( "-b","--bitrate"):
wCamBitRate = int(a)
if o in ( "-f","--framerate"):
wCamFrameRate = int(a)
if o in ( "-v","--verbose"):
verbose = a
if o in ("-h","--help"):
usage()
if (not wCamAddress):
usage()
if verbose:
print "Verbose is [%d]" % (verbose)
if wCamMode not in bitRates.keys():
print "Invalid Mode [%s]" % (wCamMode)
usage()
else:
if not wCamBitRate in bitRates[wCamMode][1]:
print "Invalid bitRate [%s] for mode [%s]" % (wCamBitRate, wCamMode)
usage()
if wCamFrameRate not in frameRates:
print "Invalid frameRate [%s]" % (wCamFrameRate)
usage()
wCamTitle="%s:%s" % (wCamAddress,wCamPort)
print "Using user %s:%s %s:%s, mode %s, bitrate %s, framerate %s" % (wCamUser,wCamPassWord,wCamAddress,wCamPort,wCamMode,wCamBitRate,wCamFrameRate)
# set framerate
httpGet ("camera_control.cgi","param=6&value=%d" % (wCamFrameRate))
time.sleep(1)
#httpGet ("get_camera_params.cgi","")
# set video format
httpGet ("camera_control.cgi","param=0&value=%s" % (bitRates[wCamMode][0]))
time.sleep(1)
httpGet ("get_camera_params.cgi","")
streamingUrl="http://%s:%s/livestream.cgi?user=%s&pwd=%s&streamid=0&audio=0&filename=" % (wCamAddress,wCamPort,wCamUser,wCamPassWord)
cmd="curl -s \"%s\" | mplayer -title \"%s\" -quiet -nocache -vc ffh264 -demuxer h264es -fps %s -noextbased -" % (streamingUrl,wCamTitle,wCamFrameRate)
mplayerPid=os.fork()
print "player pid %d" % (mplayerPid)
if not mplayerPid:
os.system (cmd)
else:
time.sleep(4)
# set bitrate
httpGet ("camera_control.cgi","param=13&value=%d" % (wCamBitRate))
go()
kill_child_processes (mplayerPid)
#os.kill (mplayerPid,signal.SIGTERM)
|
apache-2.0
| 4,252,622,409,302,086,000
| 29.496154
| 154
| 0.599319
| false
| 3.197177
| false
| false
| false
|
JackDanger/sentry
|
src/sentry/models/project.py
|
1
|
10877
|
"""
sentry.models.project
~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import, print_function
import logging
import six
import warnings
from bitfield import BitField
from django.conf import settings
from django.db import models
from django.db.models import F
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from sentry.app import locks
from sentry.constants import ObjectStatus
from sentry.db.models import (
BaseManager, BoundedPositiveIntegerField, FlexibleForeignKey, Model,
sane_repr
)
from sentry.db.models.utils import slugify_instance
from sentry.utils.colors import get_hashed_color
from sentry.utils.http import absolute_uri
from sentry.utils.retries import TimedRetryPolicy
# TODO(dcramer): pull in enum library
ProjectStatus = ObjectStatus
class ProjectManager(BaseManager):
# TODO(dcramer): we might want to cache this per user
def get_for_user(self, team, user, scope=None, _skip_team_check=False):
from sentry.models import Team
if not (user and user.is_authenticated()):
return []
if not _skip_team_check:
team_list = Team.objects.get_for_user(
organization=team.organization,
user=user,
scope=scope,
)
try:
team = team_list[team_list.index(team)]
except ValueError:
logging.info('User does not have access to team: %s', team.id)
return []
base_qs = self.filter(
team=team,
status=ProjectStatus.VISIBLE,
)
project_list = []
for project in base_qs:
project.team = team
project_list.append(project)
return sorted(project_list, key=lambda x: x.name.lower())
class Project(Model):
"""
Projects are permission based namespaces which generally
are the top level entry point for all data.
"""
__core__ = True
slug = models.SlugField(null=True)
name = models.CharField(max_length=200)
forced_color = models.CharField(max_length=6, null=True, blank=True)
organization = FlexibleForeignKey('sentry.Organization')
team = FlexibleForeignKey('sentry.Team')
public = models.BooleanField(default=False)
date_added = models.DateTimeField(default=timezone.now)
status = BoundedPositiveIntegerField(default=0, choices=(
(ObjectStatus.VISIBLE, _('Active')),
(ObjectStatus.PENDING_DELETION, _('Pending Deletion')),
(ObjectStatus.DELETION_IN_PROGRESS, _('Deletion in Progress')),
), db_index=True)
# projects that were created before this field was present
# will have their first_event field set to date_added
first_event = models.DateTimeField(null=True)
flags = BitField(flags=(
('has_releases', 'This Project has sent release data'),
), default=0, null=True)
objects = ProjectManager(cache_fields=[
'pk',
'slug',
])
class Meta:
app_label = 'sentry'
db_table = 'sentry_project'
unique_together = (('team', 'slug'), ('organization', 'slug'))
__repr__ = sane_repr('team_id', 'name', 'slug')
def __unicode__(self):
return u'%s (%s)' % (self.name, self.slug)
def next_short_id(self):
from sentry.models import Counter
return Counter.increment(self)
def save(self, *args, **kwargs):
if not self.slug:
lock = locks.get('slug:project', duration=5)
with TimedRetryPolicy(10)(lock.acquire):
slugify_instance(self, self.name, organization=self.organization)
super(Project, self).save(*args, **kwargs)
else:
super(Project, self).save(*args, **kwargs)
def get_absolute_url(self):
return absolute_uri('/{}/{}/'.format(self.organization.slug, self.slug))
def merge_to(self, project):
from sentry.models import (
Group, GroupTagValue, Event, TagValue
)
if not isinstance(project, Project):
project = Project.objects.get_from_cache(pk=project)
for group in Group.objects.filter(project=self):
try:
other = Group.objects.get(
project=project,
)
except Group.DoesNotExist:
group.update(project=project)
GroupTagValue.objects.filter(
project_id=self.id,
group_id=group.id,
).update(project_id=project.id)
else:
Event.objects.filter(
group_id=group.id,
).update(group_id=other.id)
for obj in GroupTagValue.objects.filter(group=group):
obj2, created = GroupTagValue.objects.get_or_create(
project_id=project.id,
group_id=group.id,
key=obj.key,
value=obj.value,
defaults={'times_seen': obj.times_seen}
)
if not created:
obj2.update(times_seen=F('times_seen') + obj.times_seen)
for fv in TagValue.objects.filter(project=self):
TagValue.objects.get_or_create(project=project, key=fv.key, value=fv.value)
fv.delete()
self.delete()
def is_internal_project(self):
for value in (settings.SENTRY_FRONTEND_PROJECT, settings.SENTRY_PROJECT):
if six.text_type(self.id) == six.text_type(value) or six.text_type(self.slug) == six.text_type(value):
return True
return False
def get_tags(self, with_internal=True):
from sentry.models import TagKey
if not hasattr(self, '_tag_cache'):
tags = self.get_option('tags', None)
if tags is None:
tags = [
t for t in TagKey.objects.all_keys(self)
if with_internal or not t.startswith('sentry:')
]
self._tag_cache = tags
return self._tag_cache
# TODO: Make these a mixin
def update_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.set_value(self, *args, **kwargs)
def get_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.get_value(self, *args, **kwargs)
def delete_option(self, *args, **kwargs):
from sentry.models import ProjectOption
return ProjectOption.objects.unset_value(self, *args, **kwargs)
@property
def callsign(self):
return self.slug.upper()
@property
def color(self):
if self.forced_color is not None:
return '#%s' % self.forced_color
return get_hashed_color(self.callsign or self.slug)
@property
def member_set(self):
from sentry.models import OrganizationMember
return self.organization.member_set.filter(
id__in=OrganizationMember.objects.filter(
organizationmemberteam__is_active=True,
organizationmemberteam__team=self.team,
).values('id'),
user__is_active=True,
).distinct()
def has_access(self, user, access=None):
from sentry.models import AuthIdentity, OrganizationMember
warnings.warn('Project.has_access is deprecated.', DeprecationWarning)
queryset = self.member_set.filter(user=user)
if access is not None:
queryset = queryset.filter(type__lte=access)
try:
member = queryset.get()
except OrganizationMember.DoesNotExist:
return False
try:
auth_identity = AuthIdentity.objects.get(
auth_provider__organization=self.organization_id,
user=member.user_id,
)
except AuthIdentity.DoesNotExist:
return True
return auth_identity.is_valid(member)
def get_audit_log_data(self):
return {
'id': self.id,
'slug': self.slug,
'name': self.name,
'status': self.status,
'public': self.public,
}
def get_full_name(self):
if self.team.name not in self.name:
return '%s %s' % (self.team.name, self.name)
return self.name
def get_notification_recipients(self, user_option):
from sentry.models import UserOption
alert_settings = dict(
(o.user_id, int(o.value))
for o in UserOption.objects.filter(
project=self,
key=user_option,
)
)
disabled = set(u for u, v in six.iteritems(alert_settings) if v == 0)
member_set = set(self.member_set.exclude(
user__in=disabled,
).values_list('user', flat=True))
# determine members default settings
members_to_check = set(u for u in member_set if u not in alert_settings)
if members_to_check:
disabled = set((
uo.user_id for uo in UserOption.objects.filter(
key='subscribe_by_default',
user__in=members_to_check,
)
if uo.value == '0'
))
member_set = [x for x in member_set if x not in disabled]
return member_set
def get_mail_alert_subscribers(self):
user_ids = self.get_notification_recipients('mail:alert')
if not user_ids:
return []
from sentry.models import User
return list(User.objects.filter(id__in=user_ids))
def is_user_subscribed_to_mail_alerts(self, user):
from sentry.models import UserOption
is_enabled = UserOption.objects.get_value(
user,
'mail:alert',
project=self
)
if is_enabled is None:
is_enabled = UserOption.objects.get_value(
user,
'subscribe_by_default',
'1'
) == '1'
else:
is_enabled = bool(is_enabled)
return is_enabled
def is_user_subscribed_to_workflow(self, user):
from sentry.models import UserOption, UserOptionValue
opt_value = UserOption.objects.get_value(
user,
'workflow:notifications',
project=self
)
if opt_value is None:
opt_value = UserOption.objects.get_value(
user,
'workflow:notifications',
UserOptionValue.all_conversations
)
return opt_value == UserOptionValue.all_conversations
|
bsd-3-clause
| -3,192,964,341,637,174,300
| 31.861027
| 114
| 0.583249
| false
| 4.188294
| false
| false
| false
|
tovrstra/sympy
|
sympy/matrices/matrices.py
|
1
|
67422
|
import warnings
from sympy import Basic, Symbol, Integer
from sympy.core import sympify
from sympy.core.basic import S
from sympy.polys import Poly, roots, cancel
from sympy.simplify import simplify
from sympy.utilities import any
# from sympy.printing import StrPrinter /cyclic/
import random
class NonSquareMatrixException(Exception):
pass
class ShapeError(ValueError):
"""Wrong matrix shape"""
pass
class MatrixError(Exception):
pass
def _dims_to_nm(dims):
"""Converts dimensions tuple (or any object with length 1 or 2) or scalar
in dims to matrix dimensions n and m."""
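    # For example, _dims_to_nm((2, 3)) -> (2, 3) and _dims_to_nm(3) -> (3, 3).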
try:
l = len(dims)
except TypeError:
dims = (dims,)
l = 1
# This will work for nd-array too when they are added to sympy.
try:
for dim in dims:
assert (dim > 0)
except AssertionError:
raise ValueError("Matrix dimensions should be positive integers!")
if l == 2:
n, m = map(int, dims)
elif l == 1:
n = m = int(dims[0])
else:
raise ValueError("Matrix dimensions should be a two-element tuple of ints or a single int!")
return n, m
def _iszero(x):
return x == 0
class DeferredVector(object):
def __init__(self,name):
self.name=name
def __getitem__(self,i):
component_name = '%s[%d]'%(self.name,i)
return Symbol(component_name)
def __str__(self):
return StrPrinter.doprint(self)
def __repr__(self):
return StrPrinter.doprint(self)
class Matrix(object):
# Added just for numpy compatibility
# TODO: investigate about __array_priority__
__array_priority__ = 10.0
def __init__(self, *args):
"""
Matrix can be constructed with values or a rule.
>>> from sympy import Matrix, I
>>> Matrix( ((1,2+I), (3,4)) ) #doctest:+NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> Matrix(2, 2, lambda i,j: (i+1)*j ) #doctest:+NORMALIZE_WHITESPACE
[0, 1]
[0, 2]
"""
if len(args) == 3 and callable(args[2]):
operation = args[2]
self.rows = int(args[0])
self.cols = int(args[1])
self.mat = []
for i in range(self.rows):
for j in range(self.cols):
self.mat.append(sympify(operation(i, j)))
elif len(args)==3 and isinstance(args[2], (list, tuple)):
self.rows=args[0]
self.cols=args[1]
mat = args[2]
if len(mat) != self.rows*self.cols:
raise MatrixError('List length should be equal to rows*columns')
self.mat = map(lambda i: sympify(i), mat)
elif len(args) == 1:
mat = args[0]
if isinstance(mat, Matrix):
self.rows = mat.rows
self.cols = mat.cols
self.mat = mat[:]
return
elif hasattr(mat, "__array__"):
# NumPy array or matrix or some other object that implements
# __array__. So let's first use this method to get a
# numpy.array() and then make a python list out of it.
arr = mat.__array__()
if len(arr.shape) == 2:
self.rows, self.cols = arr.shape[0], arr.shape[1]
self.mat = map(lambda i: sympify(i), arr.ravel())
return
elif len(arr.shape) == 1:
self.rows, self.cols = 1, arr.shape[0]
self.mat = [0]*self.cols
for i in xrange(len(arr)):
self.mat[i] = sympify(arr[i])
return
else:
raise NotImplementedError("Sympy supports just 1D and 2D matrices")
elif not isinstance(mat, (list, tuple)):
raise TypeError("Matrix constructor doesn't accept %s as input" % str(type(mat)))
self.rows = len(mat)
if len(mat) != 0:
if not isinstance(mat[0], (list, tuple)):
self.cols = 1
self.mat = map(lambda i: sympify(i), mat)
return
self.cols = len(mat[0])
else:
self.cols = 0
self.mat = []
for j in xrange(self.rows):
assert len(mat[j])==self.cols
for i in xrange(self.cols):
self.mat.append(sympify(mat[j][i]))
elif len(args) == 0:
# Empty Matrix
self.rows = self.cols = 0
self.mat = []
else:
# TODO: on 0.7.0 delete this and uncomment the last line
mat = args
if not isinstance(mat[0], (list, tuple)):
# make each element a singleton
mat = [ [element] for element in mat ]
warnings.warn("Deprecated constructor, use brackets: Matrix(%s)" % str(mat))
self.rows=len(mat)
self.cols=len(mat[0])
self.mat=[]
for j in xrange(self.rows):
assert len(mat[j])==self.cols
for i in xrange(self.cols):
self.mat.append(sympify(mat[j][i]))
#raise TypeError("Data type not understood")
def key2ij(self,key):
"""Converts key=(4,6) to 4,6 and ensures the key is correct."""
if not (isinstance(key,(list, tuple)) and len(key) == 2):
raise TypeError("wrong syntax: a[%s]. Use a[i,j] or a[(i,j)]"
%repr(key))
i,j=key
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
print self.rows, " ", self.cols
raise IndexError("Index out of range: a[%s]"%repr(key))
return i,j
def transpose(self):
"""
Matrix transposition.
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m.transpose() #doctest: +NORMALIZE_WHITESPACE
[ 1, 3]
[2 + I, 4]
>>> m.T == m.transpose()
True
"""
a = [0]*self.cols*self.rows
for i in xrange(self.cols):
a[i*self.rows:(i+1)*self.rows] = self.mat[i::self.cols]
return Matrix(self.cols,self.rows,a)
T = property(transpose,None,None,"Matrix transposition.")
def conjugate(self):
"""By-element conjugation."""
out = Matrix(self.rows,self.cols,
lambda i,j: self[i,j].conjugate())
return out
C = property(conjugate,None,None,"By-element conjugation.")
@property
def H(self):
"""
Hermite conjugation.
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m.H #doctest: +NORMALIZE_WHITESPACE
[ 1, 3]
[2 - I, 4]
"""
out = self.T.C
return out
@property
def D(self):
"""Dirac conjugation."""
from sympy.physics.matrices import mgamma
out = self.H * mgamma(0)
return out
def __getitem__(self,key):
"""
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m[1,0]
3
>>> m.H[1,0]
2 - I
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
return self.submatrix(key)
else:
# a2idx inlined
try:
i = i.__int__()
except AttributeError:
try:
i = i.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
# a2idx inlined
try:
j = j.__int__()
except AttributeError:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
raise IndexError("Index out of range: a[%s]" % (key,))
else:
return self.mat[i*self.cols + j]
else:
# row-wise decomposition of matrix
if type(key) is slice:
return self.mat[key]
else:
k = a2idx(key)
if k is not None:
return self.mat[k]
raise IndexError("Invalid index: a[%s]" % repr(key))
def __setitem__(self, key, value):
"""
>>> from sympy import Matrix, I
>>> m=Matrix(((1,2+I),(3,4)))
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[3, 4]
>>> m[1,0]=9
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 2 + I]
[9, 4]
"""
if type(key) is tuple:
i, j = key
if type(i) is slice or type(j) is slice:
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
return
if isinstance(value, (list, tuple)):
self.copyin_list(key, value)
return
else:
# a2idx inlined
try:
i = i.__int__()
except AttributeError:
try:
i = i.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
# a2idx inlined
try:
j = j.__int__()
except AttributeError:
try:
j = j.__index__()
except AttributeError:
raise IndexError("Invalid index a[%r]" % (key,))
if not (i>=0 and i<self.rows and j>=0 and j < self.cols):
raise IndexError("Index out of range: a[%s]" % (key,))
else:
self.mat[i*self.cols + j] = sympify(value)
return
else:
# row-wise decomposition of matrix
if type(key) is slice:
raise IndexError("Vector slices not implemented yet.")
else:
k = a2idx(key)
if k is not None:
self.mat[k] = sympify(value)
return
raise IndexError("Invalid index: a[%s]"%repr(key))
def __array__(self):
return matrix2numpy(self)
def tolist(self):
"""
Return the Matrix converted in a python list.
>>> from sympy import Matrix
>>> m=Matrix(3, 3, range(9))
>>> m
[0, 1, 2]
[3, 4, 5]
[6, 7, 8]
>>> m.tolist()
[[0, 1, 2], [3, 4, 5], [6, 7, 8]]
"""
ret = [0]*self.rows
for i in xrange(self.rows):
ret[i] = self.mat[i*self.cols:(i+1)*self.cols]
return ret
def copyin_matrix(self, key, value):
rlo, rhi = self.slice2bounds(key[0], self.rows)
clo, chi = self.slice2bounds(key[1], self.cols)
assert value.rows == rhi - rlo and value.cols == chi - clo
for i in range(value.rows):
for j in range(value.cols):
self[i+rlo, j+clo] = sympify(value[i,j])
def copyin_list(self, key, value):
assert isinstance(value, (list, tuple))
self.copyin_matrix(key, Matrix(value))
def hash(self):
"""Compute a hash every time, because the matrix elements
could change."""
return hash(self.__str__() )
@property
def shape(self):
return (self.rows, self.cols)
def __rmul__(self,a):
if hasattr(a, "__array__") and a.shape != ():
return matrix_multiply(a,self)
out = Matrix(self.rows,self.cols,map(lambda i: a*i,self.mat))
return out
def expand(self):
out = Matrix(self.rows,self.cols,map(lambda i: i.expand(), self.mat))
return out
def combine(self):
out = Matrix(self.rows,self.cols,map(lambda i: i.combine(),self.mat))
return out
def subs(self, *args):
out = Matrix(self.rows,self.cols,map(lambda i: i.subs(*args),self.mat))
return out
def __sub__(self,a):
return self + (-a)
def __mul__(self,a):
if hasattr(a, "__array__") and a.shape != ():
return matrix_multiply(self,a)
out = Matrix(self.rows,self.cols,map(lambda i: i*a,self.mat))
return out
def __pow__(self, num):
if not self.is_square:
raise NonSquareMatrixException()
if isinstance(num, int) or isinstance(num, Integer):
n = int(num)
if n < 0:
return self.inv() ** -n # A**-2 = (A**-1)**2
a = eye(self.cols)
while n:
if n % 2:
a = a * self
n -= 1
self = self * self
n = n // 2
return a
raise NotImplementedError('Can only raise to the power of an integer for now')
def __add__(self,a):
return matrix_add(self,a)
def __radd__(self,a):
return matrix_add(a,self)
def __div__(self,a):
return self * (S.One/a)
def __truediv__(self,a):
return self.__div__(a)
def multiply(self,b):
"""Returns self*b """
return matrix_multiply(self,b)
def add(self,b):
"""Return self+b """
return matrix_add(self,b)
def __neg__(self):
return -1*self
def __eq__(self, a):
if not isinstance(a, (Matrix, Basic)):
a = sympify(a)
if isinstance(a, Matrix):
return self.hash() == a.hash()
else:
return False
def __ne__(self,a):
if not isinstance(a, (Matrix, Basic)):
a = sympify(a)
if isinstance(a, Matrix):
return self.hash() != a.hash()
else:
return True
def _format_str(self, strfunc, rowsep='\n'):
# Build table of string representations of the elements
res = []
# Track per-column max lengths for pretty alignment
maxlen = [0] * self.cols
for i in range(self.rows):
res.append([])
for j in range(self.cols):
string = strfunc(self[i,j])
res[-1].append(string)
maxlen[j] = max(len(string), maxlen[j])
# Patch strings together
for i, row in enumerate(res):
for j, elem in enumerate(row):
# Pad each element up to maxlen so the columns line up
row[j] = elem.rjust(maxlen[j])
res[i] = "[" + ", ".join(row) + "]"
return rowsep.join(res)
def __str__(self):
return StrPrinter.doprint(self)
def __repr__(self):
return StrPrinter.doprint(self)
def inv(self, method="GE", iszerofunc=_iszero, try_block_diag=False):
"""
Calculates the matrix inverse.
According to the "method" parameter, it calls the appropriate method:
GE .... inverse_GE()
LU .... inverse_LU()
ADJ ... inverse_ADJ()
According to the "try_block_diag" parameter, it will try to form block
diagonal matrices using the method get_diag_blocks(), invert these
individually, and then reconstruct the full inverse matrix.
Note, the GE and LU methods may require the matrix to be simplified
before it is inverted in order to properly detect zeros during
pivoting. In difficult cases a custom zero detection function can
        be provided by setting the iszerofunc argument to a function that
should return True if its argument is zero.
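        A small worked example (2x2, exact arithmetic):
        >>> from sympy import Matrix
        >>> Matrix([[2, 1], [1, 1]]).inv() #doctest: +NORMALIZE_WHITESPACE
        [ 1, -1]
        [-1,  2]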
"""
assert self.cols==self.rows
if try_block_diag:
blocks = self.get_diag_blocks()
r = []
for block in blocks:
r.append(block.inv(method=method, iszerofunc=iszerofunc))
return block_diag(r)
if method == "GE":
return self.inverse_GE(iszerofunc=iszerofunc)
elif method == "LU":
return self.inverse_LU(iszerofunc=iszerofunc)
elif method == "ADJ":
return self.inverse_ADJ()
else:
raise ValueError("Inversion method unrecognized")
def __mathml__(self):
mml = ""
for i in range(self.rows):
mml += "<matrixrow>"
for j in range(self.cols):
mml += self[i,j].__mathml__()
mml += "</matrixrow>"
return "<matrix>" + mml + "</matrix>"
def row(self, i, f):
"""Elementary row operation using functor"""
for j in range(0, self.cols):
self[i, j] = f(self[i, j], j)
def col(self, j, f):
"""Elementary column operation using functor"""
for i in range(0, self.rows):
self[i, j] = f(self[i, j], i)
def row_swap(self, i, j):
for k in range(0, self.cols):
self[i, k], self[j, k] = self[j, k], self[i, k]
def col_swap(self, i, j):
for k in range(0, self.rows):
self[k, i], self[k, j] = self[k, j], self[k, i]
def row_del(self, i):
self.mat = self.mat[:i*self.cols] + self.mat[(i+1)*self.cols:]
self.rows -= 1
def col_del(self, i):
"""
>>> import sympy
>>> M = sympy.matrices.eye(3)
>>> M.col_del(1)
>>> M #doctest: +NORMALIZE_WHITESPACE
[1, 0]
[0, 0]
[0, 1]
"""
for j in range(self.rows-1, -1, -1):
del self.mat[i+j*self.cols]
self.cols -= 1
def row_join(self, rhs):
"""
Concatenates two matrices along self's last and rhs's first column
>>> from sympy import Matrix
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> V = Matrix(3,1,lambda i,j: 3+i+j)
>>> M.row_join(V)
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
"""
assert self.rows == rhs.rows
newmat = self.zeros((self.rows, self.cols + rhs.cols))
newmat[:,:self.cols] = self[:,:]
newmat[:,self.cols:] = rhs
return newmat
def col_join(self, bott):
"""
Concatenates two matrices along self's last and bott's first row
>>> from sympy import Matrix
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> V = Matrix(1,3,lambda i,j: 3+i+j)
>>> M.col_join(V)
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
[3, 4, 5]
"""
assert self.cols == bott.cols
newmat = self.zeros((self.rows+bott.rows, self.cols))
newmat[:self.rows,:] = self[:,:]
newmat[self.rows:,:] = bott
return newmat
def row_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((1, 3))
>>> V
[0, 0, 0]
>>> M.row_insert(1,V)
[0, 1, 2]
[0, 0, 0]
[1, 2, 3]
[2, 3, 4]
"""
        if pos == 0:
return mti.col_join(self)
assert self.cols == mti.cols
newmat = self.zeros((self.rows + mti.rows, self.cols))
newmat[:pos,:] = self[:pos,:]
newmat[pos:pos+mti.rows,:] = mti[:,:]
newmat[pos+mti.rows:,:] = self[pos:,:]
return newmat
def col_insert(self, pos, mti):
"""
>>> from sympy import Matrix, zeros
>>> M = Matrix(3,3,lambda i,j: i+j)
>>> M
[0, 1, 2]
[1, 2, 3]
[2, 3, 4]
>>> V = zeros((3, 1))
>>> V
[0]
[0]
[0]
>>> M.col_insert(1,V)
[0, 0, 1, 2]
[1, 0, 2, 3]
[2, 0, 3, 4]
"""
        if pos == 0:
return mti.row_join(self)
assert self.rows == mti.rows
newmat = self.zeros((self.rows, self.cols + mti.cols))
newmat[:,:pos] = self[:,:pos]
newmat[:,pos:pos+mti.cols] = mti[:,:]
newmat[:,pos+mti.cols:] = self[:,pos:]
return newmat
def trace(self):
assert self.cols == self.rows
trace = 0
for i in range(self.cols):
trace += self[i,i]
return trace
def submatrix(self, keys):
"""
>>> from sympy import Matrix
>>> m = Matrix(4,4,lambda i,j: i+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3]
[1, 2, 3, 4]
[2, 3, 4, 5]
[3, 4, 5, 6]
>>> m[0:1, 1] #doctest: +NORMALIZE_WHITESPACE
[1]
>>> m[0:2, 0:1] #doctest: +NORMALIZE_WHITESPACE
[0]
[1]
>>> m[2:4, 2:4] #doctest: +NORMALIZE_WHITESPACE
[4, 5]
[5, 6]
"""
assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
outLines, outCols = rhi-rlo, chi-clo
outMat = [0]*outLines*outCols
for i in xrange(outLines):
outMat[i*outCols:(i+1)*outCols] = self.mat[(i+rlo)*self.cols+clo:(i+rlo)*self.cols+chi]
return Matrix(outLines,outCols,outMat)
def slice2bounds(self, key, defmax):
"""
Takes slice or number and returns (min,max) for iteration
Takes a default maxval to deal with the slice ':' which is (none, none)
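        For example:
        >>> from sympy import Matrix
        >>> Matrix(2, 2, [0, 1, 2, 3]).slice2bounds(slice(None), 2)
        (0, 2)
        >>> Matrix(2, 2, [0, 1, 2, 3]).slice2bounds(-1, 2)
        (1, 2)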
"""
if isinstance(key, slice):
lo, hi = 0, defmax
if key.start != None:
if key.start >= 0:
lo = key.start
else:
lo = defmax+key.start
if key.stop != None:
if key.stop >= 0:
hi = key.stop
else:
hi = defmax+key.stop
return lo, hi
elif isinstance(key, int):
if key >= 0:
return key, key+1
else:
return defmax+key, defmax+key+1
else:
raise IndexError("Improper index type")
def applyfunc(self, f):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,2,lambda i,j: i*2+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1]
[2, 3]
>>> m.applyfunc(lambda i: 2*i) #doctest: +NORMALIZE_WHITESPACE
[0, 2]
[4, 6]
"""
assert callable(f)
out = Matrix(self.rows,self.cols,map(f,self.mat))
return out
def evalf(self, prec=None, **options):
if prec is None:
return self.applyfunc(lambda i: i.evalf(**options))
else:
return self.applyfunc(lambda i: i.evalf(prec, **options))
def reshape(self, _rows, _cols):
"""
>>> from sympy import Matrix
>>> m = Matrix(2,3,lambda i,j: 1)
>>> m #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1]
[1, 1, 1]
>>> m.reshape(1,6) #doctest: +NORMALIZE_WHITESPACE
[1, 1, 1, 1, 1, 1]
>>> m.reshape(3,2) #doctest: +NORMALIZE_WHITESPACE
[1, 1]
[1, 1]
[1, 1]
"""
        if self.rows*self.cols != _rows*_cols:
            raise ValueError("Invalid reshape parameters %d %d" % (_rows, _cols))
return Matrix(_rows, _cols, lambda i,j: self.mat[i*_cols + j])
def print_nonzero (self, symb="X"):
"""
Shows location of non-zero entries for fast shape lookup
>>> from sympy import Matrix, matrices
>>> m = Matrix(2,3,lambda i,j: i*3+j)
>>> m #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2]
[3, 4, 5]
>>> m.print_nonzero() #doctest: +NORMALIZE_WHITESPACE
[ XX]
[XXX]
>>> m = matrices.eye(4)
>>> m.print_nonzero("x") #doctest: +NORMALIZE_WHITESPACE
[x ]
[ x ]
[ x ]
[ x]
"""
s="";
for i in range(self.rows):
s+="["
for j in range(self.cols):
if self[i,j] == 0:
s+=" "
else:
s+= symb+""
s+="]\n"
print s
def LUsolve(self, rhs, iszerofunc=_iszero):
"""
Solve the linear system Ax = b for x.
self is the coefficient matrix A and rhs is the right side b.
This is for symbolic matrices, for real or complex ones use
sympy.mpmath.lu_solve or sympy.mpmath.qr_solve.
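        A small illustrative example (values chosen for this sketch):
        >>> from sympy import Matrix
        >>> A = Matrix(2, 2, [2, 0, 0, 4])
        >>> A.LUsolve(Matrix(2, 1, [2, 8]))
        [1]
        [2]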
"""
assert rhs.rows == self.rows
        A, perm = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
n = self.rows
b = rhs.permuteFwd(perm)
# forward substitution, all diag entries are scaled to 1
for i in range(n):
for j in range(i):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
# backward substitution
for i in range(n-1,-1,-1):
for j in range(i+1, n):
b.row(i, lambda x,k: x - b[j,k]*A[i,j])
b.row(i, lambda x,k: x / A[i,i])
return b
def LUdecomposition(self, iszerofunc=_iszero):
"""
Returns the decomposition LU and the row swaps p.
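        A small illustrative example (values chosen for this sketch):
        >>> from sympy import Matrix
        >>> L, U, p = Matrix(2, 2, [2, 1, 4, 3]).LUdecomposition()
        >>> L #doctest: +NORMALIZE_WHITESPACE
        [1, 0]
        [2, 1]
        >>> U #doctest: +NORMALIZE_WHITESPACE
        [2, 1]
        [0, 1]
        >>> p
        []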
"""
        combined, p = self.LUdecomposition_Simple(iszerofunc=iszerofunc)
L = self.zeros(self.rows)
U = self.zeros(self.rows)
for i in range(self.rows):
for j in range(self.rows):
if i > j:
L[i,j] = combined[i,j]
else:
if i == j:
L[i,i] = 1
U[i,j] = combined[i,j]
return L, U, p
def LUdecomposition_Simple(self, iszerofunc=_iszero):
"""
Returns A comprised of L,U (L's diag entries are 1) and
p which is the list of the row swaps (in order).
"""
assert self.rows == self.cols
n = self.rows
A = self[:,:]
p = []
# factorization
for j in range(n):
for i in range(j):
for k in range(i):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
pivot = -1
for i in range(j,n):
for k in range(j):
A[i,j] = A[i,j] - A[i,k]*A[k,j]
# find the first non-zero pivot, includes any expression
if pivot == -1 and not iszerofunc(A[i,j]):
pivot = i
if pivot < 0:
raise ValueError("Error: non-invertible matrix passed to LUdecomposition_Simple()")
if pivot != j: # row must be swapped
A.row_swap(pivot,j)
p.append([pivot,j])
assert not iszerofunc(A[j,j])
scale = 1 / A[j,j]
for i in range(j+1,n):
A[i,j] = A[i,j] * scale
return A, p
def LUdecompositionFF(self):
"""
Returns 4 matrices P, L, D, U such that PA = L D**-1 U.
From the paper "fraction-free matrix factors..." by Zhou and Jeffrey
"""
n, m = self.rows, self.cols
U, L, P = self[:,:], eye(n), eye(n)
DD = zeros(n) # store it smarter since it's just diagonal
oldpivot = 1
for k in range(n-1):
if U[k,k] == 0:
kpivot = k+1
Notfound = True
while kpivot < n and Notfound:
if U[kpivot, k] != 0:
Notfound = False
else:
kpivot = kpivot + 1
                # the search above leaves kpivot == n when no pivot was found
                if kpivot == n:
raise ValueError("Matrix is not full rank")
else:
swap = U[k, k:]
U[k,k:] = U[kpivot,k:]
U[kpivot, k:] = swap
swap = P[k, k:]
P[k, k:] = P[kpivot, k:]
P[kpivot, k:] = swap
assert U[k, k] != 0
L[k,k] = U[k,k]
DD[k,k] = oldpivot * U[k,k]
assert DD[k,k] != 0
Ukk = U[k,k]
for i in range(k+1, n):
L[i,k] = U[i,k]
Uik = U[i,k]
for j in range(k+1, m):
U[i,j] = (Ukk * U[i,j] - U[k,j]*Uik) / oldpivot
U[i,k] = 0
oldpivot = U[k,k]
DD[n-1,n-1] = oldpivot
return P, L, DD, U
def cofactorMatrix(self, method="berkowitz"):
out = Matrix(self.rows, self.cols, lambda i,j:
self.cofactor(i, j, method))
return out
def minorEntry(self, i, j, method="berkowitz"):
assert 0 <= i < self.rows and 0 <= j < self.cols
return self.minorMatrix(i,j).det(method)
def minorMatrix(self, i, j):
assert 0 <= i < self.rows and 0 <= j < self.cols
return self.delRowCol(i,j)
def cofactor(self, i, j, method="berkowitz"):
if (i+j) % 2 == 0:
return self.minorEntry(i, j, method)
else:
return -1 * self.minorEntry(i, j, method)
def jacobian(self, X):
"""
Calculates the Jacobian matrix (derivative of a vectorial function).
*self*
A vector of expressions representing functions f_i(x_1, ..., x_n).
*X*
The set of x_i's in order, it can be a list or a Matrix
Both self and X can be a row or a column matrix in any order
(jacobian() should always work).
Examples::
>>> from sympy import sin, cos, Matrix
>>> from sympy.abc import rho, phi
>>> X = Matrix([rho*cos(phi), rho*sin(phi), rho**2])
>>> Y = Matrix([rho, phi])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
[ 2*rho, 0]
>>> X = Matrix([rho*cos(phi), rho*sin(phi)])
>>> X.jacobian(Y)
[cos(phi), -rho*sin(phi)]
[sin(phi), rho*cos(phi)]
"""
if not isinstance(X, Matrix):
X = Matrix(X)
# Both X and self can be a row or a column matrix, so we need to make
# sure all valid combinations work, but everything else fails:
assert len(self.shape) == 2
assert len(X.shape) == 2
if self.shape[0] == 1:
m = self.shape[1]
elif self.shape[1] == 1:
m = self.shape[0]
else:
raise TypeError("self must be a row or a column matrix")
if X.shape[0] == 1:
n = X.shape[1]
elif X.shape[1] == 1:
n = X.shape[0]
else:
raise TypeError("X must be a row or a column matrix")
# m is the number of functions and n is the number of variables
# computing the Jacobian is now easy:
return Matrix(m, n, lambda j, i: self[j].diff(X[i]))
def QRdecomposition(self):
"""
Return Q,R where A = Q*R, Q is orthogonal and R is upper triangular.
Assumes full-rank square (for now).
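        A small illustrative example (values chosen for this sketch):
        >>> from sympy import Matrix
        >>> Q, R = Matrix(2, 2, [3, 0, 4, 5]).QRdecomposition()
        >>> Q #doctest: +NORMALIZE_WHITESPACE
        [3/5, -4/5]
        [4/5,  3/5]
        >>> R #doctest: +NORMALIZE_WHITESPACE
        [5, 4]
        [0, 3]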
"""
assert self.rows == self.cols
n = self.rows
Q, R = self.zeros(n), self.zeros(n)
for j in range(n): # for each column vector
tmp = self[:,j] # take original v
for i in range(j):
# subtract the project of self on new vector
tmp -= Q[:,i] * self[:,j].dot(Q[:,i])
tmp.expand()
# normalize it
R[j,j] = tmp.norm()
Q[:,j] = tmp / R[j,j]
assert Q[:,j].norm() == 1
for i in range(j):
R[i,j] = Q[:,i].dot(self[:,j])
return Q,R
def QRsolve(self, b):
"""
Solve the linear system 'Ax = b'.
'self' is the matrix 'A', the method argument is the vector
'b'. The method returns the solution vector 'x'. If 'b' is a
matrix, the system is solved for each column of 'b' and the
return value is a matrix of the same shape as 'b'.
This method is slower (approximately by a factor of 2) but
more stable for floating-point arithmetic than the LUsolve method.
        However, LUsolve usually uses exact arithmetic, so you don't need
to use QRsolve.
This is mainly for educational purposes and symbolic matrices, for real
(or complex) matrices use sympy.mpmath.qr_solve.
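        A small illustrative example, reusing the matrix from the
        QRdecomposition() sketch above:
        >>> from sympy import Matrix
        >>> Matrix(2, 2, [3, 0, 4, 5]).QRsolve(Matrix(2, 1, [3, 9]))
        [1]
        [1]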
"""
Q, R = self.QRdecomposition()
y = Q.T * b
# back substitution to solve R*x = y:
# We build up the result "backwards" in the vector 'x' and reverse it
# only in the end.
x = []
n = R.rows
for j in range(n-1, -1, -1):
tmp = y[j,:]
for k in range(j+1, n):
tmp -= R[j,k] * x[n-1-k]
x.append(tmp/R[j,j])
return Matrix([row.mat for row in reversed(x)])
# Utility functions
def simplify(self):
"""Simplify the elements of a matrix in place."""
for i in xrange(len(self.mat)):
self.mat[i] = simplify(self.mat[i])
#def evaluate(self): # no more eval() so should be removed
# for i in range(self.rows):
# for j in range(self.cols):
# self[i,j] = self[i,j].eval()
def cross(self, b):
assert isinstance(b, (list, tuple, Matrix))
        # both self and b must be 3-component vectors (row or column)
        if not ((self.rows == 1 and self.cols == 3 or \
                self.rows == 3 and self.cols == 1) and \
                (b.rows == 1 and b.cols == 3 or \
                b.rows == 3 and b.cols == 1)):
            raise ValueError("Dimensions incorrect for cross product")
else:
return Matrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def dot(self, b):
assert isinstance(b, (list, tuple, Matrix))
if isinstance(b, (list, tuple)):
m = len(b)
else:
m = b.rows * b.cols
assert self.cols*self.rows == m
prod = 0
for i in range(m):
prod += self[i] * b[i]
return prod
def norm(self):
assert self.rows == 1 or self.cols == 1
out = sympify(0)
for i in range(self.rows * self.cols):
out += self[i]*self[i]
return out**S.Half
def normalized(self):
assert self.rows == 1 or self.cols == 1
norm = self.norm()
out = self.applyfunc(lambda i: i / norm)
return out
def project(self, v):
"""Project onto v."""
return v * (self.dot(v) / v.dot(v))
def permuteBkwd(self, perm):
copy = self[:,:]
for i in range(len(perm)-1, -1, -1):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def permuteFwd(self, perm):
copy = self[:,:]
for i in range(len(perm)):
copy.row_swap(perm[i][0], perm[i][1])
return copy
def delRowCol(self, i, j):
# used only for cofactors, makes a copy
M = self[:,:]
M.row_del(i)
M.col_del(j)
return M
def zeronm(self, n, m):
# used so that certain functions above can use this
# then only this func need be overloaded in subclasses
warnings.warn( 'Deprecated: use zeros() instead.' )
return Matrix(n,m,[S.Zero]*n*m)
def zero(self, n):
"""Returns a n x n matrix of zeros."""
warnings.warn( 'Deprecated: use zeros() instead.' )
return Matrix(n,n,[S.Zero]*n*n)
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return Matrix(n,m,[S.Zero]*n*m)
def eye(self, n):
"""Returns the identity matrix of size n."""
tmp = self.zeros(n)
for i in range(tmp.rows):
tmp[i,i] = S.One
return tmp
@property
def is_square(self):
return self.rows == self.cols
    def is_upper(self):
        for i in range(self.rows):
            for j in range(self.cols):
                if i > j and self[i,j] != 0:
                    return False
        return True
    def is_lower(self):
        for i in range(self.rows):
            for j in range(self.cols):
                if i < j and self[i, j] != 0:
                    return False
        return True
    def is_symbolic(self):
        for i in range(self.rows):
            for j in range(self.cols):
                if self[i,j].atoms(Symbol):
                    return True
        return False
def clone(self):
return Matrix(self.rows, self.cols, lambda i, j: self[i, j])
def det(self, method="bareis"):
"""
Computes the matrix determinant using the method "method".
Possible values for "method":
bareis ... det_bareis
berkowitz ... berkowitz_det
"""
if method == "bareis":
return self.det_bareis()
elif method == "berkowitz":
return self.berkowitz_det()
else:
raise ValueError("Determinant method unrecognized")
def det_bareis(self):
"""Compute matrix determinant using Bareis' fraction-free
algorithm which is an extension of the well known Gaussian
elimination method. This approach is best suited for dense
symbolic matrices and will result in a determinant with
minimal number of fractions. It means that less term
rewriting is needed on resulting formulae.
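        A small illustrative example (values chosen for this sketch):
        >>> from sympy import Matrix
        >>> Matrix(2, 2, [1, 2, 3, 4]).det_bareis()
        -2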
TODO: Implement algorithm for sparse matrices (SFF).
"""
if not self.is_square:
raise NonSquareMatrixException()
M, n = self[:,:], self.rows
if n == 1:
det = M[0, 0]
elif n == 2:
det = M[0, 0]*M[1, 1] - M[0, 1]*M[1, 0]
else:
sign = 1 # track current sign in case of column swap
for k in range(n-1):
# look for a pivot in the current column
# and assume det == 0 if none is found
if M[k, k] == 0:
for i in range(k+1, n):
if M[i, k] != 0:
M.row_swap(i, k)
sign *= -1
break
else:
return S.Zero
# proceed with Bareis' fraction-free (FF)
# form of Gaussian elimination algorithm
for i in range(k+1, n):
for j in range(k+1, n):
D = M[k, k]*M[i, j] - M[i, k]*M[k, j]
if k > 0:
D /= M[k-1, k-1]
if D.is_Atom:
M[i, j] = D
else:
M[i, j] = cancel(D)
det = sign * M[n-1, n-1]
return det.expand()
def adjugate(self, method="berkowitz"):
"""
Returns the adjugate matrix.
Adjugate matrix is the transpose of the cofactor matrix.
http://en.wikipedia.org/wiki/Adjugate
See also: .cofactorMatrix(), .T
"""
return self.cofactorMatrix(method).T
def inverse_LU(self, iszerofunc=_iszero):
"""
Calculates the inverse using LU decomposition.
"""
        return self.LUsolve(self.eye(self.rows), iszerofunc=iszerofunc)
def inverse_GE(self, iszerofunc=_iszero):
"""
Calculates the inverse using Gaussian elimination.
"""
assert self.rows == self.cols
assert self.det() != 0
big = self.row_join(self.eye(self.rows))
red = big.rref(iszerofunc=iszerofunc)
return red[0][:,big.rows:]
def inverse_ADJ(self):
"""
Calculates the inverse using the adjugate matrix and a determinant.
"""
assert self.rows == self.cols
d = self.berkowitz_det()
assert d != 0
return self.adjugate()/d
def rref(self,simplified=False, iszerofunc=_iszero):
"""
Take any matrix and return reduced row-echelon form and indices of pivot vars
To simplify elements before finding nonzero pivots set simplified=True
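        A small illustrative example (a rank-1 matrix, values chosen for
        this sketch):
        >>> from sympy import Matrix
        >>> r, pivots = Matrix(2, 2, [1, 2, 2, 4]).rref()
        >>> r #doctest: +NORMALIZE_WHITESPACE
        [1, 2]
        [0, 0]
        >>> pivots
        [0]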
"""
# TODO: rewrite inverse_GE to use this
pivots, r = 0, self[:,:] # pivot: index of next row to contain a pivot
pivotlist = [] # indices of pivot variables (non-free)
for i in range(r.cols):
if pivots == r.rows:
break
if simplified:
r[pivots,i] = simplify(r[pivots,i])
if iszerofunc(r[pivots,i]):
for k in range(pivots, r.rows):
if simplified and k>pivots:
r[k,i] = simplify(r[k,i])
if not iszerofunc(r[k,i]):
break
if k == r.rows - 1 and iszerofunc(r[k,i]):
continue
r.row_swap(pivots,k)
scale = r[pivots,i]
r.row(pivots, lambda x, _: x/scale)
for j in range(r.rows):
if j == pivots:
continue
scale = r[j,i]
r.row(j, lambda x, k: x - r[pivots,k]*scale)
pivotlist.append(i)
pivots += 1
return r, pivotlist
def nullspace(self,simplified=False):
"""
Returns list of vectors (Matrix objects) that span nullspace of self
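        A small illustrative example, reusing the rank-1 matrix from the
        rref() sketch above:
        >>> from sympy import Matrix
        >>> Matrix(2, 2, [1, 2, 2, 4]).nullspace() #doctest: +NORMALIZE_WHITESPACE
        [[-2]
        [ 1]]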
"""
reduced, pivots = self.rref(simplified)
basis = []
# create a set of vectors for the basis
for i in range(self.cols - len(pivots)):
basis.append(zeros((self.cols, 1)))
# contains the variable index to which the vector corresponds
basiskey, cur = [-1]*len(basis), 0
for i in range(self.cols):
if i not in pivots:
basiskey[cur] = i
cur += 1
for i in range(self.cols):
if i not in pivots: # free var, just set vector's ith place to 1
basis[basiskey.index(i)][i,0] = 1
else: # add negative of nonpivot entry to corr vector
for j in range(i+1, self.cols):
line = pivots.index(i)
if reduced[line, j] != 0:
assert j not in pivots
basis[basiskey.index(j)][i,0] = -1 * reduced[line, j]
return basis
def berkowitz(self):
"""The Berkowitz algorithm.
Given N x N matrix with symbolic content, compute efficiently
coefficients of characteristic polynomials of 'self' and all
its square sub-matrices composed by removing both i-th row
and column, without division in the ground domain.
This method is particularly useful for computing determinant,
principal minors and characteristic polynomial, when 'self'
has complicated coefficients e.g. polynomials. Semi-direct
usage of this algorithm is also important in computing
efficiently sub-resultant PRS.
Assuming that M is a square matrix of dimension N x N and
        I is N x N identity matrix, then the following
        definition of characteristic polynomial is being used:
charpoly(M) = det(t*I - M)
As a consequence, all polynomials generated by Berkowitz
algorithm are monic.
>>> from sympy import Matrix
>>> from sympy.abc import x, y, z
>>> M = Matrix([ [x,y,z], [1,0,0], [y,z,x] ])
>>> p, q, r = M.berkowitz()
>>> print p # 1 x 1 M's sub-matrix
(1, -x)
>>> print q # 2 x 2 M's sub-matrix
(1, -x, -y)
>>> print r # 3 x 3 M's sub-matrix
(1, -2*x, -y - y*z + x**2, x*y - z**2)
For more information on the implemented algorithm refer to:
[1] S.J. Berkowitz, On computing the determinant in small
parallel time using a small number of processors, ACM,
Information Processing Letters 18, 1984, pp. 147-150
[2] M. Keber, Division-Free computation of sub-resultants
using Bezout matrices, Tech. Report MPI-I-2006-1-006,
Saarbrucken, 2006
"""
if not self.is_square:
raise NonSquareMatrixException()
A, N = self, self.rows
transforms = [0] * (N-1)
for n in xrange(N, 1, -1):
T, k = zeros((n+1,n)), n - 1
R, C = -A[k,:k], A[:k,k]
A, a = A[:k,:k], -A[k,k]
items = [ C ]
for i in xrange(0, n-2):
items.append(A * items[i])
for i, B in enumerate(items):
items[i] = (R * B)[0,0]
items = [ S.One, a ] + items
for i in xrange(n):
T[i:,i] = items[:n-i+1]
transforms[k-1] = T
polys = [ Matrix([S.One, -A[0,0]]) ]
for i, T in enumerate(transforms):
polys.append(T * polys[i])
return tuple(map(tuple, polys))
def berkowitz_det(self):
"""Computes determinant using Berkowitz method."""
poly = self.berkowitz()[-1]
sign = (-1)**(len(poly)-1)
return sign * poly[-1]
def berkowitz_minors(self):
"""Computes principal minors using Berkowitz method."""
sign, minors = S.NegativeOne, []
for poly in self.berkowitz():
minors.append(sign*poly[-1])
sign = -sign
return tuple(minors)
def berkowitz_charpoly(self, x):
"""Computes characteristic polynomial minors using Berkowitz method."""
coeffs, monoms = self.berkowitz()[-1], range(self.rows+1)
return Poly(dict(zip(reversed(monoms), coeffs)), x)
charpoly = berkowitz_charpoly
def berkowitz_eigenvals(self, **flags):
"""Computes eigenvalues of a Matrix using Berkowitz method. """
return roots(self.berkowitz_charpoly(Symbol('x', dummy=True)), **flags)
eigenvals = berkowitz_eigenvals
def eigenvects(self, **flags):
"""Return list of triples (eigenval, multiplicity, basis)."""
if 'multiple' in flags:
del flags['multiple']
out, vlist = [], self.eigenvals(**flags)
for r, k in vlist.iteritems():
tmp = self - eye(self.rows)*r
basis = tmp.nullspace()
# whether tmp.is_symbolic() is True or False, it is possible that
# the basis will come back as [] in which case simplification is
# necessary.
if not basis:
# The nullspace routine failed, try it again with simplification
basis = tmp.nullspace(simplified=True)
out.append((r, k, basis))
return out
def fill(self, value):
"""Fill the matrix with the scalar value."""
self.mat = [value] * self.rows * self.cols
def __getattr__(self, attr):
if attr in ('diff','integrate','limit'):
def doit(*args):
item_doit = lambda item: getattr(item, attr)(*args)
return self.applyfunc( item_doit )
return doit
else:
raise AttributeError()
def vec(self):
"""
Return the Matrix converted into a one column matrix by stacking columns
>>> from sympy import Matrix
>>> m=Matrix([ [1,3], [2,4] ])
>>> m
[1, 3]
[2, 4]
>>> m.vec()
[1]
[2]
[3]
[4]
"""
return Matrix(self.cols*self.rows, 1, self.transpose().mat)
def vech(self, diagonal=True, check_symmetry=True):
"""
Return the unique elements of a symmetric Matrix as a one column matrix
by stacking
the elements in the lower triangle
Arguments:
diagonal -- include the diagonal cells of self or not
check_symmetry -- checks symmetry of self but not completely reliably
>>> from sympy import Matrix
>>> m=Matrix([ [1,2], [2,3] ])
>>> m
[1, 2]
[2, 3]
>>> m.vech()
[1]
[2]
[3]
>>> m.vech(diagonal=False)
[2]
"""
c = self.cols
if c != self.rows:
raise TypeError("Matrix must be square")
if check_symmetry:
self.simplify()
if self != self.transpose():
raise ValueError("Matrix appears to be asymmetric; consider check_symmetry=False")
count = 0
if diagonal:
v = zeros( (c * (c + 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j,c):
v[count] = self[i,j]
count += 1
else:
v = zeros( (c * (c - 1) // 2, 1) )
for j in xrange(c):
for i in xrange(j+1,c):
v[count] = self[i,j]
count += 1
return v
def get_diag_blocks(self):
"""Obtains the square sub-matrices on the main diagonal of a square matrix.
Useful for inverting symbolic matrices or solving systems of
linear equations which may be decoupled by having a block diagonal
structure.
Example:
>>> from sympy import Matrix, symbols
>>> from sympy.abc import x, y, z
>>> A = Matrix([[1, 3, 0, 0], [y, z*z, 0, 0], [0, 0, x, 0], [0, 0, 0, 0]])
>>> a1, a2, a3 = A.get_diag_blocks()
>>> a1
[1, 3]
[y, z**2]
>>> a2
[x]
>>> a3
[0]
>>>
"""
sub_blocks = []
def recurse_sub_blocks(M):
i = 1
while i <= M.shape[0]:
if i == 1:
to_the_right = M[0, i:]
to_the_bottom = M[i:, 0]
else:
to_the_right = M[0:i, i:]
to_the_bottom = M[i:, 0:i]
if any(to_the_right) or any(to_the_bottom):
i += 1
continue
else:
sub_blocks.append(M[0:i, 0:i])
if M.shape == M[0:i, 0:i].shape:
return
else:
recurse_sub_blocks(M[i:, i:])
return
recurse_sub_blocks(self)
return sub_blocks
def matrix_multiply(A, B):
"""
Matrix product A*B.
A and B must be of appropriate dimensions. If A is a m x k matrix, and B
is a k x n matrix, the product will be an m x n matrix.
Example:
>>> from sympy import Matrix
>>> A = Matrix([[1, 2, 3], [4, 5, 6]])
>>> B = Matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> A*B
[30, 36, 42]
[66, 81, 96]
>>> B*A
Traceback (most recent call last):
...
ShapeError
>>>
"""
    # The following implementation is equivalent, but about 5% slower
#ma, na = A.shape
#mb, nb = B.shape
#
#if na != mb:
# raise ShapeError()
#product = Matrix(ma, nb, lambda i,j: 0)
#for i in xrange(ma):
# for j in xrange(nb):
# s = 0
# for k in range(na):
# s += A[i, k]*B[k, j]
# product[i, j] = s
#return product
if A.shape[1] != B.shape[0]:
raise ShapeError()
blst = B.T.tolist()
alst = A.tolist()
return Matrix(A.shape[0], B.shape[1], lambda i, j:
reduce(lambda k, l: k+l,
map(lambda n, m: n*m,
alst[i],
blst[j])))
def matrix_add(A,B):
"""Return A+B"""
if A.shape != B.shape:
raise ShapeError()
alst = A.tolist()
blst = B.tolist()
ret = [0]*A.shape[0]
for i in xrange(A.shape[0]):
ret[i] = map(lambda j,k: j+k, alst[i], blst[i])
return Matrix(ret)
def zero(n):
"""Create square zero matrix n x n"""
warnings.warn( 'Deprecated: use zeros() instead.' )
return zeronm(n,n)
def zeronm(n,m):
"""Create zero matrix n x m"""
warnings.warn( 'Deprecated: use zeros() instead.' )
assert n>0
assert m>0
return Matrix(n,m,[S.Zero]*m*n)
def zeros(dims):
"""Create zero matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm(dims)
return Matrix(n, m, [S.Zero]*m*n)
def one(n):
"""Create square all-one matrix n x n"""
warnings.warn( 'Deprecated: use ones() instead.' )
return Matrix(n,n,[S.One]*n*n)
def ones(dims):
"""Create all-one matrix of dimensions dims = (d1,d2)"""
n, m = _dims_to_nm( dims )
return Matrix(n, m, [S.One]*m*n)
def eye(n):
"""Create square identity matrix n x n"""
n = int(n)
out = zeros(n)
for i in range(n):
out[i, i] = S.One
return out
def randMatrix(r,c,min=0,max=99,seed=None):
    """Create random matrix r x c"""
    if seed is None:
        prng = random.Random() # use system time
    else:
        prng = random.Random(seed)
return Matrix(r,c,lambda i,j: prng.randint(min,max))
def hessian(f, varlist):
"""Compute Hessian matrix for a function f
see: http://en.wikipedia.org/wiki/Hessian_matrix
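    A small illustrative example (assuming the symbols from sympy.abc):
    >>> from sympy import hessian
    >>> from sympy.abc import x, y
    >>> hessian(x**2*y, (x, y)) #doctest: +NORMALIZE_WHITESPACE
    [2*y, 2*x]
    [2*x,   0]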
"""
# f is the expression representing a function f, return regular matrix
if isinstance(varlist, (list, tuple)):
m = len(varlist)
elif isinstance(varlist, Matrix):
m = varlist.cols
assert varlist.rows == 1
else:
raise ValueError("Improper variable list in hessian function")
assert m > 0
try:
f.diff(varlist[0]) # check differentiability
except AttributeError:
raise ValueError("Function %d is not differentiable" % i)
out = zeros(m)
for i in range(m):
for j in range(i,m):
out[i,j] = f.diff(varlist[i]).diff(varlist[j])
for i in range(m):
for j in range(i):
out[i,j] = out[j,i]
return out
def GramSchmidt(vlist, orthog=False):
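    """Apply the Gram-Schmidt process to a list of 3-vectors.

    Returns a list of mutually orthogonal vectors spanning the same space;
    with orthog=True the result is additionally normalized.
    A small illustrative example (assuming GramSchmidt is exported at the
    sympy top level, like the other helpers in this module):
    >>> from sympy import Matrix, GramSchmidt
    >>> GramSchmidt([Matrix([[1, 0, 0]]), Matrix([[1, 1, 0]])]) #doctest: +NORMALIZE_WHITESPACE
    [[1, 0, 0], [0, 1, 0]]
    """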
out = []
m = len(vlist)
for i in range(m):
tmp = vlist[i]
for j in range(i):
tmp -= vlist[i].project(out[j])
if tmp == Matrix([[0,0,0]]):
raise ValueError("GramSchmidt: vector set not linearly independent")
out.append(tmp)
if orthog:
for i in range(len(out)):
out[i] = out[i].normalized()
return out
def wronskian(functions, var, method='bareis'):
"""Compute Wronskian for [] of functions
| f1 f2 ... fn |
| f1' f2' ... fn' |
| . . . . |
W(f1,...,fn) = | . . . . |
| . . . . |
| n n n |
| D(f1) D(f2) ... D(fn)|
see: http://en.wikipedia.org/wiki/Wronskian
"""
for index in xrange(0, len(functions)):
functions[index] = sympify(functions[index])
n = len(functions)
if n == 0:
return 1
W = Matrix(n, n, lambda i,j: functions[i].diff(var, j) )
return W.det(method)
def casoratian(seqs, n, zero=True):
"""Given linear difference operator L of order 'k' and homogeneous
equation Ly = 0 we want to compute kernel of L, which is a set
of 'k' sequences: a(n), b(n), ... z(n).
Solutions of L are linearly independent iff their Casoratian,
    denoted as C(a, b, ..., z), does not vanish for n = 0.
Casoratian is defined by k x k determinant:
+ a(n) b(n) . . . z(n) +
| a(n+1) b(n+1) . . . z(n+1) |
| . . . . |
| . . . . |
| . . . . |
+ a(n+k-1) b(n+k-1) . . . z(n+k-1) +
It proves very useful in rsolve_hyper() where it is applied
to a generating set of a recurrence to factor out linearly
dependent solutions and return a basis.
>>> from sympy import Symbol, casoratian, factorial
>>> n = Symbol('n', integer=True)
Exponential and factorial are linearly independent:
>>> casoratian([2**n, factorial(n)], n) != 0
True
"""
seqs = map(sympify, seqs)
if not zero:
f = lambda i, j: seqs[j].subs(n, n+i)
else:
f = lambda i, j: seqs[j].subs(n, i)
k = len(seqs)
return Matrix(k, k, f).det()
def block_diag(matrices):
"""
Constructs a block diagonal matrix from a list of square matrices.
Example:
>>> from sympy import block_diag, symbols, Matrix
>>> from sympy.abc import a, b, c, x, y, z
>>> a = Matrix([[1, 2], [2, 3]])
>>> b = Matrix([[3, x], [y, 3]])
>>> block_diag([a, b, b])
[1, 2, 0, 0, 0, 0]
[2, 3, 0, 0, 0, 0]
[0, 0, 3, x, 0, 0]
[0, 0, y, 3, 0, 0]
[0, 0, 0, 0, 3, x]
[0, 0, 0, 0, y, 3]
"""
rows = 0
for m in matrices:
assert m.rows == m.cols, "All matrices must be square."
rows += m.rows
A = zeros((rows, rows))
i = 0
for m in matrices:
A[i+0:i+m.rows, i+0:i+m.cols] = m
i += m.rows
return A
class SMatrix(Matrix):
"""Sparse matrix"""
def __init__(self, *args):
if len(args) == 3 and callable(args[2]):
op = args[2]
assert isinstance(args[0], int) and isinstance(args[1], int)
self.rows = args[0]
self.cols = args[1]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(op(i,j))
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], (list, tuple)):
self.rows = args[0]
self.cols = args[1]
mat = args[2]
self.mat = {}
for i in range(self.rows):
for j in range(self.cols):
value = sympify(mat[i*self.cols+j])
if value != 0:
self.mat[(i,j)] = value
elif len(args)==3 and isinstance(args[0],int) and \
isinstance(args[1],int) and isinstance(args[2], dict):
self.rows = args[0]
self.cols = args[1]
self.mat = {}
# manual copy, copy.deepcopy() doesn't work
for key in args[2].keys():
self.mat[key] = args[2][key]
else:
if len(args) == 1:
mat = args[0]
else:
mat = args
if not isinstance(mat[0], (list, tuple)):
mat = [ [element] for element in mat ]
self.rows = len(mat)
self.cols = len(mat[0])
self.mat = {}
for i in range(self.rows):
assert len(mat[i]) == self.cols
for j in range(self.cols):
value = sympify(mat[i][j])
if value != 0:
self.mat[(i,j)] = value
def __getitem__(self, key):
if isinstance(key, slice) or isinstance(key, int):
lo, hi = self.slice2bounds(key, self.rows*self.cols)
L = []
for i in range(lo, hi):
m,n = self.rowdecomp(i)
                if (m,n) in self.mat:
L.append(self.mat[(m,n)])
else:
L.append(0)
if len(L) == 1:
return L[0]
else:
return L
assert len(key) == 2
if isinstance(key[0], int) and isinstance(key[1], int):
i,j=self.key2ij(key)
if (i, j) in self.mat:
return self.mat[(i,j)]
else:
return 0
elif isinstance(key[0], slice) or isinstance(key[1], slice):
return self.submatrix(key)
else:
raise IndexError("Index out of range: a[%s]"%repr(key))
def rowdecomp(self, num):
assert (0 <= num < self.rows * self.cols) or \
(0 <= -1*num < self.rows * self.cols)
i, j = 0, num
while j >= self.cols:
j -= self.cols
i += 1
return i,j
def __setitem__(self, key, value):
# almost identical, need to test for 0
assert len(key) == 2
if isinstance(key[0], slice) or isinstance(key[1], slice):
if isinstance(value, Matrix):
self.copyin_matrix(key, value)
if isinstance(value, (list, tuple)):
self.copyin_list(key, value)
else:
i,j=self.key2ij(key)
testval = sympify(value)
if testval != 0:
self.mat[(i,j)] = testval
            elif (i,j) in self.mat:
del self.mat[(i,j)]
def row_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if i==k:
pass
elif i > k:
newD[i-1,j] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.rows -= 1
def col_del(self, k):
newD = {}
for (i,j) in self.mat.keys():
if j==k:
pass
elif j > k:
newD[i,j-1] = self.mat[i,j]
else:
newD[i,j] = self.mat[i,j]
self.mat = newD
self.cols -= 1
def toMatrix(self):
l = []
for i in range(self.rows):
c = []
l.append(c)
for j in range(self.cols):
if (i, j) in self.mat:
c.append(self[i, j])
else:
c.append(0)
return Matrix(l)
# from here to end all functions are same as in matrices.py
# with Matrix replaced with SMatrix
def copyin_list(self, key, value):
assert isinstance(value, (list, tuple))
self.copyin_matrix(key, SMatrix(value))
def multiply(self,b):
"""Returns self*b """
def dotprod(a,b,i,j):
assert a.cols == b.rows
r=0
for x in range(a.cols):
r+=a[i,x]*b[x,j]
return r
r = SMatrix(self.rows, b.cols, lambda i,j: dotprod(self,b,i,j))
if r.rows == 1 and r.cols ==1:
return r[0,0]
return r
def submatrix(self, keys):
assert isinstance(keys[0], slice) or isinstance(keys[1], slice)
rlo, rhi = self.slice2bounds(keys[0], self.rows)
clo, chi = self.slice2bounds(keys[1], self.cols)
if not ( 0<=rlo<=rhi and 0<=clo<=chi ):
raise IndexError("Slice indices out of range: a[%s]"%repr(keys))
return SMatrix(rhi-rlo, chi-clo, lambda i,j: self[i+rlo, j+clo])
def reshape(self, _rows, _cols):
        if self.rows*self.cols != _rows*_cols:
            raise ValueError("Invalid reshape parameters %d %d" % (_rows, _cols))
newD = {}
for i in range(_rows):
for j in range(_cols):
m,n = self.rowdecomp(i*_cols + j)
                if (m,n) in self.mat:
newD[(i,j)] = self.mat[(m,n)]
return SMatrix(_rows, _cols, newD)
def cross(self, b):
assert isinstance(b, (list, tuple, Matrix))
        # both self and b must be 3-component vectors (row or column)
        if not ((self.rows == 1 and self.cols == 3 or \
                self.rows == 3 and self.cols == 1) and \
                (b.rows == 1 and b.cols == 3 or \
                b.rows == 3 and b.cols == 1)):
            raise ValueError("Dimensions incorrect for cross product")
else:
return SMatrix(1,3,((self[1]*b[2] - self[2]*b[1]),
(self[2]*b[0] - self[0]*b[2]),
(self[0]*b[1] - self[1]*b[0])))
def zeronm(self,n,m):
warnings.warn( 'Deprecated: use zeros() instead.' )
return SMatrix(n,m,{})
def zero(self, n):
warnings.warn( 'Deprecated: use zeros() instead.' )
return SMatrix(n,n,{})
def zeros(self, dims):
"""Returns a dims = (d1,d2) matrix of zeros."""
n, m = _dims_to_nm( dims )
return SMatrix(n,m,{})
def eye(self, n):
tmp = SMatrix(n,n,lambda i,j:0)
for i in range(tmp.rows):
tmp[i,i] = 1
return tmp
def list2numpy(l):
"""Converts python list of SymPy expressions to a NumPy array."""
from numpy import empty
a = empty(len(l), dtype=object)
for i, s in enumerate(l):
a[i] = s
return a
def matrix2numpy(m):
"""Converts SymPy's matrix to a NumPy array."""
from numpy import empty
a = empty(m.shape, dtype=object)
for i in range(m.rows):
for j in range(m.cols):
a[i, j] = m[i, j]
return a
def a2idx(a):
"""
Tries to convert "a" to an index, returns None on failure.
The result of a2idx() (if not None) can be safely used as an index to
arrays/matrices.
"""
if hasattr(a, "__int__"):
return int(a)
if hasattr(a, "__index__"):
return a.__index__()
def symarray(prefix, shape):
"""Create a numpy ndarray of symbols (as an object array).
The created symbols are named prefix_i1_i2_... You should thus provide a
non-empty prefix if you want your symbols to be unique for different output
arrays, as Sympy symbols with identical names are the same object.
Parameters
----------
prefix : string
A prefix prepended to the name of every symbol.
shape : int or tuple
Shape of the created array. If an int, the array is one-dimensional; for
more than one dimension the shape must be a tuple.
Examples
--------
>> from sympy import symarray
>> symarray('', 3)
[_0 _1 _2]
If you want multiple symarrays to contain distinct symbols, you *must*
provide unique prefixes:
>> a = symarray('', 3)
>> b = symarray('', 3)
>> a[0] is b[0]
True
>> a = symarray('a', 3)
>> b = symarray('b', 3)
>> a[0] is b[0]
False
Creating symarrays with a prefix:
>> symarray('a', 3)
[a_0 a_1 a_2]
For more than one dimension, the shape must be given as a tuple:
>> symarray('a', (2,3))
[[a_0_0 a_0_1 a_0_2]
[a_1_0 a_1_1 a_1_2]]
>> symarray('a', (2,3,2))
[[[a_0_0_0 a_0_0_1]
[a_0_1_0 a_0_1_1]
[a_0_2_0 a_0_2_1]]
<BLANKLINE>
[[a_1_0_0 a_1_0_1]
[a_1_1_0 a_1_1_1]
[a_1_2_0 a_1_2_1]]]
"""
try:
import numpy as np
except ImportError:
raise ImportError("symarray requires numpy to be installed")
arr = np.empty(shape, dtype=object)
for index in np.ndindex(shape):
arr[index] = Symbol('%s_%s' % (prefix, '_'.join(map(str, index))))
return arr
|
bsd-3-clause
| -3,508,994,818,285,606,000
| 30.698166
| 100
| 0.484604
| false
| 3.632847
| false
| false
| false
|
rgkirch/check-for-duplicates
|
walk-and-hash.py
|
1
|
2040
|
import os
import sys
import hashlib
# os.makedirs(dir) to make a dir
# hashfile source
# http://www.pythoncentral.io/finding-duplicate-files-with-python/
def hashfile(path, blocksize = 65536):
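    # read the file in fixed-size chunks so that arbitrarily large files
    # can be hashed without loading them into memory all at once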
infile = open(path, 'rb')
hasher = hashlib.md5()
buf = infile.read(blocksize)
while len(buf) > 0:
hasher.update(buf)
buf = infile.read(blocksize)
infile.close()
return hasher.hexdigest()
def which_dir():
print "default dir is current dir (./)"
raw = raw_input("enter alternate dir:")
if raw:
if os.path.exists(str(raw)):
print "path exists"
return str(raw)
elif os.access(os.path.dirname(str(raw)), os.W_OK):
print "path does not exist but write privileges are given"
return str(raw)
        else:
            print "error, invalid path"
            print "must have write privileges"
            sys.exit(1)
else:
print "using default dir (./)"
return "./"
if __name__ == '__main__':
startDir = which_dir()
all_hashes_once = {}
all_duplicates = {}
for dirName, dirList, fileList in os.walk(startDir):
print "checking", dirName
for filename in fileList:
# print filename
path = os.path.join(dirName, filename)
# file_hash = hashfile(dirName + "/" + filename)
file_hash = hashfile(path)
if file_hash in all_hashes_once:
print "->", filename
if file_hash in all_duplicates:
all_duplicates[file_hash].append(path)
else:
all_duplicates[file_hash] = [all_hashes_once[file_hash], path]
else:
all_hashes_once[file_hash] = path
# print all_hashes_once
print "done checking"
if all_duplicates:
print "duplicates found"
else:
print "no duplicates found"
print
for hash_value in all_duplicates:
for item in all_duplicates[hash_value]:
print item
print
print
|
gpl-2.0
| 629,394,333,001,483,300
| 28.142857
| 82
| 0.567647
| false
| 3.968872
| false
| false
| false
|
felipedau/blueberrywsn
|
blueberrywsn/pi.py
|
1
|
1992
|
from copy import deepcopy
from threading import Event, Lock, Thread
import bluetooth as bt
from constants import UUID
from receiver import Receiver
class Pi(Thread):
def __init__(self):
Thread.__init__(self)
self._devices = {}
self._lock_devices = Lock()
self.server_sock = bt.BluetoothSocket(bt.RFCOMM)
self.server_sock.bind(('', bt.PORT_ANY))
self.server_sock.listen(1)
self.done = None
port = self.server_sock.getsockname()[1]
bt.advertise_service(self.server_sock, 'SampleServer',
service_id=UUID,
service_classes=[UUID, bt.SERIAL_PORT_CLASS],
profiles=[bt.SERIAL_PORT_PROFILE])
print('Waiting for connection on RFCOMM channel %d' % port)
@property
def devices(self):
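        # return a deep copy taken under the lock, so callers get a
        # consistent snapshot without racing the receiver threads that
        # update the dictionary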
self._lock_devices.acquire()
devs = deepcopy(self._devices)
self._lock_devices.release()
return devs
def run(self):
self.done = Event()
while not self.done.isSet():
print('Waiting for clients')
client_sock, client_info = self.server_sock.accept()
r = Receiver(self, client_sock, client_info)
r.daemon = True
r.start()
self.server_sock.close()
print('The server socket has been closed')
def stop(self):
try:
self.done.set()
except AttributeError:
print('The server cannot be stopped. It is not running')
else:
print('The server has been stopped')
def update_device(self, device, data):
self._lock_devices.acquire()
self._devices[device] = data
self._lock_devices.release()
def main():
p = Pi()
p.start()
try:
raw_input('Press "enter" or "ctrl + c" to stop the server\n')
except KeyboardInterrupt:
print()
finally:
p.stop()
if __name__ == '__main__':
main()
|
gpl-3.0
| 8,016,502,288,322,077,000
| 25.56
| 74
| 0.562249
| false
| 4.081967
| false
| false
| false
|
lexdene/pavel
|
pavel/grammar/parser.py
|
1
|
11358
|
from ply import yacc
from . import lexer
class Parser:
def __init__(self):
self._debug = False
import os
if os.environ.get('PARSER_DEBUG') == 'on':
self._debug = True
def _create_lexer(self):
return lexer.Lexer()
def parse(self, source):
if self._debug:
self._debug_parse_tokens(source)
self.__parser = yacc.yacc(module=self)
debug = 0
else:
self.__parser = yacc.yacc(
module=self,
debug=False,
write_tables=False
)
debug = 0
result = self.__parser.parse(
source,
lexer=self._create_lexer(),
debug=debug
)
if self._debug:
import pprint
pprint.pprint(result, indent=4)
return result
def _debug_parse_tokens(self, source):
_lexer = self._create_lexer()
print(' ==== debug begin ==== ')
print(_lexer.tokens)
print(source)
print(repr(source))
_lexer.input(source)
for tok in _lexer:
print(
'%15s, %40s %3d %3d' % (
tok.type, repr(tok.value), tok.lineno, tok.lexpos
)
)
print(' ==== debug end ==== ')
print('')
tokens = lexer.Lexer.tokens
precedence = (
('nonassoc', 'CMP'),
('left', '+', '-'),
('left', '*', '/'),
)
def p_first_rule(self, p):
'''
first_rule : multi_lines
'''
p[0] = p[1]
def p_error(self, p):
raise ValueError(p)
def p_multi_lines(self, p):
'''
multi_lines : empty
| line
| multi_lines line
'''
if len(p) == 2:
if p[1] is None:
p[0] = (
'multi_lines',
dict(
lines=[]
),
)
else:
p[0] = (
'multi_lines',
dict(
lines=[p[1]]
),
)
elif len(p) == 3:
line_list = p[1][1]['lines'] + [p[2]]
p[0] = (
'multi_lines',
dict(
lines=line_list
),
)
else:
raise ValueError('len is %d' % len(p))
def p_empty(self, p):
'empty :'
p[0] = None
def p_line(self, p):
'''
line : expression NEWLINE
| assign NEWLINE
| if_struct NEWLINE
| for_struct NEWLINE
| while_struct NEWLINE
| function_struct NEWLINE
'''
p[0] = p[1]
def p_one_item_expression(self, p):
'''
expression : number
| keyword
| string
| function_call
| member_function_call
| anonymous_function_struct
| block
'''
p[0] = p[1]
def p_three_items_expression(self, p):
'''
expression : expression '+' expression
| expression '-' expression
| expression '*' expression
| expression '/' expression
| expression CMP expression
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2]),
args=(
p[1],
p[3]
)
)
)
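    # Illustrative AST shape built by the rule above (token values depend on
    # the lexer, which is defined elsewhere): parsing "1 + 2" yields roughly
    #   ('expression', {'operator': ('operator', '+'),
    #                   'args': (('number', '1'), ('number', '2'))})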
def p_keyword_expression(self, p):
'''
expression : expression keyword expression
'''
p[0] = (
'function_call',
dict(
function=p[2],
params=[
p[1],
p[3],
]
)
)
def p_assign(self, p):
'''
assign : keyword ASSIGN expression
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2]),
args=(
p[1],
p[3]
)
)
)
def p_get_attr_expression(self, p):
'''
expression : expression '.' keyword
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2]),
args=(
p[1],
p[3]
)
)
)
def p_set_attr_expression(self, p):
'''
expression : expression '.' keyword ASSIGN expression
'''
p[0] = (
'expression',
dict(
operator=('operator', 'set_attr'),
args=(
p[1],
p[3],
p[5],
)
)
)
def p_get_item_expression(self, p):
'''
expression : expression '[' expression ']'
'''
p[0] = (
'expression',
dict(
operator=('operator', p[2] + p[4]),
args=(
p[1],
p[3]
)
)
)
def p_number(self, p):
'''
number : NUMBER
'''
p[0] = ('number', p[1])
def p_keyword(self, p):
'''
keyword : KEYWORD
'''
p[0] = (
'keyword',
dict(
name=p[1]
)
)
def p_string(self, p):
'''
string : STRING
'''
p[0] = ('string', p[1])
def p_if_struct(self, p):
'''
if_struct : IF '(' expression ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'if_struct',
dict(
condition=p[3],
then_block=p[6],
else_block=None,
)
)
def p_if_with_block(self, p):
'''
if_struct : IF INDENT multi_lines OUTDENT NEWLINE THEN INDENT multi_lines OUTDENT
'''
p[0] = (
'if_struct',
dict(
condition=p[3],
then_block=p[8],
else_block=None
)
)
def p_if_with_else(self, p):
'''
if_struct : if_struct NEWLINE ELSE INDENT multi_lines OUTDENT
'''
p[0] = (
'if_struct',
dict(
condition=p[1][1]['condition'],
then_block=p[1][1]['then_block'],
else_block=p[5],
)
)
def p_for_struct(self, p):
'''
for_struct : FOR '(' keyword IN expression ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'for_struct',
dict(
keyword=p[3],
expression=p[5],
body=p[8],
)
)
def p_while_struct(self, p):
'''
while_struct : WHILE '(' expression ')' block
'''
p[0] = (
'while_struct',
dict(
condition=p[3],
body=p[5],
)
)
def p_function_struct(self, p):
'''
function_struct : FUNCTION keyword '(' formal_param_list ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=p[2],
params=p[4],
body=p[7],
)
)
def p_no_param_function_struct(self, p):
'''
function_struct : FUNCTION keyword '(' ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=p[2],
params=[],
body=p[6],
)
)
def p_formal_param_list_with_one_item(self, p):
'''
formal_param_list : keyword
'''
p[0] = [p[1]]
def p_formal_param_list_with_multi_items(self, p):
'''
formal_param_list : formal_param_list ',' keyword
'''
        p[0] = p[1] + [p[3]]
def p_member_function_call(self, p):
'''
member_function_call : expression '.' keyword '(' comma_expression_list ')'
'''
p[0] = (
'member_function_call',
dict(
this_object=p[1],
name=p[3],
params=p[5],
)
)
def p_no_param_member_function_call(self, p):
'''
member_function_call : expression '.' keyword '(' ')'
'''
p[0] = (
'member_function_call',
dict(
this_object=p[1],
name=p[3],
params=[],
)
)
def p_function_call(self, p):
'''
function_call : expression '(' comma_expression_list ')'
'''
p[0] = (
'function_call',
dict(
function=p[1],
params=p[3],
)
)
def p_no_param_function_call(self, p):
'''
function_call : expression '(' ')'
'''
p[0] = (
'function_call',
dict(
function=p[1],
params=[]
)
)
def p_call_block(self, p):
'''
function_call : expression block
'''
p[0] = (
'function_call',
dict(
function=p[1],
params=[p[2]]
)
)
def p_actual_param_list_with_one_item(self, p):
'''
comma_expression_list : expression
'''
p[0] = [p[1]]
def p_actual_param_list_with_multi_items(self, p):
'''
comma_expression_list : comma_expression_list ',' expression
| comma_expression_list ',' expression NEWLINE
'''
p[0] = p[1] + [p[3]]
def p_anonymous_function_struct(self, p):
'''
anonymous_function_struct : FUNCTION '(' formal_param_list ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=None,
params=p[3],
body=p[6]
)
)
def p_anonymous_function_without_param(self, p):
'''
anonymous_function_struct : FUNCTION '(' ')' INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=None,
params=[],
body=p[5],
)
)
def p_anonymous_function_struct_without_param(self, p):
'''
block : INDENT multi_lines OUTDENT
'''
p[0] = (
'function_struct',
dict(
name=None,
params=[],
body=p[2]
)
)
|
gpl-3.0
| -1,472,556,201,478,067,700
| 22.911579
| 101
| 0.36318
| false
| 4.378566
| false
| false
| false
|
rwl/PyCIM
|
CIM15/IEC61970/Meas/AccumulatorLimit.py
|
1
|
2420
|
# Copyright (C) 2010-2011 Richard Lincoln
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from CIM15.IEC61970.Meas.Limit import Limit
class AccumulatorLimit(Limit):
"""Limit values for Accumulator measurementsLimit values for Accumulator measurements
"""
def __init__(self, value=0, LimitSet=None, *args, **kw_args):
"""Initialises a new 'AccumulatorLimit' instance.
@param value: The value to supervise against. The value is positive.
@param LimitSet: The set of limits.
"""
#: The value to supervise against. The value is positive.
self.value = value
self._LimitSet = None
self.LimitSet = LimitSet
super(AccumulatorLimit, self).__init__(*args, **kw_args)
_attrs = ["value"]
_attr_types = {"value": int}
_defaults = {"value": 0}
_enums = {}
_refs = ["LimitSet"]
_many_refs = []
def getLimitSet(self):
"""The set of limits.
"""
return self._LimitSet
def setLimitSet(self, value):
if self._LimitSet is not None:
filtered = [x for x in self.LimitSet.Limits if x != self]
self._LimitSet._Limits = filtered
self._LimitSet = value
if self._LimitSet is not None:
if self not in self._LimitSet._Limits:
self._LimitSet._Limits.append(self)
LimitSet = property(getLimitSet, setLimitSet)
|
mit
| 4,569,718,613,417,140,700
| 36.8125
| 89
| 0.681818
| false
| 4.165232
| false
| false
| false
|
ResolveWang/algrithm_qa
|
分类代表题目/字符串/数字翻译成字符串(动态规划).py
|
1
|
1178
|
"""
Given a number, translate it into a string according to the following rule:
0 translates to "a", 1 translates to "b", ..., 25 translates to "z". A number
may have more than one translation; for example, 12258 has 5 different
translations: "bccfi", "bwfi", "bczi", "mcfi" and "mzfi".
For a given number, how many ways can it be translated?
Approach:
The usual pattern is to count, for each position, the number of translations
of the prefix ending at that position, which can be solved with dynamic
programming:
dp[i] = dp[i-1] + tmp (tmp = dp[i-2] when num_str[index-1:index+1] can be
translated into a valid character, otherwise tmp = 0)
"""
class Num2Str:
def get_total_res(self, num):
if num < 0:
return 0
if len(str(num)) == 1:
return 1
str_num = str(num)
dp = [0 for _ in range(len(str_num))]
dp[0] = 1
if int(str_num[0:2]) > 25:
dp[1] = 1
else:
dp[1] = 2
index = 2
while index < len(str_num):
tmp = 0
if int(str_num[index-1: index+1]) <= 25:
tmp = dp[index-2]
dp[index] = dp[index-1] + tmp
index += 1
return dp[-1]
if __name__ == '__main__':
print(Num2Str().get_total_res(12258))
|
mit
| -2,510,281,898,307,719,700
| 21.414634
| 60
| 0.502179
| false
| 1.881148
| false
| false
| false
|
marcocamma/trx
|
trx/cell.py
|
1
|
3337
|
# -*- coding: utf-8 -*-
from __future__ import print_function,division,absolute_import
import collections
import itertools
import numpy as np
from numpy import sin,cos
class Triclinic(object):
def __init__(self,a=1,b=1,c=1,alpha=90,beta=90,gamma=90):
self.a = a
self.b = b
self.c = c
alpha = alpha*np.pi/180
beta = beta*np.pi/180
gamma = gamma*np.pi/180
self.alpha = alpha
self.beta = beta
self.gamma = gamma
self._s11 = b**2 * c**2 * sin(alpha)**2
self._s22 = a**2 * c**2 * sin(beta)**2
self._s33 = a**2 * b**2 * sin(gamma)**2
self._s12 = a*b*c**2*(cos(alpha) * cos(beta) - cos(gamma))
self._s23 = a**2*b*c*(cos(beta) * cos(gamma) - cos(alpha))
self._s13 = a*b**2*c*(cos(gamma) * cos(alpha) - cos(beta))
self.V = (a*b*c)*np.sqrt(1-cos(alpha)**2 - cos(beta)**2 - cos(gamma)**2 + 2*cos(alpha)*cos(beta)*cos(gamma))
def __call__(self,h,k,l): return self.q(h,k,l)
def d(self,h,k,l):
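        # Triclinic d-spacing (standard crystallographic relation):
        #   1/d**2 = (s11*h**2 + s22*k**2 + s33*l**2
        #             + 2*s12*h*k + 2*s23*k*l + 2*s13*h*l) / V**2
        # using the metric terms s_ij and cell volume V precomputed in __init__.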
temp = self._s11*h**2 + \
self._s22*k**2 + \
self._s33*l**2 + \
2*self._s12*h*k+ \
2*self._s23*k*l+ \
2*self._s13*h*l
d = self.V/np.sqrt(temp)
return d
def Q(self,h,k,l):
return 2*np.pi/self.d(h,k,l)
def reflection_list(self,maxQ=3,lim=10):
ret=dict()
# prepare hkl
i = range(-lim,lim+1)
        hkl = np.asarray( list( itertools.product( i,i,i ) ) )
h,k,l = hkl.T
q = self.Q(h,k,l)
idx = q<maxQ;
q = q[idx]
hkl = hkl[idx]
q = np.round(q,12)
qunique = np.unique(q)
ret = []
for qi in qunique:
reflec = hkl[ q == qi ]
ret.append( (qi,tuple(np.abs(reflec)[0]),len(reflec),reflec) )
return qunique,ret
# for h in range(-lim,lim+1):
# for j in range(-lim,lim+1):
class Orthorombic(Triclinic):
def __init__(self,a=1,b=1,c=1):
Triclinic.__init__(self,a=a,b=b,c=c,alpha=90,beta=90,gamma=90)
class Cubic(Orthorombic):
def __init__(self,a=1):
Orthorombic.__init__(self,a=a,b=a,c=a)
class Monoclinic(Triclinic):
def __init__(self,a=1,b=1,c=1,beta=90.):
Triclinic.__init__(self,a=a,b=b,c=c,alpha=90,beta=beta,gamma=90)
def plotReflections(cell_instance,maxQ=3,ax=None,line_kw=dict(),text_kw=dict()):
import matplotlib.pyplot as plt
from matplotlib import lines
import matplotlib.transforms as transforms
_,refl_info = cell_instance.reflection_list(maxQ=maxQ)
if ax is None: ax = plt.gca()
# the x coords of this transformation are data, and the
# y coord are axes
trans = transforms.blended_transform_factory(ax.transData, ax.transAxes)
txt_kw = dict( horizontalalignment='center', rotation=45)
txt_kw.update(**text_kw)
for reflection in refl_info[1:]:
q,hkl,n,_ = reflection
line = lines.Line2D( [q,q],[1,1.1],transform=trans,**line_kw)
line.set_clip_on(False)
ax.add_line(line)
ax.text(q,1.15,str(hkl),transform=trans,**txt_kw)
ti3o5_lambda = Triclinic(a = 9.83776, b = 3.78674, c = 9.97069, beta = 91.2567)
ti3o5_beta = Triclinic(a = 9.7382 , b = 3.8005 , c = 9.4333 , beta = 91.496)
#ti3o5_beta = Monoclinic(a = 9.7382 , b = 3.8005 , c = 9.4333 , beta = 91.496)
ti3o5_alpha = Triclinic(a = 9.8372, b = 3.7921, c = 9.9717)
ti3o5_alpha1 = Orthorombic(a = 9.8372, b = 3.7921, c = 9.9717)
si = Cubic(a=5.431020504)
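# Illustrative check (values computed by hand, approximate): for silicon,
# d(111) = a/sqrt(3) ~ 3.1356 A and Q(111) = 2*pi/d ~ 2.0039 1/A, e.g.:
#   si.d(1, 1, 1) -> ~3.1356
#   si.Q(1, 1, 1) -> ~2.0039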
|
mit
| 8,474,517,120,379,409,000
| 30.481132
| 115
| 0.594246
| false
| 2.43399
| false
| false
| false
|
ada-x/respect_mah_authoritay
|
movies_project.py
|
1
|
2198
|
import movies # my file with the class definition
import fresh_tomatoes # renders site
pi_movie = movies.Movie('Pi',
'https://www.youtube.com/watch?v=jo18VIoR2xU',
'a mathematician makes an incredible discovery',
'http://images.moviepostershop.com/pi-movie-poster-1998-1020474533.jpg')
big_fish = movies.Movie('Big Fish',
'https://www.youtube.com/watch?v=M3YVTgTl-F0',
'a story about the stories between a father and son',
'http://www.gstatic.com/tv/thumb/movieposters/32942/p32942_p_v8_aa.jpg')
gone_in_60_seconds = movies.Movie('Gone In 60 Seconds',
'https://www.youtube.com/watch?v=o6AyAM1buQ8',
'A reformed car thief is given three days to steal 50 pristine autos',
'http://www.gstatic.com/tv/thumb/movieposters/25612/p25612_p_v8_aa.jpg')
lauberge_espagnole = movies.Movie('L\'auberge Espagnole',
'https://www.youtube.com/watch?v=CCs6AzLeNQI',
'a student\'s adventures living in Barcelona',
'http://www.gstatic.com/tv/thumb/dvdboxart/30919/p30919_d_v8_aa.jpg')
lilo_and_stitch = movies.Movie('Lilo and Stitch',
'https://www.youtube.com/watch?v=hu9bERy7XGY',
'a lonely little girl gets an extra-terrestrial friend',
'http://img.lum.dolimg.com/v1/images/open-uri20150422-12561-1dajwj_23920e88.jpeg?region=0%2C0%2C1000%2C1409')
idiocracy = movies.Movie('Idiocracy',
'https://www.youtube.com/watch?v=BBvIweCIgwk',
'an average american wakes up in the future',
'http://www.gstatic.com/tv/thumb/dvdboxart/159395/p159395_d_v8_aa.jpg')
movies_list = [pi_movie, lilo_and_stitch, lauberge_espagnole,
gone_in_60_seconds, big_fish, idiocracy]
# print(movies_list)
# pi_movie.show_trailer()
# opens and renders display
fresh_tomatoes.open_movies_page(movies_list)
|
unlicense
| 8,596,849,178,169,684,000
| 51.333333
| 140
| 0.572793
| false
| 3.148997
| false
| false
| false
|
garrettr/onionshare
|
onionshare_gui/onionshare_gui.py
|
1
|
5020
|
import os, sys, subprocess, inspect, platform, argparse
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from PyQt4.QtWebKit import *
if platform.system() == 'Darwin':
onionshare_gui_dir = os.path.dirname(__file__)
else:
onionshare_gui_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
try:
import onionshare
except ImportError:
sys.path.append(os.path.abspath(onionshare_gui_dir+"/.."))
import onionshare
from onionshare import translated
import webapp
window_icon = None
class Application(QApplication):
def __init__(self):
platform = onionshare.get_platform()
if platform == 'Tails' or platform == 'Linux':
self.setAttribute(Qt.AA_X11InitThreads, True)
QApplication.__init__(self, sys.argv)
class WebAppThread(QThread):
def __init__(self, webapp_port):
QThread.__init__(self)
self.webapp_port = webapp_port
def run(self):
webapp.app.run(port=self.webapp_port)
class Window(QWebView):
def __init__(self, basename, webapp_port):
global window_icon
QWebView.__init__(self)
self.setWindowTitle("{0} | OnionShare".format(basename))
self.resize(580, 400)
self.setMinimumSize(580, 400)
self.setMaximumSize(580, 400)
self.setWindowIcon(window_icon)
self.load(QUrl("http://127.0.0.1:{0}".format(webapp_port)))
def alert(msg, icon=QMessageBox.NoIcon):
global window_icon
dialog = QMessageBox()
dialog.setWindowTitle("OnionShare")
dialog.setWindowIcon(window_icon)
dialog.setText(msg)
dialog.setIcon(icon)
dialog.exec_()
def select_file(strings, filename=None):
# get filename, either from argument or file chooser dialog
if not filename:
args = {}
if onionshare.get_platform() == 'Tails':
args['directory'] = '/home/amnesia'
filename = QFileDialog.getOpenFileName(caption=translated('choose_file'), options=QFileDialog.ReadOnly, **args)
if not filename:
return False, False
filename = str(filename)
# validate filename
if not os.path.isfile(filename):
alert(translated("not_a_file").format(filename), QMessageBox.Warning)
return False, False
filename = os.path.abspath(filename)
basename = os.path.basename(filename)
return filename, basename
def main():
onionshare.strings = onionshare.load_strings()
# start the Qt app
app = Application()
# check for root in Tails
if onionshare.get_platform() == 'Tails' and not onionshare.is_root():
subprocess.call(['/usr/bin/gksudo']+sys.argv)
return
# parse arguments
parser = argparse.ArgumentParser()
parser.add_argument('--local-only', action='store_true', dest='local_only', help='Do not attempt to use tor: for development only')
parser.add_argument('--stay-open', action='store_true', dest='stay_open', help='Keep hidden service running after download has finished')
parser.add_argument('--debug', action='store_true', dest='debug', help='Log errors to disk')
parser.add_argument('filename', nargs='?', help='File to share')
args = parser.parse_args()
filename = args.filename
local_only = args.local_only
stay_open = bool(args.stay_open)
debug = bool(args.debug)
onionshare.set_stay_open(stay_open)
# create the onionshare icon
global window_icon, onionshare_gui_dir
window_icon = QIcon("{0}/onionshare-icon.png".format(onionshare_gui_dir))
# try starting hidden service
onionshare_port = onionshare.choose_port()
local_host = "127.0.0.1:{0}".format(onionshare_port)
if not local_only:
try:
onion_host = onionshare.start_hidden_service(onionshare_port)
except onionshare.NoTor as e:
alert(e.args[0], QMessageBox.Warning)
return
onionshare.tails_open_port(onionshare_port)
# select file to share
filename, basename = select_file(onionshare.strings, filename)
if not filename:
return
# initialize the web app
webapp.onionshare = onionshare
webapp.onionshare_port = onionshare_port
webapp.filename = filename
webapp.qtapp = app
webapp.clipboard = app.clipboard()
webapp.stay_open = stay_open
if not local_only:
webapp.onion_host = onion_host
else:
webapp.onion_host = local_host
if debug:
webapp.debug_mode()
# run the web app in a new thread
webapp_port = onionshare.choose_port()
onionshare.tails_open_port(webapp_port)
webapp_thread = WebAppThread(webapp_port)
webapp_thread.start()
# clean up when app quits
def shutdown():
onionshare.tails_close_port(onionshare_port)
onionshare.tails_close_port(webapp_port)
app.connect(app, SIGNAL("aboutToQuit()"), shutdown)
# launch the window
web = Window(basename, webapp_port)
web.show()
# all done
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
gpl-3.0
| 1,164,837,602,835,972,400
| 30.572327
| 141
| 0.65996
| false
| 3.608914
| false
| false
| false
|
openweave/openweave-core
|
src/tools/simnet/lib/simnet/layouts/two-hans-shared-host-gateway.py
|
1
|
3105
|
#
# Simnet Network Layout: Two HANs with shared gateways implemented on host
#
# This simnet configuration defines two HANs, each with its own WiFi and Thread networks.
# Both HANs contain a single Weave device connected to the respective WiFi/Thread networks.
# The HANs also contain separate Gateway nodes that are implemented, in unison, by the
# host (i.e. the host acts as both Gateways simultaneously). The gateways use the host's
# default interface (typically eth0) as their outside interface allowing the HANs to
# access the internet if the host has internet access.
#
# Note: In order for this configuration to work, the two Gateway nodes must use distinct
# IPv4 subnets on their inside interfaces, even though in a real scenario they could use
# the same subnet.
#
#
# Copyright (c) 2015-2017 Nest Labs, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#===============================================================================
# HAN-1
#===============================================================================
WiFiNetwork(
name = 'han-1-wifi',
)
ThreadNetwork(
name = 'han-1-thread',
meshLocalPrefix = 'fd24:2424:2424::/64'
)
# Gateway in HAN-1 implemented on host, with outside access via host's default interface.
Gateway(
name = 'han-1-gw',
outsideNetwork = None,
outsideInterface = 'host-default',
useHost = True,
insideNetwork = 'han-1-wifi',
insideIP4Subnet = '192.168.168.0/24',
isIP4DefaultGateway = True
)
# Weave device in HAN-1 connected to HAN-1 WiFi and Thread networks
WeaveDevice(
name = 'han-1-dev',
weaveNodeId = 1,
weaveFabricId = 1,
wifiNetwork = 'han-1-wifi',
threadNetwork = 'han-1-thread'
)
#===============================================================================
# HAN-2
#===============================================================================
WiFiNetwork(
name = 'han-2-wifi',
)
ThreadNetwork(
name = 'han-2-thread',
meshLocalPrefix = 'fd42:4242:4242::/64'
)
# Gateway in HAN-2 implemented on host, with outside access via host's default interface.
Gateway(
name = 'han-2-gw',
outsideNetwork = None,
outsideInterface = 'host-default',
useHost = True,
insideNetwork = 'han-2-wifi',
insideIP4Subnet = '192.168.167.0/24',
isIP4DefaultGateway = True
)
# Weave device in HAN-2 connected to HAN-2 WiFi and Thread networks
WeaveDevice(
name = 'han-2-dev',
weaveNodeId = 2,
weaveFabricId = 2,
wifiNetwork = 'han-2-wifi',
threadNetwork = 'han-2-thread'
)
|
apache-2.0
| -7,368,057,710,601,936,000
| 31.010309
| 91
| 0.629308
| false
| 3.696429
| false
| false
| false
|
ps-jay/temper-python
|
temperusb/snmp.py
|
1
|
3612
|
# encoding: utf-8
#
# Run snmp_temper.py as a pass-persist module for NetSNMP.
# See README.md for instructions.
#
# Copyright 2012-2014 Philipp Adelt <info@philipp.adelt.net>
#
# This code is licensed under the GNU public license (GPL). See LICENSE.md for details.
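#
# A typical snmpd.conf hook might look like this (the script path is
# illustrative; the base OID matches the PassPersist root configured in
# main() below):
#
#   pass_persist .1.3.6.1.4.1 /usr/local/bin/snmp_temper.py
#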
import os
import sys
import syslog
import threading
import snmp_passpersist as snmp
from temperusb.temper import TemperHandler, TemperDevice
ERROR_TEMPERATURE = 9999
def _unbuffered_handle(fd):
return os.fdopen(fd.fileno(), 'w', 0)
class LogWriter():
def __init__(self, ident='temper-python', facility=syslog.LOG_DAEMON):
syslog.openlog(ident, 0, facility)
def write_log(self, message, prio=syslog.LOG_INFO):
syslog.syslog(prio, message)
class Updater():
def __init__(self, pp, logger, testmode=False):
self.logger = logger
self.pp = pp
self.testmode = testmode
self.usb_lock = threading.Lock() # used to stop reinitialization interfering with update-thread
self._initialize()
def _initialize(self):
with self.usb_lock:
try:
self.th = TemperHandler()
self.devs = self.th.get_devices()
self.logger.write_log('Found %i thermometer devices.' % len(self.devs))
for i, d in enumerate(self.devs):
self.logger.write_log('Initial temperature of device #%i: %0.1f degree celsius' % (i, d.get_temperature()))
except Exception as e:
self.logger.write_log('Exception while initializing: %s' % str(e))
def _reinitialize(self):
# Tries to close all known devices and starts over.
self.logger.write_log('Reinitializing devices')
with self.usb_lock:
for i,d in enumerate(self.devs):
try:
d.close()
except Exception as e:
self.logger.write_log('Exception closing device #%i: %s' % (i, str(e)))
self._initialize()
def update(self):
if self.testmode:
# APC Internal/Battery Temperature
self.pp.add_int('318.1.1.1.2.2.2.0', 99)
# Cisco devices temperature OIDs
self.pp.add_int('9.9.13.1.3.1.3.1', 97)
self.pp.add_int('9.9.13.1.3.1.3.2', 98)
self.pp.add_int('9.9.13.1.3.1.3.3', 99)
else:
try:
with self.usb_lock:
temperatures = [d.get_temperature() for d in self.devs]
self.pp.add_int('318.1.1.1.2.2.2.0', int(max(temperatures)))
for i, temperature in enumerate(temperatures[:3]): # use max. first 3 devices
self.pp.add_int('9.9.13.1.3.1.3.%i' % (i+1), int(temperature))
except Exception as e:
self.logger.write_log('Exception while updating data: %s' % str(e))
# Report an exceptionally large temperature to set off all alarms.
# snmp_passpersist does not expose an API to remove an OID.
for oid in ('318.1.1.1.2.2.2.0', '9.9.13.1.3.1.3.1', '9.9.13.1.3.1.3.2', '9.9.13.1.3.1.3.3'):
self.pp.add_int(oid, ERROR_TEMPERATURE)
self.logger.write_log('Starting reinitialize after error on update')
self._reinitialize()
def main():
sys.stdout = _unbuffered_handle(sys.stdout)
pp = snmp.PassPersist(".1.3.6.1.4.1")
logger = LogWriter()
upd = Updater(pp, logger, testmode=('--testmode' in sys.argv))
pp.start(upd.update, 5) # update every 5s
if __name__ == '__main__':
main()
|
gpl-3.0
| -4,935,427,862,383,892,000
| 37.425532
| 127
| 0.580288
| false
| 3.375701
| true
| false
| false
|
rs2/bokeh
|
sphinx/source/docs/user_guide/examples/extensions_example_tool.py
|
1
|
1483
|
from bokeh.core.properties import Instance
from bokeh.io import output_file, show
from bokeh.models import ColumnDataSource, Tool
from bokeh.plotting import figure
output_file('tool.html')
JS_CODE = """
import * as p from "core/properties"
import {GestureTool, GestureToolView} from "models/tools/gestures/gesture_tool"
export class DrawToolView extends GestureToolView
# this is executed when the pan/drag event starts
_pan_start: (e) ->
@model.source.data = {x: [], y: []}
# this is executed on subsequent mouse/touch moves
_pan: (e) ->
frame = @plot_model.frame
{sx, sy} = e.bokeh
if not frame.bbox.contains(sx, sy)
return null
x = frame.xscales['default'].invert(sx)
y = frame.yscales['default'].invert(sy)
@model.source.data.x.push(x)
@model.source.data.y.push(y)
@model.source.change.emit()
  # this is executed when the pan/drag ends
_pan_end: (e) -> return null
export class DrawTool extends GestureTool
default_view: DrawToolView
type: "DrawTool"
tool_name: "Drag Span"
icon: "bk-tool-icon-lasso-select"
event_type: "pan"
default_order: 12
@define { source: [ p.Instance ] }
"""
class DrawTool(Tool):
__implementation__ = JS_CODE
source = Instance(ColumnDataSource)
source = ColumnDataSource(data=dict(x=[], y=[]))
plot = figure(x_range=(0,10), y_range=(0,10), tools=[DrawTool(source=source)])
plot.title.text ="Drag to draw on the plot"
plot.line('x', 'y', source=source)
show(plot)
|
bsd-3-clause
| -8,073,249,730,603,915,000
| 24.568966
| 79
| 0.685772
| false
| 3.155319
| false
| false
| false
|
amitay/samba
|
buildtools/wafsamba/irixcc.py
|
1
|
1943
|
# compiler definition for irix/MIPSpro cc compiler
# based on suncc.py from waf
import os, optparse
import Utils, Options, Configure
import ccroot, ar
from Configure import conftest
from compiler_cc import c_compiler
c_compiler['irix'] = ['gcc', 'irixcc']
@conftest
def find_irixcc(conf):
v = conf.env
cc = None
if v['CC']: cc = v['CC']
elif 'CC' in conf.environ: cc = conf.environ['CC']
if not cc: cc = conf.find_program('cc', var='CC')
if not cc: conf.fatal('irixcc was not found')
cc = conf.cmd_to_list(cc)
try:
if Utils.cmd_output(cc + ['-version']) != '':
conf.fatal('irixcc %r was not found' % cc)
except ValueError:
conf.fatal('irixcc -v could not be executed')
v['CC'] = cc
v['CC_NAME'] = 'irix'
@conftest
def irixcc_common_flags(conf):
v = conf.env
v['CC_SRC_F'] = ''
v['CC_TGT_F'] = ['-c', '-o', '']
v['CPPPATH_ST'] = '-I%s' # template for adding include paths
# linker
if not v['LINK_CC']: v['LINK_CC'] = v['CC']
v['CCLNK_SRC_F'] = ''
v['CCLNK_TGT_F'] = ['-o', '']
v['LIB_ST'] = '-l%s' # template for adding libs
v['LIBPATH_ST'] = '-L%s' # template for adding libpaths
v['STATICLIB_ST'] = '-l%s'
v['STATICLIBPATH_ST'] = '-L%s'
v['CCDEFINES_ST'] = '-D%s'
# v['SONAME_ST'] = '-Wl,-h -Wl,%s'
# v['SHLIB_MARKER'] = '-Bdynamic'
# v['STATICLIB_MARKER'] = '-Bstatic'
# program
v['program_PATTERN'] = '%s'
# shared library
# v['shlib_CCFLAGS'] = ['-Kpic', '-DPIC']
# v['shlib_LINKFLAGS'] = ['-G']
v['shlib_PATTERN'] = 'lib%s.so'
# static lib
# v['staticlib_LINKFLAGS'] = ['-Bstatic']
# v['staticlib_PATTERN'] = 'lib%s.a'
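# waf's configure stage runs the conftest-decorated functions named in
# `detect` below, in the order listed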
detect = '''
find_irixcc
find_cpp
find_ar
irixcc_common_flags
cc_load_tools
cc_add_flags
link_add_flags
'''
|
gpl-3.0
| 6,208,172,338,209,144,000
| 24.233766
| 73
| 0.529593
| false
| 2.908683
| false
| false
| false
|
mhl/mysociety-cvs
|
sitestats/pylib/sitestats/newsletters/tests/hfymp.py
|
1
|
1131
|
import unittest
from sitestats.newsletters.models.hfymp import HFYMPNewsletter
from tests import example_dir
from newsletter import MockPiwik, MockGoogle, newsletter_date
class HFYMPNewsletterTests(unittest.TestCase):
def setUp(self):
self.sources = {'piwik' : MockPiwik(), 'google' : MockGoogle()}
self.hfymp = HFYMPNewsletter()
self.hfymp.set_site_id = lambda sources: None
self.hfymp.base_url = 'http://www.hearfromyourmp.com'
def testRenderedToHTMLTemplateCorrectly(self):
html = self.hfymp.render('html', self.sources, date=newsletter_date()).strip()
expected_html = open(example_dir() + 'hfymp.html').read().strip()
self.assertEqual(expected_html, html, 'render produces correct output in HTML for example data')
def testRenderedToTextTemplateCorrectly(self):
text = self.hfymp.render('text', self.sources, date=newsletter_date()).strip()
expected_text = open(example_dir() + 'hfymp.txt').read().strip()
self.assertEqual(expected_text, text, 'render produces correct output in text for example data')
|
agpl-3.0
| 3,400,418,588,539,802,600
| 50.454545
| 104
| 0.691424
| false
| 3.660194
| true
| false
| false
|
melinath/django-graph-api
|
django_graph_api/tests/conftest.py
|
1
|
1399
|
import pytest
from test_app.models import (
Droid,
Episode,
Human,
)
@pytest.fixture
def starwars_data(transactional_db):
luke, _ = Human.objects.get_or_create(
id=1000,
name='Luke Skywalker',
)
darth_vader, _ = Human.objects.get_or_create(
id=1001,
name='Darth Vader',
)
han, _ = Human.objects.get_or_create(
id=1002,
name='Han Solo',
)
leia, _ = Human.objects.get_or_create(
id=1003,
name='Leia Organa',
)
c3po, _ = Droid.objects.get_or_create(
id=2000,
name='C-3PO',
primary_function='Protocol',
)
r2d2, _ = Droid.objects.get_or_create(
id=2001,
name='R2-D2',
primary_function='Astromech',
)
for friend in (han, leia, c3po, r2d2):
luke.friends.add(friend)
han.friends.add(leia)
han.friends.add(r2d2)
leia.friends.add(c3po)
leia.friends.add(r2d2)
c3po.friends.add(r2d2)
a_new_hope, _ = Episode.objects.get_or_create(
id=1,
name='A New Hope',
number=4
)
empire_strikes_back, _ = Episode.objects.get_or_create(
id=2,
name='The Empire Strikes Back',
number=5
)
for character in (luke, han, leia, c3po, r2d2, darth_vader):
a_new_hope.characters.add(character)
empire_strikes_back.characters.add(character)
|
mit
| -1,655,201,041,344,370,000
| 21.934426
| 64
| 0.567548
| false
| 2.803607
| false
| false
| false
|
cherepaha/PyDLV
|
pydlv/dl_plotter.py
|
1
|
2699
|
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
from matplotlib import cm
from matplotlib.ticker import MaxNLocator
from mpl_toolkits.mplot3d import Axes3D
class DLPlotter:
'''
    This class is responsible for plotting decision landscapes. Matplotlib is used as the plotting backend.
'''
figsize = (10.5, 6) # in inches, at 100 dpi
# figsize = (14, 8) # in inches, at 100 dpi
legendFontSize = 24
tickLabelFontSize = 18
axisLabelFontSize = 24
lw=2.0
def __init__(self, elev=27, azim=130, ax=None):
if ax is None:
fig = plt.figure(figsize=self.figsize)
self.ax = fig.add_subplot(111, projection='3d')
# self.ax = fig.gca(projection='3d')
else:
self.ax = ax
self.set_axis_params(elev, azim)
def set_axis_params(self, elev=27, azim=130):
self.ax.xaxis.set_major_locator(MaxNLocator(5))
self.ax.yaxis.set_major_locator(MaxNLocator(5))
self.ax.zaxis.set_major_locator(MaxNLocator(1))
self.ax.set_xlabel(r'x coordinate', fontsize=self.axisLabelFontSize, labelpad=20)
self.ax.set_ylabel(r'y coordinate', fontsize=self.axisLabelFontSize, labelpad=20)
self.ax.tick_params(axis='both', which='major', labelsize=self.tickLabelFontSize)
self.ax.view_init(elev, azim)
def plot_surface(self, x_grid, y_grid, z, cmap=cm.viridis, color=None, scale_z=True,
view=None, alpha=1.0, shade=False, linewidth=0.1, aa=True, plot_marble=True):
n_cells=100
x, y = np.meshgrid((x_grid[1:]+x_grid[:-1])/2, (y_grid[1:]+y_grid[:-1])/2)
z = np.nan_to_num(z)
if scale_z:
self.ax.set_zlim([np.min(z), 0])
norm = mpl.colors.Normalize(vmin=np.min(z), vmax=0, clip=False)
if plot_marble:
self.ax.plot([0.], [0.], [0.], marker='o', markersize=15, color='black')
if color is None:
self.ax.plot_surface(x, y, z, cmap=cmap, norm=norm, alpha=alpha, shade=shade,
rcount=n_cells, ccount=n_cells, linewidth=linewidth, edgecolors='k', antialiased=aa)
else:
self.ax.plot_surface(x, y, z, color=color, alpha=alpha, shade=shade, rcount=n_cells,
ccount=n_cells, linewidth=linewidth, edgecolors='k', antialiased=aa)
if view == 'top right':
self.ax.view_init(elev=27, azim=40)
return self.ax
def add_legend(self, colors, labels):
patches = [mpl.patches.Patch(color=color, linewidth=0) for color in colors]
self.ax.legend(patches, labels, fontsize=self.legendFontSize)
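
# A minimal usage sketch (illustrative data; assumes a 10x10 landscape whose
# values are <= 0, which is what plot_surface's z-limit handling expects):
#   import numpy as np
#   x_grid = np.linspace(-1.0, 1.0, 11)
#   y_grid = np.linspace(-1.0, 1.0, 11)
#   z = -np.random.rand(10, 10)
#   plotter = DLPlotter()
#   plotter.plot_surface(x_grid, y_grid, z)
#   plt.show()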
|
gpl-3.0
| 8,262,736,209,481,907,000
| 41.857143
| 113
| 0.607262
| false
| 3.190307
| false
| false
| false
|
rsignell-usgs/notebook
|
pyugrid/pyugrid/test_examples.py
|
1
|
2559
|
#!/usr/bin/env python
"""
Some example UGRIDs to test, etc with
"""
from __future__ import (absolute_import, division, print_function)
from pyugrid import ugrid
def two_triangles():
"""
returns about the simplest triangle grid possible
4 nodes, two triangles, five edges
"""
nodes = [(0.1, 0.1),
(2.1, 0.1),
(1.1, 2.1),
(3.1, 2.1)]
faces = [(0, 1, 2),
(1, 3, 2), ]
edges = [(0, 1),
(1, 3),
(3, 2),
(2, 0),
(1, 2)]
return ugrid.UGrid(nodes, faces, edges)
def twenty_one_triangles():
"""
returns a basic triangle grid with 21 triangles, a hole and a "tail"
"""
nodes = [(5, 1),
(10, 1),
(3, 3),
(7, 3),
(9, 4),
(12, 4),
(5, 5),
(3, 7),
(5, 7),
(7, 7),
(9, 7),
(11, 7),
(5, 9),
(8, 9),
(11, 9),
(9, 11),
(11, 11),
(7, 13),
(9, 13),
(7, 15), ]
faces = [(0, 1, 3),
(0, 6, 2),
(0, 3, 6),
(1, 4, 3),
(1, 5, 4),
(2, 6, 7),
(6, 8, 7),
(7, 8, 12),
(6, 9, 8),
(8, 9, 12),
(9, 13, 12),
(4, 5, 11),
(4, 11, 10),
(9, 10, 13),
(10, 11, 14),
(10, 14, 13),
(13, 14, 15),
(14, 16, 15),
(15, 16, 18),
(15, 18, 17),
(17, 18, 19), ]
# We may want to use this later to define just the outer boundary.
boundaries = [(0, 1),
(1, 5),
(5, 11),
(11, 14),
(14, 16),
(16, 18),
(18, 19),
(19, 17),
(17, 15),
(15, 13),
(13, 12),
(12, 7),
(7, 2),
(2, 0),
(3, 4),
(4, 10),
(10, 9),
(9, 6),
(6, 3), ]
grid = ugrid.UGrid(nodes, faces, boundaries=boundaries)
grid.build_edges()
return grid
if __name__ == "__main__":
grid = twenty_one_triangles()
print(grid.edges)
print(len(grid.edges))
grid.build_edges()
print(grid.edges)
print(len(grid.edges))
|
mit
| 651,994,480,699,171,300
| 21.447368
| 72
| 0.313794
| false
| 3.458108
| false
| false
| false
|
at15/ts-parallel
|
bin/agraph.py
|
1
|
2982
|
#!/usr/bin/env python3
import glob
import re
import csv
import matplotlib.pyplot as plt
def main():
data = {}
operations = ["sort", "reduce"]
types = ["int", "float", "double"]
for op in operations:
for tp in types:
# i.e. sort int
data[op + "_" + tp] = {}
results = glob.glob("*_" + op + "_*_" + tp + ".csv")
for result in results:
backend, num = re.match(
"(.*)_" + op + "_(.*)_" + tp + ".csv", result).groups()
# data[op + "_" + tp]
if backend not in data[op + "_" + tp]:
data[op + "_" + tp][backend] = {}
num = int(num)
# print(backend, num)
data[op + "_" + tp][backend][num] = {}
with open(result) as f:
                    # NOTE: csv.DictReader uses the CSV header row as the
                    # dictionary keys for each parsed row
reader = csv.DictReader(f)
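                    # e.g. with a "stage,duration" header each row parses to
                    # {"stage": "run", "duration": "825079"} -- all values are
                    # strings (sample numbers from the comment further below)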
for row in reader:
data[op + "_" + tp][backend][num][row["stage"]
] = row["duration"]
# print(row)
# print(results)
# print(data)
# now let's draw the graph
plot_data = {}
for op, backends in data.items():
# print(op)
plot_data[op] = []
for backend, results in backends.items():
pdata = {"name": backend, "x": [], "y": []}
# print(backend)
            # sorted results look like:
            #   [(10, {'init': '2771', 'generate': '7667', 'copy': '112781784',
            #          'run': '825079', 'delete': '67504'}),
            #    (50, {'init': '1045', 'generate': '8579', 'copy': '110102907',
            #          'run': '1389482', 'delete': '68685'})]
sorted_results = sorted(results.items())
for result in sorted_results:
num, stages = result
# print(num)
if "run" not in stages:
print("didn't find run!", op, backend, num)
continue
pdata["x"].append(num)
pdata["y"].append(stages["run"])
plot_data[op].append(pdata)
# print(plot_data)
i = 1
color_map = {"serial": "C1", "boost": "C2", "thrust": "C3"}
exclude = {"serial": True}
for op, pdatas in plot_data.items():
plt.figure(i)
i += 1
for pdata in pdatas:
if pdata["name"] in exclude:
continue
plt.plot(pdata["x"], pdata["y"],
color_map[pdata["name"]], label=pdata["name"])
plt.title(op)
plt.xlabel("Vector length")
        # TODO: the ylabel is not shown, and colors change between figures
        # NOTE: times are reported in microseconds; nanosecond values
        # overflowed to negative numbers
plt.ylabel("Time (us)")
plt.legend(loc='upper right', shadow=True, fontsize='x-small')
plt.show()
if __name__ == "__main__":
main()
|
mit
| 5,607,377,259,063,584,000
| 37.230769
| 217
| 0.450704
| false
| 3.928854
| false
| false
| false
|
cyphactor/lifecyclemanager
|
testenv/trac-0.10.4/trac/versioncontrol/web_ui/log.py
|
1
|
9917
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2003-2006 Edgewall Software
# Copyright (C) 2003-2005 Jonas Borgström <jonas@edgewall.com>
# Copyright (C) 2005-2006 Christian Boos <cboos@neuf.fr>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://trac.edgewall.org/log/.
#
# Author: Jonas Borgström <jonas@edgewall.com>
# Christian Boos <cboos@neuf.fr>
import re
import urllib
from trac.core import *
from trac.perm import IPermissionRequestor
from trac.util.datefmt import http_date
from trac.util.html import html
from trac.util.text import wrap
from trac.versioncontrol import Changeset
from trac.versioncontrol.web_ui.changeset import ChangesetModule
from trac.versioncontrol.web_ui.util import *
from trac.web import IRequestHandler
from trac.web.chrome import add_link, add_stylesheet, INavigationContributor
from trac.wiki import IWikiSyntaxProvider, Formatter
LOG_LIMIT = 100
class LogModule(Component):
implements(INavigationContributor, IPermissionRequestor, IRequestHandler,
IWikiSyntaxProvider)
# INavigationContributor methods
def get_active_navigation_item(self, req):
return 'browser'
def get_navigation_items(self, req):
return []
# IPermissionRequestor methods
def get_permission_actions(self):
return ['LOG_VIEW']
# IRequestHandler methods
def match_request(self, req):
import re
match = re.match(r'/log(?:(/.*)|$)', req.path_info)
if match:
req.args['path'] = match.group(1) or '/'
return True
def process_request(self, req):
req.perm.assert_permission('LOG_VIEW')
mode = req.args.get('mode', 'stop_on_copy')
path = req.args.get('path', '/')
rev = req.args.get('rev')
stop_rev = req.args.get('stop_rev')
format = req.args.get('format')
verbose = req.args.get('verbose')
limit = LOG_LIMIT
repos = self.env.get_repository(req.authname)
normpath = repos.normalize_path(path)
rev = unicode(repos.normalize_rev(rev))
if stop_rev:
stop_rev = unicode(repos.normalize_rev(stop_rev))
if repos.rev_older_than(rev, stop_rev):
rev, stop_rev = stop_rev, rev
req.hdf['title'] = path + ' (log)'
req.hdf['log'] = {
'mode': mode,
'path': path,
'rev': rev,
'verbose': verbose,
'stop_rev': stop_rev,
'browser_href': req.href.browser(path),
'changeset_href': req.href.changeset(),
'log_href': req.href.log(path, rev=rev)
}
path_links = get_path_links(req.href, path, rev)
req.hdf['log.path'] = path_links
if path_links:
add_link(req, 'up', path_links[-1]['href'], 'Parent directory')
# The `history()` method depends on the mode:
# * for ''stop on copy'' and ''follow copies'', it's `Node.history()`
# * for ''show only add, delete'' it's`Repository.get_path_history()`
if mode == 'path_history':
def history(limit):
for h in repos.get_path_history(path, rev, limit):
yield h
else:
history = get_existing_node(req, repos, path, rev).get_history
# -- retrieve history, asking for limit+1 results
info = []
previous_path = repos.normalize_path(path)
for old_path, old_rev, old_chg in history(limit+1):
if stop_rev and repos.rev_older_than(old_rev, stop_rev):
break
old_path = repos.normalize_path(old_path)
item = {
'rev': str(old_rev),
'path': old_path,
'log_href': req.href.log(old_path, rev=old_rev),
'browser_href': req.href.browser(old_path, rev=old_rev),
'changeset_href': req.href.changeset(old_rev),
'restricted_href': req.href.changeset(old_rev, new_path=old_path),
'change': old_chg
}
if not (mode == 'path_history' and old_chg == Changeset.EDIT):
info.append(item)
if old_path and old_path != previous_path \
and not (mode == 'path_history' and old_path == normpath):
item['copyfrom_path'] = old_path
if mode == 'stop_on_copy':
break
if len(info) > limit: # we want limit+1 entries
break
previous_path = old_path
if info == []:
# FIXME: we should send a 404 error here
raise TracError("The file or directory '%s' doesn't exist "
"at revision %s or at any previous revision."
% (path, rev), 'Nonexistent path')
def make_log_href(path, **args):
link_rev = rev
if rev == str(repos.youngest_rev):
link_rev = None
params = {'rev': link_rev, 'mode': mode, 'limit': limit}
params.update(args)
if verbose:
params['verbose'] = verbose
return req.href.log(path, **params)
if len(info) == limit+1: # limit+1 reached, there _might_ be some more
next_rev = info[-1]['rev']
next_path = info[-1]['path']
add_link(req, 'next', make_log_href(next_path, rev=next_rev),
'Revision Log (restarting at %s, rev. %s)'
% (next_path, next_rev))
# now, only show 'limit' results
del info[-1]
req.hdf['log.items'] = info
revs = [i['rev'] for i in info]
changes = get_changes(self.env, repos, revs, verbose, req, format)
if format == 'rss':
# Get the email addresses of all known users
email_map = {}
for username,name,email in self.env.get_known_users():
if email:
email_map[username] = email
for cs in changes.values():
# For RSS, author must be an email address
author = cs['author']
author_email = ''
if '@' in author:
author_email = author
elif email_map.has_key(author):
author_email = email_map[author]
cs['author'] = author_email
cs['date'] = http_date(cs['date_seconds'])
elif format == 'changelog':
for rev in revs:
changeset = repos.get_changeset(rev)
cs = changes[rev]
cs['message'] = wrap(changeset.message, 70,
initial_indent='\t',
subsequent_indent='\t')
files = []
actions = []
for path, kind, chg, bpath, brev in changeset.get_changes():
files.append(chg == Changeset.DELETE and bpath or path)
actions.append(chg)
cs['files'] = files
cs['actions'] = actions
req.hdf['log.changes'] = changes
if req.args.get('format') == 'changelog':
return 'log_changelog.cs', 'text/plain'
elif req.args.get('format') == 'rss':
return 'log_rss.cs', 'application/rss+xml'
add_stylesheet(req, 'common/css/browser.css')
add_stylesheet(req, 'common/css/diff.css')
rss_href = make_log_href(path, format='rss', stop_rev=stop_rev)
add_link(req, 'alternate', rss_href, 'RSS Feed', 'application/rss+xml',
'rss')
changelog_href = make_log_href(path, format='changelog',
stop_rev=stop_rev)
add_link(req, 'alternate', changelog_href, 'ChangeLog', 'text/plain')
return 'log.cs', None
# IWikiSyntaxProvider methods
REV_RANGE = "%s[-:]%s" % ((ChangesetModule.CHANGESET_ID,)*2)
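    # e.g. matches "1:3" or "1-3" (a range of changeset ids)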
def get_wiki_syntax(self):
yield (
# [...] form, starts with optional intertrac: [T... or [trac ...
r"!?\[(?P<it_log>%s\s*)" % Formatter.INTERTRAC_SCHEME +
# <from>:<to> + optional path restriction
r"(?P<log_rev>%s)(?P<log_path>/[^\]]*)?\]" % self.REV_RANGE,
lambda x, y, z: self._format_link(x, 'log1', y[1:-1], y, z))
yield (
# r<from>:<to> form (no intertrac and no path restriction)
r"(?:\b|!)r%s\b" % self.REV_RANGE,
lambda x, y, z: self._format_link(x, 'log2', '@' + y[1:], y))
def get_link_resolvers(self):
yield ('log', self._format_link)
def _format_link(self, formatter, ns, match, label, fullmatch=None):
if ns == 'log1':
it_log = fullmatch.group('it_log')
rev = fullmatch.group('log_rev')
path = fullmatch.group('log_path') or '/'
target = '%s%s@%s' % (it_log, path, rev)
# prepending it_log is needed, as the helper expects it there
intertrac = formatter.shorthand_intertrac_helper(
'log', target, label, fullmatch)
if intertrac:
return intertrac
else: # ns == 'log2'
path, rev, line = get_path_rev_line(match)
stop_rev = None
for sep in ':-':
if not stop_rev and rev and sep in rev:
stop_rev, rev = rev.split(sep, 1)
href = formatter.href.log(path or '/', rev=rev, stop_rev=stop_rev)
return html.A(label, href=href, class_='source')
|
gpl-3.0
| -4,291,927,195,399,466,500
| 38.66
| 82
| 0.543217
| false
| 3.771396
| false
| false
| false
|
googleinterns/hw-fuzzing
|
experiment_scripts/plots/exp006_plot_coverage_noseeds.py
|
1
|
13291
|
#!/usr/bin/env python3
# Copyright 2020 Timothy Trippel
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import collections
import glob
import itertools
import os
import sys
from dataclasses import dataclass
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from hwfutils.string_color import color_str_green as green
from hwfutils.string_color import color_str_red as red
from hwfutils.string_color import color_str_yellow as yellow
# from scipy import stats
# ------------------------------------------------------------------------------
# Plot parameters
# ------------------------------------------------------------------------------
LABEL_FONT_SIZE = 8
TICK_FONT_SIZE = 8
LEGEND_FONT_SIZE = 8
LEGEND_TITLE_FONT_SIZE = 8
TIME_SCALE = "m"
SCALED_MAX_PLOT_TIME = 60
PLOT_FILE_NAME = "hwf_no_seeds.pdf"
PLOT_FORMAT = "PDF"
# ------------------------------------------------------------------------------
# Plot labels
# ------------------------------------------------------------------------------
TIME_LABEL = "Time"
TOPLEVEL_LABEL = "Core"
GRAMMAR_LABEL = "Grammar"
COVERAGE_TYPE_LABEL = "Coverage"
COVERAGE_LABEL = "Cov. (%)"
HW_LINE_COVERAGE_LABEL = "HW Line (VLT)"
SW_LINE_COVERAGE_LABEL = "SW Line (kcov)"
SW_REGION_COVERAGE_LABEL = "SW Basic Block (LLVM)"
# ------------------------------------------------------------------------------
# Other Labels
# ------------------------------------------------------------------------------
AFL_TEST_ID_LABEL = "Test-ID"
# ------------------------------------------------------------------------------
# Experiment Parameters
# ------------------------------------------------------------------------------
EXPERIMENT_BASE_NAME = "exp014-cpp-afl-%s-%s-%s-%s"
TOPLEVELS = ["aes", "hmac", "kmac", "rv_timer"]
OPCODE_TYPES = ["mapped"]
INSTR_TYPES = ["variable"]
TERMINATE_TYPES = ["never"]
TRIALS = range(0, 5)
# ------------------------------------------------------------------------------
# Other defines
# ------------------------------------------------------------------------------
TERMINAL_ROWS, TERMINAL_COLS = os.popen('stty size', 'r').read().split()
LINE_SEP = "=" * int(TERMINAL_COLS)
COUNT = 0
@dataclass
class SubplotAxisLimits:
x_lower: int = None
x_upper: int = None
y_lower: int = None
y_upper: int = None
@dataclass
class FigureAxisLimits:
kcov_limits: SubplotAxisLimits
llvm_cov_limits: SubplotAxisLimits
vlt_cov_limits: SubplotAxisLimits
@dataclass
class FuzzingData:
toplevel: str = ""
opcode_type: str = ""
instr_type: str = ""
terminate_type: str = ""
trial_num: int = -1
afl_data_path: str = ""
cov_data_path: str = ""
def __post_init__(self):
self.afl_data = self._load_afl_data()
self.kcov_data = self._load_cov_data("kcov")
self.llvm_cov_data = self._load_cov_data("llvm_cov")
self.vlt_cov_data = self._load_cov_data("vlt_cov")
def _load_afl_data(self):
afl_glob_path = os.path.join(self.afl_data_path, "out",
"afl_*_interactive", "plot_data")
afl_plot_data_files = glob.glob(afl_glob_path)
if len(afl_plot_data_files) != 1:
print(red("ERROR: AFL plot_data file no found."))
sys.exit(1)
# Load data into Pandas DataFrame
afl_df = self._load_csv_data(afl_plot_data_files[0])
# Remove leading/trailing white space from column names
afl_df = afl_df.rename(columns=lambda x: x.strip())
# Adjust time stamps to be relative to start time
afl_df.loc[:, "# unix_time"] -= afl_df.loc[0, "# unix_time"]
# Set time as index
afl_df = afl_df.set_index("# unix_time")
return afl_df
@staticmethod
def _id_str_to_int(id_str):
return int(id_str.lstrip("id:"))
def _load_cov_data(self, cov_type):
cov_data_path = "%s/logs/%s_cum.csv" % (self.cov_data_path, cov_type)
if not os.path.exists(cov_data_path):
print(red("ERROR: coverage data (%s) does not exist." % cov_data_path))
sys.exit(1)
# Load data into Pandas DataFrame
cov_df = self._load_csv_data(cov_data_path)
if cov_df.shape[0] < int(self.afl_data.iloc[-1, 2]):
print(red("ERROR: coverage data is missing (%s). Aborting!" % cov_type))
sys.exit(1)
# TODO(ttrippel): remove this hack after fixing run_cov_local.sh
if cov_type == "vlt_cov":
cov_df.drop(AFL_TEST_ID_LABEL, axis=1, inplace=True)
cov_df.insert(0, AFL_TEST_ID_LABEL, list(range(cov_df.shape[0])))
else:
# Convert Test-ID labels to ints
cov_df.loc[:,
AFL_TEST_ID_LABEL] = cov_df.loc[:, AFL_TEST_ID_LABEL].apply(
FuzzingData._id_str_to_int)
# Set ID column as the row indicies
cov_df = cov_df.set_index(AFL_TEST_ID_LABEL)
return cov_df
def _load_csv_data(self, csv_file):
return pd.read_csv(csv_file,
delimiter=',',
index_col=None,
engine='python')
@property
def grammar(self):
return "%s-%s-%s" % (self.opcode_type, self.instr_type,
self.terminate_type)
def get_paths_total_at_time(time, afl_data):
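  # AFL's plot_data rows are sampled at irregular timestamps; walk backwards
  # to the most recent entry at or before `time`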
while time not in afl_data.index:
time -= 1
return afl_data.loc[time, "paths_total"]
def get_cov_at_time(paths_total, cov_data, cov_data_key):
return cov_data.loc[paths_total, cov_data_key] * 100.0
def get_vlt_cov_at_time(paths_total, vlt_cov_data):
vlt_cov = (float(vlt_cov_data.loc[paths_total, "Lines-Covered"]) /
float(vlt_cov_data.loc[paths_total, "Total-Lines"])) * 100.0
return vlt_cov
def build_avg_coverage_df(exp2data,
time_units="m",
normalize_to_start=False,
consolidation="max"):
print(yellow("Building average coverage dataframe ..."))
# Create empty dictionary that will be used to create a Pandas DataFrame that
# looks like the following:
# +--------------------------------------------------------------------+
# | toplevel | isa (grammar) | coverage type | time (s) | coverage (%) |
# +--------------------------------------------------------------------+
# | ... | ... | ... | ... | ... |
coverage_dict = {
TOPLEVEL_LABEL: [],
GRAMMAR_LABEL: [],
COVERAGE_TYPE_LABEL: [],
TIME_LABEL: [],
COVERAGE_LABEL: [],
}
for exp_name, fd_list in exp2data.items():
anchor_fd = fd_list[0]
for time, row in anchor_fd.afl_data.iterrows():
# scale time
if time_units == "h":
scaled_time = float(time) / float(3600)
elif time_units == "m":
scaled_time = float(time) / float(60)
else:
scaled_time = time
# add circuit, grammar, and time values to dataframe row
for _ in range(3):
coverage_dict[TOPLEVEL_LABEL].append(anchor_fd.toplevel)
coverage_dict[GRAMMAR_LABEL].append(anchor_fd.grammar)
coverage_dict[TIME_LABEL].append(scaled_time)
# compute average coverage at all points in time
kcov_avg = 0
llvm_cov_avg = 0
vlt_cov_avg = 0
kcov_max = 0
llvm_cov_max = 0
vlt_cov_max = 0
i = 0
for fd in fd_list:
# get the paths_total at the current time
paths_total = get_paths_total_at_time(time, fd.afl_data) - 1
# get coverage data
# print(exp_name, i)
kcov = get_cov_at_time(paths_total, fd.kcov_data, "Line-Coverage-(%)")
kcov_avg += kcov
kcov_max = max(kcov_max, kcov)
llvm_cov = get_cov_at_time(paths_total, fd.llvm_cov_data,
"Region-Coverage-(%)")
llvm_cov_avg += llvm_cov
llvm_cov_max = max(llvm_cov_max, llvm_cov)
vlt_cov = get_vlt_cov_at_time(paths_total, fd.vlt_cov_data)
vlt_cov_avg += vlt_cov
vlt_cov_max = max(vlt_cov_max, vlt_cov)
i += 1
kcov_avg /= float(len(fd_list))
llvm_cov_avg /= float(len(fd_list))
vlt_cov_avg /= float(len(fd_list))
# save time 0 coverage to normalize
if time == 0:
kcov_avg_t0 = kcov_avg
llvm_cov_avg_t0 = llvm_cov_avg
vlt_cov_avg_t0 = vlt_cov_avg
if normalize_to_start:
kcov_avg /= kcov_avg_t0
llvm_cov_avg /= llvm_cov_avg_t0
vlt_cov_avg /= vlt_cov_avg_t0
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
if consolidation == "avg":
coverage_dict[COVERAGE_LABEL].append(kcov_avg)
coverage_dict[COVERAGE_LABEL].append(llvm_cov_avg)
coverage_dict[COVERAGE_LABEL].append(vlt_cov_avg)
else:
coverage_dict[COVERAGE_LABEL].append(kcov_max)
coverage_dict[COVERAGE_LABEL].append(llvm_cov_max)
coverage_dict[COVERAGE_LABEL].append(vlt_cov_max)
# extend lines to max time value
if coverage_dict[TIME_LABEL][-1] != SCALED_MAX_PLOT_TIME:
for _ in range(3):
coverage_dict[TOPLEVEL_LABEL].append(anchor_fd.toplevel)
coverage_dict[GRAMMAR_LABEL].append(anchor_fd.grammar)
coverage_dict[TIME_LABEL].append(SCALED_MAX_PLOT_TIME)
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(SW_REGION_COVERAGE_LABEL)
coverage_dict[COVERAGE_TYPE_LABEL].append(HW_LINE_COVERAGE_LABEL)
coverage_dict[COVERAGE_LABEL].extend(coverage_dict[COVERAGE_LABEL][-3:])
# print("Max SW Line coverage: ", coverage_dict[COVERAGE_LABEL][-3])
# print("Max SW Basic Block coverage:", coverage_dict[COVERAGE_LABEL][-2])
print("Max HW Line coverage: ", coverage_dict[COVERAGE_LABEL][-1])
print(green("Done."))
print(LINE_SEP)
return pd.DataFrame.from_dict(coverage_dict)
def load_fuzzing_data(afl_data_root, cov_data_root):
print(yellow("Loading data ..."))
exp2data = collections.defaultdict(list)
# TODO: change this to automatically extract names from a single exp. number
# extract each data file into a Pandas dataframe
isas = list(
itertools.product(TOPLEVELS, OPCODE_TYPES, INSTR_TYPES, TERMINATE_TYPES))
for toplevel, opcode_type, instr_type, terminate_type in isas:
for trial in TRIALS:
# Build complete path to data files
exp_name_wo_trialnum = EXPERIMENT_BASE_NAME % (
toplevel, opcode_type, instr_type, terminate_type)
exp_name_wo_trialnum = exp_name_wo_trialnum.replace("_", "-")
exp_name = "%s-%d" % (exp_name_wo_trialnum, trial)
afl_data_path = os.path.join(afl_data_root, exp_name)
cov_data_path = os.path.join(cov_data_root, exp_name)
# Load fuzzing data into an object
exp2data[exp_name_wo_trialnum].append(
FuzzingData(toplevel, opcode_type, instr_type, terminate_type, trial,
afl_data_path, cov_data_path))
return exp2data
def plot_avg_coverage_vs_time(cov_df, time_units="m"):
print(yellow("Generating plot ..."))
# Set plot style and extract only HDL line coverage
sns.set_theme(context="notebook", style="darkgrid")
hdl_cov_df = cov_df[cov_df[COVERAGE_TYPE_LABEL] == HW_LINE_COVERAGE_LABEL]
# create figure and plot the data
fig, ax = plt.subplots(1, 1, figsize=(4, 2))
sns.lineplot(data=hdl_cov_df,
x=TIME_LABEL,
y=COVERAGE_LABEL,
hue=TOPLEVEL_LABEL,
ax=ax,
markers="x")
# format the plot
if time_units == "m":
time_units_label = "min."
elif time_units == "h":
time_units_label = "hours"
else:
time_units_label = "s"
ax.set_xlabel(TIME_LABEL + " (%s)" % time_units_label,
fontsize=LABEL_FONT_SIZE)
ax.set_ylabel("HDL Line " + COVERAGE_LABEL, fontsize=LABEL_FONT_SIZE)
ax.tick_params("x", labelsize=TICK_FONT_SIZE)
ax.tick_params("y", labelsize=TICK_FONT_SIZE)
plt.legend(title="Core",
fontsize=LEGEND_FONT_SIZE,
title_fontsize=LEGEND_TITLE_FONT_SIZE,
ncol=2)
plt.tight_layout()
# save the plot
plt.savefig(PLOT_FILE_NAME, format=PLOT_FORMAT)
print(green("Done."))
print(LINE_SEP)
def main(argv):
parser = argparse.ArgumentParser(description="Plotting script for exp. 004.")
parser.add_argument("afl_data_root")
parser.add_argument("cov_data_root")
args = parser.parse_args()
# Load runtime data
exp2data = load_fuzzing_data(args.afl_data_root, args.cov_data_root)
avg_cov_df = build_avg_coverage_df(exp2data,
time_units=TIME_SCALE,
normalize_to_start=False)
# Plot data
plot_avg_coverage_vs_time(avg_cov_df, time_units=TIME_SCALE)
if __name__ == "__main__":
main(sys.argv[1:])
|
apache-2.0
| 8,622,760,688,873,013,000
| 35.614325
| 80
| 0.587841
| false
| 3.292296
| true
| false
| false
|
alatiera/YEAP
|
src/rssinfo.py
|
1
|
2735
|
import feedparser as fp
from . import parse_feeds
def feedinfo(feed: parse_feeds.FEEDTUP) -> None:
"""Print the contents of the FeedTup of the Rss feed."""
# Based on RSS 2.0 Spec
# https://cyber.harvard.edu/rss/rss.html
print('\n----- Feed Info -----')
# Common elements
print(f'Feed Title: {feed.title}')
print(f'Link: {feed.link}')
print(f'Description: {feed.description}')
print(f'Published: {feed.published}')
# print('Published Parsed:{}'.format(feedobj.feed.get('published_parsed')))
# Uncommon elements
if feed.image:
print('Image: {}'.format(feed.image.get('href')))
# print('Categories: {}'.format(feedobj.feed.get('categories')))
# print('Cloud: {}'.format(feedobj.feed.get('cloud')))
# Extra
print(f'Author: {feed.author}')
print(f'Language: {feed.language}')
print(f'Rights: {feed.copyright}')
def iteminfo(entry: parse_feeds.ENTRYTUP, content: bool = True) -> None:
"""Print the contents of the Item object of the feed."""
print('\n----- Item Info -----')
# Common elements
print(f'Title: {entry.title}')
print(f'Description: {entry.description}')
print(f'Link: {entry.link}')
print(f'Published: {entry.published}')
# print(f'Published Parsed: {entry.published_parsed}')
# print(f'ID: {entry.id}')
# Uncommon elements
# Enclosures
# print(f'Enclosures: {entry.enclosures}')
print(f'Source: {entry.uri}')
print(f'Type: {entry.type}')
print(f'Length: {entry.length}')
# Content
if content and entry.content is not None:
print(f'Contents: {entry.content}')
for content in entry.content:
con = parse_feeds.content_data(content)
entrycontent(con)
print(f'Comments: {entry.comments}')
def entrycontent(content: parse_feeds.CONTUP) -> None:
"""Print the data of entry.content."""
print('\n----- Content Info -----')
print(f'Content Base: {content.base}')
print(f'Content Type: {content.type}')
print(f'Content Value: {content.value}')
print(f'Content Base: {content.language}')
def fullprint(feeduri: str, limit: int = 3) -> None:
"""Print the data of the :feeduri feed, :limit limits the enties result."""
feed = fp.parse(feeduri)
print('Feed version: {}'.format(feed.get('version')))
feed_ = parse_feeds.feed_data(feed)
feedinfo(feed_)
for ent in feed_.entries[:limit]:
entry = parse_feeds.entry_data(ent)
iteminfo(entry)
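
# Example (illustrative URL; any RSS 2.0 feed should work):
#   fullprint('https://example.com/rss.xml', limit=2)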
def main() -> None:
"""Ask for a url and print the contents of the rss feed."""
url = input('Insert Feed URL: ')
    # TODO: validate/sanitize the URL before handing it to the parser
fullprint(url)
if __name__ == '__main__':
main()
|
gpl-3.0
| -6,923,101,523,011,745,000
| 28.408602
| 79
| 0.623766
| false
| 3.484076
| false
| false
| false
|
democratech/LaPrimaire
|
tools/simulation/simulation_lots.py
|
1
|
2675
|
import numpy as np
import mj
import matplotlib.pyplot as plt
from matplotlib import cm
import sys, os
# ---------------------------------------------------------------------------------------------------
# show that the lot (candidate subset) construction algorithm is reliable
#
Ncandidats = 100
electeurs = np.arange(10000,100000,10000)
Nlot = 10
root = "simulation/lots/"
def simulation(Nelecteurs, Nlot):
occurence = np.zeros(Ncandidats)
corr = np.zeros((Ncandidats,Ncandidats))
log_occr = root + "occr/Nc_%i-Ne_%i-Nl_%i.txt" % (Ncandidats, Nelecteurs, Nlot)
log_corr = root + "corr/Nc_%i-Ne_%i-Nl_%i.txt" % (Ncandidats, Nelecteurs, Nlot)
try:
os.makedirs(root + "occr")
os.makedirs(root + "corr")
except OSError:
pass
if os.path.isfile(log_occr) and os.path.isfile(log_corr):
occurence = np.genfromtxt(log_occr, delimiter=",")
corr = np.genfromtxt(log_corr, delimiter=",")
return [occurence,corr]
for i in range(Nelecteurs):
lot = mj.subset(Ncandidats, Nlot, occurence)
for j in lot:
corr[j,lot] += 1
np.savetxt(log_corr, corr, delimiter = ",", fmt="%i")
np.savetxt(log_occr, occurence, delimiter = ",", fmt="%i")
return [occurence, corr]
def plotOccurences(occurence):
width = 0.95
m = np.mean(occurence)
plt.bar(range(Ncandidats), occurence, width,color="#3a6d99", edgecolor='white')
plt.ylabel('Nombre d\'occurence')
plt.xlabel('Candidats')
plt.xlim([0,Ncandidats])
plt.plot([0, Ncandidats], [m,m], color="#d91d1c")
plt.show()
def plotRSMvsCandidats(occurences, electeurs):
RSM = []
for occr in occurences:
m = np.mean(occr)
std = np.std(occr)
RSM.append(std/m)
print RSM
plt.ylabel('Ratio deviation/moyenne')
plt.xlabel('Nombre d\'electeurs')
plt.xlim([0,max(electeurs)])
plt.plot(electeurs, RSM, color="#d91d1c")
plt.show()
def plotCorrelations(corr):
# mask = np.tri(corr.shape[0], k=-1)
# A = np.ma.array(corr, mask=mask)
plt.pcolor(corr)
plt.colorbar()
plt.ylabel('Candidats')
plt.xlabel('Candidats')
# plt.yticks(np.arange(0.5,10.5),range(0,10))
# plt.xticks(np.arange(0.5,10.5),range(0,10))
plt.show()
# ------
# plot 1
#[occr,corr] = simulation(100000, Nlot)
#plotOccurences(occr)
# ------
# plot 2
# occrs = []
# for e in electeurs:
# [occr,corr] = simulation(e, Nlot)
# occrs.append(occr)
# plotRSMvsCandidats(occrs, electeurs)
# ------
# plot 3
#
[occr,corr] = simulation(100000, Nlot)
plotCorrelations(corr)
|
agpl-3.0
| 1,603,931,362,166,058,000
| 28.406593
| 101
| 0.582804
| false
| 2.780665
| false
| false
| false
|
aerler/WRF-Projects
|
src/archive/plotInnerPrecip.py
|
1
|
6163
|
'''
Created on 2012-09-29
A simple script that reads a WRF netcdf-4 file and displays a 2D field in a proper geographic projection;
application here is plotting precipitation in the inner WRF domain.
@author: Andre R. Erler
'''
## includes
# matplotlib config: size etc.
import numpy as np
import matplotlib.pylab as pyl
import matplotlib as mpl
mpl.rc('lines', linewidth=1.)
mpl.rc('font', size=10)
# pygeode stuff
from myDatasets.loadWRF import openWRF
from myPlots.plots import surfacePlot
from mpl_toolkits.basemap import Basemap
from mpl_toolkits.basemap import cm, maskoceans
#from pygeode.plot import plot_v1 as pl
#from pygeode.plot import basemap as bm
## settings
nax = 2 # number of panels
ndom = 2
sf = dict(dpi=150) # print properties
folder = '/home/me/Research/Dynamical Downscaling/figures/' # figure directory
if __name__ == '__main__':
## read data
data = openWRF('ctrl-1',[1982],list(range(11,12)))
print(data[ndom-1])
## compute data
precip = []; ndays = []
for n in range(ndom):
nrec = data[n].time.values[-1]+1
ndays = data[n].xtime(time=nrec-1).get() /24/60 # xtime is in minutes, need days
dailyrain = data[n].rain(time=nrec-1).get() / ndays
# ndays = ( data[n].xtime(time=nrec-1).get() - data[n].xtime(time=0).get() )/24/60 # xtime is in minutes, need days
# dailyrain = ( data[n].rain(time=nrec-1).get() - data[n].rain(time=0).get() ) / ndays
precip.append(dailyrain.squeeze())
## setup projection
f = pyl.figure(facecolor='white', figsize = (6.25,4.25))
ax = []
for n in range(nax):
ax.append(f.add_subplot(1,2,n+1))
f.subplots_adjust(bottom=0.12, left=0.06, right=.97, top=.94, hspace=0.05, wspace=0.05) # hspace, wspace
# setup lambert conformal basemap.
# lat_1 is first standard parallel.
# lat_2 is second standard parallel (defaults to lat_1).
# lon_0,lat_0 is central point.
  # rsphere=(6378137.00,6356752.3142) specifies the WGS84 ellipsoid
# area_thresh=1000 means don't plot coastline features less
# than 1000 km^2 in area.
lcc = dict(projection='lcc', lat_0=59, lon_0=-123, lat_1=53, rsphere=(6378137.00,6356752.3142),#
width=310*10e3, height=315*10e3, area_thresh = 1000., resolution='l')
# map projection boundaries for inner WRF domain
map = []
for n in range(nax):
map.append(Basemap(ax=ax[n],**lcc)) # one map for each panel!!
## Plot data
grid = 10; res = 'l'
clevs = np.linspace(0,25,51)
norm = mpl.colors.Normalize(vmin=min(clevs),vmax=max(clevs),clip=True)
cmap = mpl.cm.gist_ncar #s3pcpn
cmap.set_over('purple'); cmap.set_under('blue')
# coordinates
lat = []; lon = []; x = []; y = []
for n in range(ndom):
lat.append(data[n].lat.get())
lon.append(data[n].lon.get())
xx, yy = map[0](lon[n],lat[n]) # convert to map-native coordinates
x.append(xx); y.append(yy)
# draw boundaries of inner and outer domains
bdy2 = np.ones_like(lat[1]); bdy2[0,:]=0; bdy2[-1,:]=0; bdy2[:,0]=0; bdy2[:,-1]=0
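  # contouring bdy2 at level 0 traces the transition between the zeroed border
  # cells and the interior ones, i.e. the outline of the inner domain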
for n in range(nax):
# N.B.: bdy2 depends on inner domain coordinates x[1],y[1]
map[n].contour(x[1],y[1],bdy2,[0],ax=ax[n], colors='k') # draw boundary of inner domain
# # terrain data: mask out ocean
# zs = []
# for n in xrange(ndom):
# zs.append(maskoceans(lon[n],lat[n],data[n].zs.get(),resolution=res,grid=grid))
# draw data
cd = []
for n in range(nax): # only plot first domain in first panel
for m in range(n+1): # but also plot first domain in second panel (as background)
print('panel %i / domain %i'%(n,m))
print('precip: min %f / max %f / mean %f'%(precip[m].min(),precip[m].max(),precip[m].mean()))
cd.append(map[n].contourf(x[m],y[m],precip[m],clevs,ax=ax[n],cmap=cmap, norm=norm,extend='both'))
# add colorbar
cax = f.add_axes([0.1, 0.06, 0.8, 0.03])
for cn in cd: # [c1d1, c1d2, c2d2]:
cn.set_clim(vmin=min(clevs),vmax=max(clevs))
cbar = f.colorbar(cax=cax,mappable=cd[0],orientation='h',extend='both') # ,size='3%',pad='2%'
cbl = np.linspace(min(clevs),max(clevs),6)
cbar.set_ticks(cbl); cbar.set_ticklabels(['%02.1f mm'%(lev) for lev in cbl])
## Annotation
# add labels
f.suptitle('Average Daily Precipitation',fontsize=12)
ax[0].set_title('Outer Domain (30 km)',fontsize=11)
ax[1].set_title('Inner Domain (10 km)',fontsize=11)
# ax.set_xlabel('Longitude'); ax.set_ylabel('Latitude')
map[0].drawmapscale(-135, 49, -137, 57, 800, barstyle='fancy', yoffset=0.01*(map[n].ymax-map[n].ymin))
for n in range(nax):
if n == 0 or n == 1: Bottom = True
else: Bottom = False
if n == 0: Left = True
else: Left = False
# land/sea mask
map[n].drawlsmask(ocean_color='blue', land_color='green',resolution=res,grid=grid)
# add map stuff
map[n].drawcoastlines(linewidth=0.5)
map[n].drawcountries(linewidth=0.5)
# map[n].drawrivers(linewidth=0.5)
# map[n].fillcontinents(color = 'coral')
map[n].drawmapboundary(fill_color='k',linewidth=2)
# labels = [left,right,top,bottom]
map[n].drawparallels([45,65],linewidth=1, labels=[Left,False,False,False])
map[n].drawparallels([55,75],linewidth=0.5, labels=[Left,False,False,False])
map[n].drawmeridians([-140,-120,-100],linewidth=1, labels=[False,False,False,Bottom])
map[n].drawmeridians([-150,-130,-110],linewidth=0.5, labels=[False,False,False,Bottom])
# save figure to disk
f.savefig(folder+'AnnualPrecip.pdf', **sf) # save figure to pdf
print(('\nSaved figure in '+folder+'AnnualPrecip.pdf'))
# show plots
pyl.show()
## more projections
# setup lambert azimuthal equal area basemap.
# lat_ts is latitude of true scale.
# lon_0,lat_0 is central point.
# laea = dict(projection='laea', lat_0=57, lon_0=-137, lat_ts=53, resolution='l', #
# width=259*30e3, height=179*30e3, rsphere=(6378137.00,6356752.3142), area_thresh = 1000.)
# lon_0, lat_0 are the center point of the projection.
# resolution = 'l' means use low resolution coastlines.
# ortho = dict(projection='ortho', lat_0 = 57, lon_0 = -137, resolution = 'l', area_thresh = 1000.)
# 'parallels':[30,50,70], 'meridians':[-180,-150,-120,-90], 'labels':[1,0,0,1]}
|
gpl-3.0
| 8,309,401,785,416,781,000
| 41.212329
| 122
| 0.651631
| false
| 2.753798
| false
| false
| false
|
ralucagoja/pentagram
|
practica/practica/settings.py
|
1
|
2699
|
"""
Django settings for practica project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k(p-s3iq!p3oj=70#pb3rh^dyz7w#t_(f)pvj1szs7e$7o_my7'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'rest_framework.authtoken',
'pentagram',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'practica.urls'
WSGI_APPLICATION = 'practica.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (os.path.join(BASE_DIR, 'templates'),)
REST_FRAMEWORK = {
    'DEFAULT_PERMISSION_CLASSES': (
        'rest_framework.permissions.IsAuthenticated',
    ),
    'DEFAULT_AUTHENTICATION_CLASSES': (
        'rest_framework.authentication.SessionAuthentication',
        'rest_framework.authentication.TokenAuthentication',
    ),
}
STATICFILES_DIRS = (
os.path.join(BASE_DIR, "static"),
)
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
MEDIA_URL = '/media/'
LOGIN_REDIRECT_URL = "homepage"
|
gpl-3.0
| -5,448,509,871,541,174,000
| 23.324324
| 71
| 0.712116
| false
| 3.251807
| false
| false
| false
|
belokop/indico_bare
|
indico_zodbimport/modules/event_categories.py
|
1
|
3197
|
# This file is part of Indico.
# Copyright (C) 2002 - 2016 European Organization for Nuclear Research (CERN).
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# Indico is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Indico; if not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals, division
from operator import attrgetter
import transaction
from indico.core.db import db
from indico.modules.events.models.events import Event
from indico.util.console import verbose_iterator, cformat
from indico.util.struct.iterables import committing_iterator
from indico_zodbimport import Importer
class EventCategoriesImporter(Importer):
def has_data(self):
return bool(Event.query.filter(Event.category_id.isnot(None)).count())
def migrate(self):
self._load_data()
self.migrate_event_categories()
def _load_data(self):
self.category_mapping = {}
for category in self.zodb_root['categories'].itervalues():
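            # getCategoryPath() appears to be root-first; reversing stores the
            # chain leaf-first, so category_chain[0] is the immediate category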
self.category_mapping[int(category.id)] = map(int, reversed(category.getCategoryPath()))
def migrate_event_categories(self):
self.print_step("Migrating event categories")
delete_events = set()
for conf in committing_iterator(self._iter_events()):
try:
category_chain = self.category_mapping[int(conf._Conference__owners[0].id)]
except (IndexError, KeyError):
self.print_error(cformat('%{red!}Event has no category!'), event_id=conf.id)
delete_events.add(int(conf.id))
continue
Event.query.filter_by(id=int(conf.id)).update({Event.category_id: category_chain[0],
Event.category_chain: category_chain},
synchronize_session=False)
if not self.quiet:
self.print_success(repr(category_chain), event_id=conf.id)
for event_id in delete_events:
self.print_warning(cformat('%{yellow!}Deleting broken event {}').format(event_id))
Event.query.filter_by(id=event_id).update({Event.is_deleted: True}, synchronize_session=False)
if self.zodb_root['conferences'].has_key(str(event_id)):
del self.zodb_root['conferences'][str(event_id)]
db.session.commit()
transaction.commit()
def _iter_events(self):
it = self.zodb_root['conferences'].itervalues()
total = len(self.zodb_root['conferences'])
if self.quiet:
it = verbose_iterator(it, total, attrgetter('id'), lambda x: x.__dict__.get('title', ''))
for conf in self.flushing_iterator(it):
yield conf
|
gpl-3.0
| 1,307,890,533,711,365,600
| 42.202703
| 106
| 0.649984
| false
| 4.026448
| false
| false
| false
|
blancha/abcngspipelines
|
bischipseq/convert1StartTo0Start_batch.py
|
1
|
2156
|
#!/usr/bin/env python3
# Version 1.0
# Author Alexis Blanchet-Cohen
# Date: 15/06/2014
import argparse
import glob
import os
import subprocess
import util
# Read the command line arguments.
parser = argparse.ArgumentParser(description='Generate scripts to convert bedgraph files from one-based start to zero-based start.')
parser.add_argument("-s", "--scriptsDirectory", help="Scripts directory.", default="convert1StartTo0Start")
parser.add_argument("-i", "--inputDirectory", help="Input directory with bedgraph files.", default="../bedgraph/methylation_counts_sorted/")
parser.add_argument("-o", "--outputDirectory", help="Output directory with sorted bedgraph files.", default="../bedgraph/methylation_counts_sorted_0_start/")
parser.add_argument("-q", "--submitJobsToQueue", help="Submit jobs to queue immediately.", choices=["yes", "no", "y", "n"], default="no")
args = parser.parse_args()
# Process the command line arguments.
scriptsDirectory = os.path.abspath(args.scriptsDirectory)
inputDirectory = os.path.abspath(args.inputDirectory)
outputDirectory = os.path.abspath(args.outputDirectory)
samples = util.getMergedsamples()
# Read configuration files.
config = util.readConfigurationFiles()
# Create scripts directory, if it does not exist yet, and cd to it.
if not os.path.exists(scriptsDirectory):
os.mkdir(scriptsDirectory)
os.chdir(scriptsDirectory)
# Create output directory, if it does not exist yet.
if not os.path.exists(outputDirectory):
os.mkdir(outputDirectory)
for file in os.listdir(inputDirectory):
file = os.path.splitext(file)[0]
# Create script file.
scriptName = 'convert1StartTo0Start_' + file + '.sh'
script = open(scriptName, 'w')
util.writeHeader(script, config, "convert1StartTo0Start")
script.write("convert1StartTo0Start.py " + "\\\n")
script.write("--one_start_bedgraph " + inputDirectory + "/" + file + ".bedgraph " + "\\\n")
script.write("--zero_start_bedgraph " + outputDirectory + "/" + file + ".bedgraph")
script.close()
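
# Each generated script wraps a single command of the form (file names are
# illustrative):
#   convert1StartTo0Start.py \
#     --one_start_bedgraph <inputDirectory>/<file>.bedgraph \
#     --zero_start_bedgraph <outputDirectory>/<file>.bedgraph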
if (args.submitJobsToQueue.lower() == "yes") | (args.submitJobsToQueue.lower() == "y"):
subprocess.call("submitJobs.py", shell=True)
|
gpl-3.0
| -3,264,305,320,030,537,000
| 40.461538
| 157
| 0.728664
| false
| 3.551895
| false
| false
| false
|
CyberLabs-BR/face_detect
|
pyimagesearch/nn/conv/minivggnet.py
|
1
|
1990
|
# import the necessary packages
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Flatten
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras import backend as K
class MiniVGGNet:
@staticmethod
def build(width, height, depth, classes):
# initialize the model along with the input shape to be
# "channels last" and the channels dimension itself
model = Sequential()
inputShape = (height, width, depth)
chanDim = -1
# if we are using "channels first", update the input shape
# and channels dimension
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
chanDim = 1
# first CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(32, (3, 3), padding="same",
input_shape=inputShape))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(32, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# second CONV => RELU => CONV => RELU => POOL layer set
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(Conv2D(64, (3, 3), padding="same"))
model.add(Activation("relu"))
model.add(BatchNormalization(axis=chanDim))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# first (and only) set of FC => RELU layers
model.add(Flatten())
model.add(Dense(512))
model.add(Activation("relu"))
model.add(BatchNormalization())
model.add(Dropout(0.5))
# softmax classifier
model.add(Dense(classes))
model.add(Activation("softmax"))
# return the constructed network architecture
return model
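# Example usage (illustrative, not part of the original file): build the
# network for 32x32 RGB inputs and 10 classes, then compile it. The optimizer
# settings below are assumptions, not values taken from this repository.
if __name__ == "__main__":
    from keras.optimizers import SGD
    model = MiniVGGNet.build(width=32, height=32, depth=3, classes=10)
    model.compile(loss="categorical_crossentropy",
                  optimizer=SGD(lr=0.01, momentum=0.9, nesterov=True),
                  metrics=["accuracy"])
    model.summary()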
|
mit
| 1,556,194,539,473,619,200
| 32.183333
| 60
| 0.724623
| false
| 3.199357
| false
| false
| false
|
dl1ksv/gr-display
|
docs/doxygen/doxyxml/text.py
|
1
|
1297
|
#
# Copyright 2010 Free Software Foundation, Inc.
#
# This file was generated by gr_modtool, a tool from the GNU Radio framework
# This file is a part of gr-display
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
"""
Utilities for extracting text from generated classes.
"""
from __future__ import unicode_literals
def is_string(txt):
    return isinstance(txt, str)
def description(obj):
if obj is None:
return None
return description_bit(obj).strip()
def description_bit(obj):
if hasattr(obj, 'content'):
contents = [description_bit(item) for item in obj.content]
result = ''.join(contents)
elif hasattr(obj, 'content_'):
contents = [description_bit(item) for item in obj.content_]
result = ''.join(contents)
elif hasattr(obj, 'value'):
result = description_bit(obj.value)
elif is_string(obj):
return obj
else:
raise Exception('Expecting a string or something with content, content_ or value attribute')
    # If this bit is a paragraph then add some line breaks.
if hasattr(obj, 'name') and obj.name == 'para':
result += "\n\n"
return result
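# Example usage (illustrative, not part of the original file): any object
# exposing a `content` list of strings or nested parts can be rendered.
if __name__ == '__main__':
    class _Para(object):
        name = 'para'
        def __init__(self, *content):
            self.content = list(content)
    print(description(_Para('Extracts text ', _Para('recursively.'))))
    # -> "Extracts text recursively."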
|
gpl-3.0
| 8,988,638,054,465,679,000
| 27.195652
| 100
| 0.643022
| false
| 3.918429
| false
| false
| false
|
bmun/huxley
|
huxley/core/admin/committee_feedback.py
|
1
|
4452
|
# Copyright (c) 2011-2021 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import csv
from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import reverse
from django.http import HttpResponse, HttpResponseRedirect
from googleapiclient.discovery import build
from google.oauth2 import service_account
from huxley.core.models import CommitteeFeedback
class CommitteeFeedbackAdmin(admin.ModelAdmin):
search_fields = ('committee__name', )
def get_rows(self):
rows = []
rows.append([
'Committee', 'General Rating', 'General Comment', 'Chair 1',
'Chair 1 Rating', 'Chair 1 Comment', 'Chair 2 Name',
'Chair 2 Rating', 'Chair 2 Comment', 'Chair 3 Name',
'Chair 3 Rating', 'Chair 3 Comment', 'Chair 4 Name',
'Chair 4 Rating', 'Chair 4 Comment', 'Chair 5 Name',
'Chair 5 Rating', 'Chair 5 Comment', 'Chair 6 Name',
'Chair 6 Rating', 'Chair 6 Comment', 'Chair 7 Name',
'Chair 7 Rating', 'Chair 7 Comment', 'Chair 8 Name',
'Chair 8 Rating', 'Chair 8 Comment', 'Chair 9 Name',
'Chair 9 Rating', 'Chair 9 Comment', 'Chair 10 Name',
'Chair 10 Rating', 'Chair 10 Comment', 'Perception of Berkeley',
'Money Spent'
])
for feedback in CommitteeFeedback.objects.all().order_by(
'committee__name'):
rows.append([
feedback.committee.name, feedback.rating, feedback.comment,
feedback.chair_1_name, feedback.chair_1_rating,
feedback.chair_1_comment, feedback.chair_2_name,
feedback.chair_2_rating, feedback.chair_2_comment,
feedback.chair_3_name, feedback.chair_3_rating,
feedback.chair_3_comment, feedback.chair_4_name,
feedback.chair_4_rating, feedback.chair_4_comment,
feedback.chair_5_name, feedback.chair_5_rating,
feedback.chair_5_comment, feedback.chair_6_name,
feedback.chair_6_rating, feedback.chair_6_comment,
feedback.chair_7_name, feedback.chair_7_rating,
feedback.chair_7_comment, feedback.chair_8_name,
feedback.chair_8_rating, feedback.chair_8_comment,
feedback.chair_9_name, feedback.chair_9_rating,
feedback.chair_9_comment, feedback.chair_10_name,
feedback.chair_10_rating, feedback.chair_10_comment,
feedback.berkeley_perception, feedback.money_spent
])
return rows
def list(self, request):
'''Return a CSV file containing all committee feedback.'''
feedbacks = HttpResponse(content_type='text/csv')
feedbacks[
'Content-Disposition'] = 'attachment; filename="feedback.csv"'
writer = csv.writer(feedbacks)
for row in self.get_rows():
writer.writerow(row)
return feedbacks
def sheets(self, request):
if settings.SHEET_ID:
SHEET_RANGE = 'Feedback!A1:AI'
# Store credentials
creds = service_account.Credentials.from_service_account_file(
settings.SERVICE_ACCOUNT_FILE, scopes=settings.SCOPES)
data = self.get_rows()
body = {
'values': data,
}
service = build('sheets', 'v4', credentials=creds)
response = service.spreadsheets().values().clear(
spreadsheetId=settings.SHEET_ID,
range=SHEET_RANGE,
).execute()
response = service.spreadsheets().values().update(
spreadsheetId=settings.SHEET_ID,
range=SHEET_RANGE,
valueInputOption='USER_ENTERED',
body=body).execute()
return HttpResponseRedirect(
reverse('admin:core_committeefeedback_changelist'))
def get_urls(self):
return super(CommitteeFeedbackAdmin, self).get_urls() + [
url(r'list',
self.admin_site.admin_view(self.list),
name='core_committeefeedback_list'),
url(
r'sheets',
self.admin_site.admin_view(self.sheets),
name='core_committeefeedback_sheets',
),
]
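# How this admin class would typically be hooked up (illustrative only; the
# actual registration lives elsewhere in the project):
# admin.site.register(CommitteeFeedback, CommitteeFeedbackAdmin)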
|
bsd-3-clause
| -1,210,776,847,596,304,000
| 39.472727
| 77
| 0.590296
| false
| 3.964381
| false
| false
| false
|
hankcs/udacity-deep-learning
|
2_fullyconnected.py
|
1
|
12463
|
# coding: utf-8
# Deep Learning
# =============
#
# Assignment 2
# ------------
#
# Previously in `1_notmnist.ipynb`, we created a pickle with formatted datasets for training, development and testing on the [notMNIST dataset](http://yaroslavvb.blogspot.com/2011/09/notmnist-dataset.html).
#
# The goal of this assignment is to progressively train deeper and more accurate models using TensorFlow.
# In[ ]:
# These are all the modules we'll be using later. Make sure you can import them
# before proceeding further.
from __future__ import print_function
import numpy as np
import tensorflow as tf
from six.moves import cPickle as pickle
from six.moves import range
# First reload the data we generated in `1_notmnist.ipynb`.
# In[ ]:
pickle_file = 'notMNIST.pickle'
with open(pickle_file, 'rb') as f:
save = pickle.load(f)
train_dataset = save['train_dataset']
train_labels = save['train_labels']
valid_dataset = save['valid_dataset']
valid_labels = save['valid_labels']
test_dataset = save['test_dataset']
test_labels = save['test_labels']
del save # hint to help gc free up memory
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# Reformat into a shape that's more adapted to the models we're going to train:
# - data as a flat matrix,
# - labels as float 1-hot encodings.
# In[ ]:
image_size = 28
num_labels = 10
def reformat(dataset, labels):
dataset = dataset.reshape((-1, image_size * image_size)).astype(np.float32)
# Map 0 to [1.0, 0.0, 0.0 ...], 1 to [0.0, 1.0, 0.0 ...]
labels = (np.arange(num_labels) == labels[:, None]).astype(np.float32)
return dataset, labels
train_dataset, train_labels = reformat(train_dataset, train_labels)
valid_dataset, valid_labels = reformat(valid_dataset, valid_labels)
test_dataset, test_labels = reformat(test_dataset, test_labels)
print('Training set', train_dataset.shape, train_labels.shape)
print('Validation set', valid_dataset.shape, valid_labels.shape)
print('Test set', test_dataset.shape, test_labels.shape)
# We're first going to train a multinomial logistic regression using simple gradient descent.
#
# TensorFlow works like this:
# * First you describe the computation that you want to see performed: what the inputs, the variables, and the operations look like. These get created as nodes over a computation graph. This description is all contained within the block below:
#
# with graph.as_default():
# ...
#
# * Then you can run the operations on this graph as many times as you want by calling `session.run()`, providing it outputs to fetch from the graph that get returned. This runtime operation is all contained in the block below:
#
# with tf.Session(graph=graph) as session:
# ...
#
# Let's load all the data into TensorFlow and build the computation graph corresponding to our training:
# In[ ]:
# With gradient descent training, even this much data is prohibitive.
# Subset the training data for faster turnaround.
train_subset = 10000
graph = tf.Graph()
with graph.as_default():
# Input data.
# Load the training, validation and test data into constants that are
# attached to the graph.
tf_train_dataset = tf.constant(train_dataset[:train_subset, :])
tf_train_labels = tf.constant(train_labels[:train_subset])
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
# These are the parameters that we are going to be training. The weight
# matrix will be initialized using random values following a (truncated)
# normal distribution. The biases get initialized to zero.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
# We multiply the inputs with the weight matrix, and add biases. We compute
# the softmax and cross-entropy (it's one operation in TensorFlow, because
# it's very common, and it can be optimized). We take the average of this
# cross-entropy across all training examples: that's our loss.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
# We are going to find the minimum of this loss using gradient descent.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
# These are not part of training, but merely here so that we can report
# accuracy figures as we train.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
# Let's run this computation and iterate:
# In[ ]:
num_steps = 801
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
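# Quick sanity check of the metric (illustrative): two predictions with one
# correct argmax give 50%.
# accuracy(np.array([[0.9, 0.1], [0.2, 0.8]]),
#          np.array([[1., 0.], [1., 0.]]))  # -> 50.0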
with tf.Session(graph=graph) as session:
# This is a one-time operation which ensures the parameters get initialized as
# we described in the graph: random weights for the matrix, zeros for the
# biases.
tf.initialize_all_variables().run()
print('Initialized')
for step in range(num_steps):
# Run the computations. We tell .run() that we want to run the optimizer,
# and get the loss value and the training predictions returned as numpy
# arrays.
_, l, predictions = session.run([optimizer, loss, train_prediction])
if (step % 100 == 0):
print('Loss at step %d: %f' % (step, l))
print('Training accuracy: %.1f%%' % accuracy(
predictions, train_labels[:train_subset, :]))
# Calling .eval() on valid_prediction is basically like calling run(), but
# just to get that one numpy array. Note that it recomputes all its graph
# dependencies.
print('Validation accuracy: %.1f%%' % accuracy(
valid_prediction.eval(), valid_labels))
print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
# Let's now switch to stochastic gradient descent training instead, which is much faster.
#
# The graph will be similar, except that instead of holding all the training data into a constant node, we create a `Placeholder` node which will be fed actual data at every call of `session.run()`.
# In[ ]:
batch_size = 128
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
weights = tf.Variable(
tf.truncated_normal([image_size * image_size, num_labels]))
biases = tf.Variable(tf.zeros([num_labels]))
# Training computation.
logits = tf.matmul(tf_train_dataset, weights) + biases
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
valid_prediction = tf.nn.softmax(
tf.matmul(tf_valid_dataset, weights) + biases)
test_prediction = tf.nn.softmax(tf.matmul(tf_test_dataset, weights) + biases)
# Let's run it:
# In[ ]:
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
# ---
# Problem
# -------
#
# Turn the logistic regression example with SGD into a 1-hidden layer neural network
# with rectified linear units [nn.relu()](https://www.tensorflow.org/versions/r0.7/api_docs/python/nn.html#relu)
# and 1024 hidden nodes. This model should improve your validation / test accuracy.
#
# ---
batch_size = 128
hidden_size = 1024
graph = tf.Graph()
with graph.as_default():
# Input data. For the training data, we use a placeholder that will be fed
# at run time with a training minibatch.
tf_train_dataset = tf.placeholder(tf.float32,
shape=(batch_size, image_size * image_size))
tf_train_labels = tf.placeholder(tf.float32, shape=(batch_size, num_labels))
tf_valid_dataset = tf.constant(valid_dataset)
tf_test_dataset = tf.constant(test_dataset)
# Variables.
W1 = tf.Variable(tf.truncated_normal([image_size * image_size, hidden_size]))
b1 = tf.Variable(tf.zeros([hidden_size]))
W2 = tf.Variable(tf.truncated_normal([hidden_size, num_labels]))
b2 = tf.Variable(tf.zeros([num_labels]))
# Training computation.
y1 = tf.nn.relu(tf.matmul(tf_train_dataset, W1) + b1)
logits = tf.matmul(y1, W2) + b2
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits, tf_train_labels))
# Optimizer.
optimizer = tf.train.GradientDescentOptimizer(0.5).minimize(loss)
# Predictions for the training, validation, and test data.
train_prediction = tf.nn.softmax(logits)
y1_valid = tf.nn.relu(tf.matmul(tf_valid_dataset, W1) + b1)
valid_logits = tf.matmul(y1_valid, W2) + b2
valid_prediction = tf.nn.softmax(valid_logits)
y1_test = tf.nn.relu(tf.matmul(tf_test_dataset, W1) + b1)
test_logits = tf.matmul(y1_test, W2) + b2
test_prediction = tf.nn.softmax(test_logits)
# Let's run it:
num_steps = 3001
with tf.Session(graph=graph) as session:
tf.initialize_all_variables().run()
print("Initialized")
for step in range(num_steps):
# Pick an offset within the training data, which has been randomized.
# Note: we could use better randomization across epochs.
offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
# Generate a minibatch.
batch_data = train_dataset[offset:(offset + batch_size), :]
batch_labels = train_labels[offset:(offset + batch_size), :]
# Prepare a dictionary telling the session where to feed the minibatch.
# The key of the dictionary is the placeholder node of the graph to be fed,
# and the value is the numpy array to feed to it.
feed_dict = {tf_train_dataset: batch_data, tf_train_labels: batch_labels}
_, l, predictions = session.run(
[optimizer, loss, train_prediction], feed_dict=feed_dict)
if (step % 500 == 0):
print("Minibatch loss at step %d: %f" % (step, l))
print("Minibatch accuracy: %.1f%%" % accuracy(predictions, batch_labels))
print("Validation accuracy: %.1f%%" % accuracy(
valid_prediction.eval(), valid_labels))
print("Test accuracy: %.1f%%" % accuracy(test_prediction.eval(), test_labels))
|
gpl-3.0
| 6,008,777,394,889,813,000
| 40.405316
| 243
| 0.672872
| false
| 3.665588
| true
| false
| false
|