text stringlengths 4 1.02M | meta dict |
|---|---|
import sys
import json
import os
import configparser
import urllib.request
import urllib.parse
import urllib.error
from bs4 import BeautifulSoup
# Directory (relative to the repository root) where the per-library JSON
# configuration files of the app are stored.
LIBDIR = 'opacclient/opacapp/src/main/assets/bibs/'
# Media-type constants known to the app; user input for media-type mappings
# is validated against this list (see Bibliotheca.prompt).
TYPES = [
    'NONE', 'BOOK', 'CD', 'CD_SOFTWARE', 'CD_MUSIC', 'DVD', 'MOVIE', 'AUDIOBOOK', 'PACKAGE',
    'GAME_CONSOLE', 'EBOOK', 'SCORE_MUSIC', 'PACKAGE_BOOKS', 'UNKNOWN', 'NEWSPAPER',
    'BOARDGAME', 'SCHOOL_VERSION', 'MAP', 'BLURAY', 'AUDIO_CASSETTE', 'ART', 'MAGAZINE',
    'GAME_CONSOLE_WII', 'GAME_CONSOLE_NINTENDO', 'GAME_CONSOLE_PLAYSTATION',
    'GAME_CONSOLE_XBOX', 'LP_RECORD', 'MP3', 'URL', 'EVIDEO','EDOC','EAUDIO']
def getInput(required=False, default=None):
    """Prompt on stdout and read one stripped line from stdin.

    If the user enters nothing: return `default` when one is given,
    re-ask when `required` is True, otherwise return None.
    """
    while True:
        if default is not None:
            print("[Standard %s]" % default, end=" ")
        print("> ", end="")
        value = input().strip()
        if value:
            return value
        # Empty input: the default (if any) wins, even for required fields.
        if default is not None:
            return default
        if required:
            print("Feld muss gefüllt werden.")
            continue
        return None
def loadGeoPossibilities(data):
    """Geocode the library via the Google Maps API.

    Tries three increasingly generic address strings (library title, the
    generic word "Bibliothek", then just the city) and collects every result
    as a ``(label, [lat, lng])`` tuple.
    """
    candidates = []
    queries = (
        '%s, %s, %s' % (data['title'], data['city'], data['state']),
        '%s, %s, %s' % ('Bibliothek', data['city'], data['state']),
        data['city'],
    )
    for query in queries:
        params = urllib.parse.urlencode({'address': query, 'sensor': 'true'})
        url = 'https://maps.googleapis.com/maps/api/geocode/json?' + params
        response = json.loads(urllib.request.urlopen(url).read().decode())
        if response['status'] != 'OK':
            print("ERROR!")
        for result in response['results']:
            label = ", ".join([part["long_name"] for part in result['address_components']])
            location = result['geometry']['location']
            candidates.append((label, [float(location['lat']), float(location['lng'])]))
    return candidates
class Api:
    """Base class for all library-system prompt implementations.

    Subclasses override accountSupported() and/or prompt() to ask
    system-specific questions and enrich the library description.
    """

    def accountSupported(self):
        """Whether the implementation supports account features (default: yes)."""
        return True

    def prompt(self, data):
        """Ask system-specific questions; the default asks nothing."""
        return data
class Bibliotheca(Api):
    """Prompt/configuration logic for BOND BIBLIOTHECA OPACs.

    Downloads and parses the server's ``w3oini.txt`` configuration file to
    derive the column layouts of the account, reservation and copies tables
    as well as the media-type mapping.
    """

    def accountSupported(self):
        return True

    def prompt(self, data):
        """Interactively fill ``data['data']`` for a Bibliotheca library.

        Asks for an optional database name and suffix, tries to derive the
        table layouts from the remote config (falling back to manual setup
        if that fails), then lets the user map the library's media types to
        the app's TYPES constants.
        """
        datadata = data['data']
        global TYPES  # NOTE(review): redundant, TYPES is only read here.
        print("Muss eine bestimmte Datenbank geladen werden?")
        print("Enter drücken, wenn nicht benötigt.")
        inp = getInput(required=False)
        suffix = ''
        if inp is not None:
            datadata['db'] = inp
            print("Welchen Suffix hat diese Datenbank im System? (Bsp. _DB1)")
            inp = getInput(required=False, default='')
            if inp is not None:
                suffix = inp
        fetched = None
        try:
            fetched = self._fetchData(datadata['baseurl'], suffix)
            datadata['accounttable'] = fetched['accounttable']
            datadata['reservationtable'] = fetched['reservationtable']
            datadata['copiestable'] = fetched['copiestable']
        except Exception as e:
            # Best effort: if the remote config cannot be read, the user has
            # to fill in the table layouts manually (see wiki link below).
            print(str(e))
            print("WARNUNG! Konfiguration konnte nicht ausgelesen werden. HANDARBEIT NÖTIG!")
            print("Mehr Informationen:")
            print("https://github.com/raphaelm/opacclient/wiki/Supported-library-types#bibliotheca")
        if fetched is not None:
            if len(fetched['mediatypes']) > 0:
                print("Bitte weise die Medientypen ihren Entsprechungen in der App zu.")
                print("Verfügbar sind:")
                print(" ".join(sorted(TYPES)))
                print("")
                datadata['mediatypes'] = {}
                for k, v in fetched['mediatypes'].items():
                    inp = ''
                    # Re-ask until the user enters a valid TYPES constant.
                    while inp not in TYPES:
                        print("'%s' ('%s')?" % (v, k))
                        inp = getInput(required=False, default="UNKNOWN")
                    datadata['mediatypes'][k] = inp
        data['data'] = datadata
        return data

    def _fetchData(self, url, suff=''):
        """Parse ``w3oini.txt`` (local copy preferred, else fetched from
        ``url``) and return the account/reservation/copies table layouts
        plus the raw media-type mapping.

        :param url: base URL of the OPAC.
        :param suff: database suffix for the [ANZEIGE_EXEMPLAR...] section.
        """
        config = configparser.RawConfigParser(allow_no_value=True, strict=False)
        if os.path.exists('w3oini.txt'):
            # A local copy takes precedence (useful for debugging).
            # NOTE(review): file handle is never closed explicitly.
            config.read_string(open('w3oini.txt', 'rb').read().decode('iso-8859-1'))
        else:
            config.read_string(urllib.request.urlopen(url + '/w3oini.txt').read().decode('iso-8859-1'))
        data = {
            'accounttable': {},
            'reservationtable': {},
            'copiestable': {},
        }
        # Column indices follow the order in which fields appear in the
        # [ANZEIGEKONTOFELDER] section (konto1..konto20); i_acc counts
        # account columns, i_res reservation columns.
        i_acc = 0
        i_res = 0
        for i in range(1, 21):
            conf = config.get("ANZEIGEKONTOFELDER", "konto" + str(i))
            if conf == '':
                continue
            key = conf.split("#")[0].lower()
            if key in ('exemplarnr', 'buchungsnr'):
                data['accounttable']['barcode'] = i_acc
                i_acc += 1
            elif key == 'verf':
                data['accounttable']['author'] = i_acc
                data['reservationtable']['author'] = i_res
                i_acc += 1
                i_res += 1
            elif key == 'titel':
                data['accounttable']['title'] = i_acc
                data['reservationtable']['title'] = i_res
                i_acc += 1
                i_res += 1
            elif key == 'frist':
                data['accounttable']['returndate'] = i_acc
                i_acc += 1
            elif key == 'bereit':
                data['reservationtable']['availability'] = i_res
                i_res += 1
            elif key == 'ausleihstatus':
                data['accounttable']['status'] = i_acc
                i_acc += 1
            elif key == 'zwst':
                data['accounttable']['homebranch'] = i_acc
                data['reservationtable']['branch'] = i_res
                i_acc += 1
                i_res += 1
            elif key == 'ausleihstelle':
                data['accounttable']['lendingbranch'] = i_acc
                i_acc += 1
            elif key == 'mediengrp' or key == 'reserviert' or key == 'saeumnisgebuehr':
                # Known columns we do not map, but which still occupy a slot.
                i_acc += 1
            elif key == 'bereit bis':
                data['reservationtable']['expirationdate'] = i_res
                i_res += 1
            else:
                # Unknown column: abort so the mapping can be extended here.
                print("WARNING! NOT COUNTING ", key, url)
                sys.exit(0)
        # The prolong/cancel links come after the counted columns.
        data['accounttable']['prolongurl'] = i_acc
        data['reservationtable']['cancelurl'] = i_res
        if ('lendingbranch' not in data['accounttable']
                or data['accounttable']['lendingbranch'] == -1) and (
                'homebranch' in data['accounttable'] and data['accounttable']['homebranch'] > 0):
            # Fall back to the home branch when no lending branch column exists.
            data['accounttable']['lendingbranch'] = data['accounttable']['homebranch']
        # Copies table layout from [ANZEIGE_EXEMPLAR<suffix>] (AE1..AE10);
        # every non-empty entry occupies a column, matched or not.
        i_copy = 0
        for i in range(1, 11):
            conf = config.get("ANZEIGE_EXEMPLAR" + suff, "AE" + str(i))
            if conf == '':
                continue
            key = conf.split("#")[1]
            if key == 'buchungsnr':
                data['copiestable']['barcode'] = i_copy
            elif key == 'zweigstelle':
                data['copiestable']['branch'] = i_copy
            elif key == 'standort2':
                data['copiestable']['department'] = i_copy
            elif key == 'standort':
                data['copiestable']['location'] = i_copy
            elif key == 'exemplarstatus':
                data['copiestable']['status'] = i_copy
            elif key == 'rueckgabedatum':
                data['copiestable']['returndate'] = i_copy
            elif key == 'Auslanzvorbestakt':
                data['copiestable']['reservations'] = i_copy
            i_copy += 1
        # Raw media-type mapping from [ANZEIGE_MEDIGRPPIC]: code -> label.
        data['mediatypes'] = {}
        for i in range(1, 100):
            if not config.has_option("ANZEIGE_MEDIGRPPIC", "MEDIGRPPIC" + str(i)):
                continue
            conf = config.get("ANZEIGE_MEDIGRPPIC", "MEDIGRPPIC" + str(i))
            if conf == '' or conf is None:
                continue
            split = conf.split("#")
            data['mediatypes'][split[1]] = split[2]
        return data
class Sisis(Api):
    """Prompt implementation for SISIS-SunRise OPACs."""

    def accountSupported(self):
        return True

    def prompt(self, data):
        """Ask for optional start parameters appended to OPAC requests."""
        print("Sind zusätzliche Parameter nötig?")
        print("Ein häufiges Beispiel wäre sowas wie 'Login=opsb'")
        extra = getInput(required=False)
        if extra is not None:
            data['data']['startparams'] = extra
        return data
class TouchPoint(Api):
    """OCLC TouchPoint catalogues: search only, no account features."""
    def accountSupported(self):
        # Account integration is not implemented for TouchPoint.
        return False
class WebOpacNet(Api):
    """webopac.net catalogues: search only, no account features."""
    def accountSupported(self):
        # Account integration is not implemented for webopac.net.
        return False
class WinBiap(Api):
    """WinBIAP catalogues; supports account features, no extra questions."""
    def accountSupported(self):
        return True
class Adis(Api):
    """Prompt implementation for aDIS/BMS OPACs."""

    def accountSupported(self):
        return True

    def prompt(self, data):
        """Ask for optional start parameters appended to OPAC requests."""
        print("Sind zusätzliche Parameter nötig?")
        print("Ein häufiges Beispiel wäre sowas wie 'service=direct/0/Home/$DirectLink&sp=S127.0.0.1%3A23002&sp=SS10000000'")
        extra = getInput(required=False)
        if extra is not None:
            data['data']['startparams'] = extra
        return data
class Biber1992(Api):
    """Prompt implementation for BIBER v1992 ("biber") OPACs."""

    def accountSupported(self):
        return True

    def prompt(self, data):
        """Ask for the OPAC directory name (usually 'opax' or 'opac').

        BUG FIX: the original method had two statements after ``return data``
        (a warning print and a second return) that could never execute; the
        unreachable code has been removed.
        """
        print("Opac-Ordner?")
        inp = getInput(required=False, default='opax')
        if inp is not None:
            data['data']['opacdir'] = inp
        return data
class VuFind(Api):
    """VuFind discovery interfaces: search only, no account features."""
    def accountSupported(self):
        # Account integration is not implemented for VuFind.
        return False
class Zones(Api):
    """Zones catalogues: search only, no account features."""
    def accountSupported(self):
        # Account integration is not implemented for Zones.
        return False
class Primo(Api):
    """Prompt implementation for Ex Libris Primo (search only)."""

    def accountSupported(self):
        return False

    def prompt(self, data):
        """Ask for the Primo VID and the supported interface languages."""
        print("VID?")
        data['data']['db'] = getInput(required=True)
        print("Sprachen (short codes, kommagetrennt)?")
        languages = getInput(required=True)
        data['data']['languages'] = languages.split(",")
        return data
class Pica(Api):
    """Prompt implementation for OCLC PICA OPACs."""

    # Kept for backward compatibility; accountSupported() is the actual API.
    account = True

    def accountSupported(self):
        return True

    def prompt(self, data):
        """Ask for the PICA database number."""
        print("DB-Nummer?")
        data['data']['db'] = getInput(required=True)
        return data
class IOpac(Api):
    """IOPAC catalogues; supports account features, no extra questions."""
    def accountSupported(self):
        return True
class Open(Api):
    """Prompt implementation for OCLC "Open" portals (search only).

    Scrapes the portal's navigation menu to discover the URLs of the simple
    and advanced search pages, falling back to asking the user.
    """

    def accountSupported(self):
        return False

    def prompt(self, data):
        data['data']['urls'] = {}
        baseurl = data['data']['baseurl'] + '/'
        html = urllib.request.urlopen(baseurl).read().decode('utf-8')
        doc = BeautifulSoup(html, 'html.parser')
        elems = doc.select('#dnn_dnnNAV_ctldnnNAV li a')
        for elem in elems:
            name = elem.get_text()
            # BUG FIX: ('Einfache Suche') is a plain parenthesized string, so
            # `name in (...)` performed a SUBSTRING test (any fragment such as
            # 'Suche' matched). Use equality for the single option instead.
            if name == 'Einfache Suche':
                data['data']['urls']['simple_search'] = elem['href'].replace(baseurl, '')
            elif name in ('Erweiterte Suche', 'Profisuche'):
                data['data']['urls']['advanced_search'] = elem['href'].replace(baseurl, '')
        # Fall back to manual input for anything the scrape did not find.
        if 'simple_search' not in data['data']['urls']:
            print("URL für Einfache Suche?")
            data['data']['urls']['simple_search'] = getInput(required=True)
        if 'advanced_search' not in data['data']['urls']:
            print("URL für Erweiterte Suche?")
            data['data']['urls']['advanced_search'] = getInput(required=True)
        return data
# Maps the machine-readable API identifier (the "api" field written into each
# library's JSON file) onto the class implementing its interactive prompts.
APIS = {
    'bibliotheca' : Bibliotheca,
    'sisis' : Sisis,
    'touchpoint' : TouchPoint,
    'biber1992' : Biber1992,
    'zones' : Zones,
    'iopac' : IOpac,
    'pica' : Pica,
    'adis' : Adis,
    'webopac.net' : WebOpacNet,
    'winbiap' : WinBiap,
    'vufind' : VuFind,
    'primo' : Primo,
    'open' : Open
}
# Collected library description; serialized to a JSON file at the end.
data = {}

if __name__ == '__main__':
    print("Hallo! Dieses Skript hilft dir, eine neue Bibliothek hinzuzufügen")
    # The script must be started from the repository root so that the JSON
    # file can be written into the app's assets directory.
    if not os.path.isdir(LIBDIR):
        print("Bitte wechsle in das Verzeichnis, in dem die App liegt.")
        sys.exit(0)
    print("In welcher Stadt befindet sich die Bibliothek?")
    print("Suffixe wie 'Frankfurt (Main)' werden in Klammern gesetzt.")
    data['city'] = getInput(required=True)
    print("In welchem Land liegt die Bibliothek?")
    data['country'] = getInput(required=True, default="Deutschland")
    print("In welchem Bundesland ist die Bibliothek?")
    print("In Deutschland und Österreich werden Bundesländer benutzt, in der Schweiz Kantone.")
    data['state'] = getInput(required=True)
    print("Wie heißt die Bibliothek?")
    print("Dies sollte etwas in dieser Stadt eindeutiges sein wie 'Stadtbibliothek', 'Unibibliothek' oder 'Ruprecht-Karls-Universität'. Der Name der Stadt soll nicht erneut vorkommen!")
    data['title'] = getInput(default="Stadtbibliothek")
    # Geocode the library and let the user pick the best match
    # (0 = store no coordinates).
    print("Lade Geodaten...")
    geo = loadGeoPossibilities(data)
    for k, g in enumerate(geo):
        print("[%d] %s" % (k + 1, g[0]))
    print("Welche dieser Positionen trifft am besten zu? 0 für keine.")
    print("Nummer", end=" ")
    geokey = int(getInput(default="0"))
    if geokey > 0:
        data['geo'] = geo[geokey - 1][1]
    print("Welche API-Implementierung wird genutzt?")
    print("Verfügbar sind: " + " ".join(sorted(APIS.keys())))
    data['api'] = ''
    # Loop until a known API identifier is entered.
    while data['api'] not in APIS.keys():
        data['api'] = getInput(required=True)
    print("URL zum OPAC")
    print("Ohne abschließenden /")
    data['data'] = {}
    data['data']['baseurl'] = getInput(required=True)
    print("URL zu einer Informationsseite")
    print("Sollte Öffnungszeiten u.ä. enthalten")
    data['information'] = getInput(required=True)
    # Delegate API-specific questions to the chosen implementation.
    api = APIS[data['api']]()
    data = api.prompt(data)
    print("Konto unterstützt?")
    inp = getInput(required=False, default='nein' if not api.accountSupported() else 'ja')
    if inp.lower() in ("ja", "yes", "y", "j", "true", "1"):
        data['account_supported'] = True
    else:
        data['account_supported'] = False
    # Pick a file name; refuse to silently overwrite an existing file.
    ok = False;
    while not ok:
        print("Dateiname")
        print("Sowas wie 'Mannheim' oder 'Heidelberg_Uni'. Möglichst keine Leerzeichen und Umlaute.")
        ident = getInput(required=True)
        if os.path.isfile(LIBDIR + ident + '.json'):
            print("ACHTUNG: Datei existiert bereits. Überschreiben? (j/n)");
            value = getInput(required=True, default="n")
            if value == "j":
                ok = True;
        else:
            ok = True;
    # Show the final record and write it (pretty-printed, trailing newline).
    print(json.dumps(data, indent=4, sort_keys=True), end="\n\n")
    with open(LIBDIR + ident + '.json', 'w') as fp:
        json.dump(data, fp, sort_keys=True, indent=4)
        fp.write("\n")
    print("In Datei %s geschrieben." % (LIBDIR + ident + '.json'))
| {
"content_hash": "14dd36537c4a76972697e7ae6d9e3f29",
"timestamp": "",
"source": "github",
"line_count": 449,
"max_line_length": 185,
"avg_line_length": 32.937639198218264,
"alnum_prop": 0.5460139292717561,
"repo_name": "simon04/opacclient",
"id": "e61c195079eb108d25ce8399000a12171cbc0dad",
"size": "14837",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/add_library.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "915543"
},
{
"name": "Java",
"bytes": "1593455"
},
{
"name": "Shell",
"bytes": "2035"
}
],
"symlink_target": ""
} |
import fcntl, termios, struct, os, sys
def getDims():
    """Return the terminal size as a ``(rows, columns)`` tuple.

    Queries the TIOCGWINSZ ioctl on stdin, stdout and stderr in turn and
    falls back to ``(25, 80)`` when none of them is attached to a terminal.
    """
    def ioctl_GWINSZ(fd):
        # TIOCGWINSZ fills a winsize struct; only the first two unsigned
        # shorts (rows, cols) are needed here.
        try:
            return struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
        except (OSError, IOError, struct.error):
            # Narrowed from a bare `except:` which also swallowed
            # KeyboardInterrupt and hid genuine programming errors.
            return None
    # The unused `env = os.environ` assignment of the original was removed.
    return ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) or (25, 80)
class RedirectStdStreams(object):
    """Context manager that temporarily replaces sys.stdout / sys.stderr.

    Streams are flushed on both entry and exit so that output is attributed
    to the correct destination, and the originals are restored on exit.
    """

    def __init__(self, stdout=None, stderr=None):
        # Fall back to the current streams when no replacement is given.
        self._stdout = stdout or sys.stdout
        self._stderr = stderr or sys.stderr

    def __enter__(self):
        self.old_stdout = sys.stdout
        self.old_stderr = sys.stderr
        # Flush pending output so it still lands on the old streams.
        self.old_stdout.flush()
        self.old_stderr.flush()
        sys.stdout = self._stdout
        sys.stderr = self._stderr

    def __exit__(self, exc_type, exc_value, traceback):
        self._stdout.flush()
        self._stderr.flush()
        sys.stdout = self.old_stdout
        sys.stderr = self.old_stderr
| {
"content_hash": "ebf28db2cdb66b5878f3e5bdf1c52743",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 81,
"avg_line_length": 33.03703703703704,
"alnum_prop": 0.6098654708520179,
"repo_name": "Refefer/quickfind",
"id": "37cd1235c879eda93b77bcd22a020356503c64eb",
"size": "892",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quickfind/Console.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "32549"
},
{
"name": "Shell",
"bytes": "502"
},
{
"name": "VimL",
"bytes": "733"
}
],
"symlink_target": ""
} |
import abc
from molecule import util
from molecule.provisioner.lint import ansible_lint
class Base(object):
    """Abstract base class for provisioner implementations.

    NOTE(review): ``__metaclass__`` is the Python 2 way of selecting a
    metaclass; Python 3 ignores this attribute, so the abstract members are
    not enforced there — presumably this module targets Python 2. Confirm
    before modernizing.
    """
    __metaclass__ = abc.ABCMeta

    def __init__(self, config):
        """
        Base initializer for all :ref:`Provisioner` classes.

        :param config: An instance of a Molecule config.
        :returns: None
        """
        self._config = config

    @abc.abstractproperty
    def default_options(self):  # pragma: no cover
        """
        Default CLI arguments provided to ``cmd`` and returns a dict.

        :return: dict
        """
        pass

    @abc.abstractproperty
    def default_env(self):  # pragma: no cover
        """
        Default env variables provided to ``cmd`` and returns a dict.

        :return: dict
        """
        pass

    @property
    @abc.abstractmethod
    def name(self):  # pragma: no cover
        """
        Name of the provisioner and returns a string.

        :returns: str
        """
        pass

    @property
    @util.memoize
    def lint(self):
        # Resolve the lint implementation configured for the provisioner;
        # implicitly returns None for any name other than 'ansible-lint'.
        lint_name = self._config.config['provisioner']['lint']['name']
        if lint_name == 'ansible-lint':
            return ansible_lint.AnsibleLint(self._config)
| {
"content_hash": "f6fa6bb2eb56930e54f94b89b260c3a2",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 70,
"avg_line_length": 23.057692307692307,
"alnum_prop": 0.5746455379482902,
"repo_name": "metacloud/molecule",
"id": "944c9045710e7e1e9c23ae4d2065f3c27888c4a4",
"size": "2319",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "molecule/provisioner/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1082"
},
{
"name": "Makefile",
"bytes": "738"
},
{
"name": "Python",
"bytes": "767920"
},
{
"name": "Ruby",
"bytes": "536"
},
{
"name": "Shell",
"bytes": "198"
}
],
"symlink_target": ""
} |
"""
Interactive Crossfilter
=======================
This example shows a multi-panel view of the same data, where you can interactively
select a portion of the data in any of the panels to highlight that portion in any
of the other panels.
"""
# category: interactive charts
import altair as alt
from vega_datasets import data
# Flight data; parse the "date" column as a date so the hour can be derived
# in the transform_calculate step below.
flights = alt.UrlData(data.flights_2k.url,
                      format={'parse': {'date': 'date'}})

# Interval selection along the x axis, shared by all three panels.
brush = alt.selection(type='interval', encodings=['x'])

# Define the base chart, with the common parts of the
# background and highlights
base = alt.Chart().mark_bar().encode(
    x=alt.X(alt.repeat('column'), type='quantitative', bin=alt.Bin(maxbins=20)),
    y='count()'
).properties(
    width=180,
    height=130
)

# blue background with selection
background = base.properties(
    selection=brush
)

# yellow highlights on the transformed data
highlight = base.encode(
    color=alt.value('goldenrod')
).transform_filter(
    brush
)

# layer the two charts & repeat one panel per column listed below
alt.layer(
    background, highlight,
    data=flights
).transform_calculate(
    "time", "hours(datum.date)"
).repeat(
    column=["distance", "delay", "time"]
)
| {
"content_hash": "ff5587ab547f4f9126a5a9ede78c554e",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 83,
"avg_line_length": 24.829787234042552,
"alnum_prop": 0.6812339331619537,
"repo_name": "ellisonbg/altair",
"id": "ffe35aea585f1ea5e47dada18a6c6696192e4881",
"size": "1167",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/vegalite/v2/examples/interactive_layered_crossfilter.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "136763"
},
{
"name": "Makefile",
"bytes": "312"
},
{
"name": "Python",
"bytes": "1150719"
}
],
"symlink_target": ""
} |
import time
from Axon.ThreadedComponent import threadedcomponent
from Axon.Scheduler import scheduler
class ThreadedTest(threadedcomponent):
    """Prints the wall-clock delta between successive main-loop iterations.

    Python 2 code (note the print statements). Sleeps ~0.5 ms per iteration,
    so the printed values expose the thread/scheduler timing granularity.
    """
    def __init__(self):
        super(ThreadedTest, self).__init__()
        # Timestamp of the previous iteration, used to compute the delta.
        self.last = time.time()

    def main(self):
        while 1:
            t = time.time()
            print t - self.last
            self.last = t
            time.sleep(0.0005)
if __name__ == "__main__":
    # Runs the timing test indefinitely (interrupt to stop).
    ThreadedTest().run()
| {
"content_hash": "6fc6708e676b6038c0ff5881cf80d6d8",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 52,
"avg_line_length": 20.681818181818183,
"alnum_prop": 0.5802197802197803,
"repo_name": "bbc/kamaelia",
"id": "30e701b43e20b2e2082ed6ef61fa6ff65500d29e",
"size": "1285",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/JT/Jam/application/trunk/TestGraveyard/ThreadTimingTest.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ApacheConf",
"bytes": "62985"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "Diff",
"bytes": "483"
},
{
"name": "Gettext Catalog",
"bytes": "3919909"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "31234"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896320"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "711244"
}
],
"symlink_target": ""
} |
"""
TinyDB extensions. PetitDB, a TinyDB with unique fields logics.
requires: tinydb >= v3.0 (note that this is not a stable release yet.)
"""
# -------------------------------------------------------------------------------
# Author: Alexandre Manhaes Savio <alexsavio@gmail.com>
# Nuklear Medizin Department
# Klinikum rechts der Isar, Technische Universitaet Muenchen (TUM)
# 2015, Alexandre Manhaes Savio
# Use this at your own risk!
# -------------------------------------------------------------------------------
from datetime import datetime
from collections import OrderedDict
# Give a version hint when tinydb is missing. Narrowed from a bare `except:`,
# which also masked unrelated errors raised while importing tinydb
# (and would even have swallowed KeyboardInterrupt).
try:
    import tinydb
except ImportError:
    raise ImportError('Please install TinyDB v3.0.')
from tinydb import TinyDB, where
from tinydb.storages import JSONStorage
from tinydb.middlewares import CachingMiddleware
from six import string_types
from dateutil.tz import tzutc
class MoreThanOneItemError(Exception):
    """Raised when a lookup expected at most one match but found several."""
    pass
class NotUniqueItemError(Exception):
    """Raised when an insert would duplicate an existing unique item."""
    pass
def timestamp_with_tzinfo(dt):
    """
    Serialize a date/time value into an ISO8601 text representation
    adjusted (if needed) to UTC timezone.

    For instance:
    >>> timestamp_with_tzinfo(datetime(2012, 4, 10, 22, 38, 20, 604391))
    '2012-04-10T22:38:20.604391Z'
    """
    # Stdlib UTC tzinfo instead of dateutil's tzutc(): the two behave
    # identically for astimezone(), and this removes the third-party
    # dependency. (The original doctest also referenced a non-existent
    # `serialize_date` function; fixed above.)
    from datetime import timezone

    if dt.tzinfo:
        dt = dt.astimezone(timezone.utc).replace(tzinfo=None)
    return dt.isoformat() + 'Z'
def timestamp_to_date_str(dt):
    """Serialize a date/time value into a YYYY-MM-DD date string."""
    # date.isoformat() produces the same YYYY-MM-DD text as str(date).
    return dt.date().isoformat()
def _to_string(data):
    """Return a copy of `data` with non-primitive values converted to strings.

    datetime values become YYYY-MM-DD strings; strings, floats and ints are
    kept as-is; everything else is passed through str().

    Parameters
    ----------
    data: dict[str]->object

    Returns
    -------
    string_data: dict[str]->str
    """
    converted = data.copy()
    for key, value in data.items():
        if isinstance(value, (string_types, float, int)):
            continue  # already serializable as-is
        if isinstance(value, datetime):
            converted[key] = timestamp_to_date_str(value)
        else:
            converted[key] = str(value)
    return converted
def insert_unique(table, data, unique_fields=None, *, raise_if_found=False):
    """Insert `data` into `table` unless an item with the same values for
    `unique_fields` already exists there.

    Parameters
    ----------
    table: tinydb.Table

    data: dict

    unique_fields: list of str
        Keys of `data` used to look for an identical item in the database.
        If None, every key in `data` is used.

    raise_if_found: bool
        If True, raise NotUniqueItemError when a matching item exists;
        if False, return the eid of the existing item instead.

    Returns
    -------
    eid: int
        Id of the inserted item, or of the pre-existing matching item.

    Raises
    ------
    MoreThanOneItemError
        If more than one item matches, regardless of `raise_if_found`.

    NotUniqueItemError
        If `raise_if_found` is True and a matching item exists.
    """
    existing = find_unique(table, data, unique_fields)
    if existing is None:
        return table.insert(data)
    if raise_if_found:
        raise NotUniqueItemError('Not expected to find an item with the same '
                                 'values for {}. Inserting {} got {} in eid {}.'.format(
                                     unique_fields, data,
                                     table.get(eid=existing), existing))
    return existing
def search_sample(table, sample):
    """Return all items of `table` whose fields equal every field in `sample`.

    Parameters
    ----------
    table: tinydb.table

    sample: dict
        Field/value pairs that matching items must contain.

    Returns
    -------
    search_result: list of dict
        The matching items; empty if nothing matches.
    """
    equality_query = _query_sample(sample=sample, operators='__eq__')
    return table.search(equality_query)
def search_unique(table, sample, unique_fields=None):
    """Search `table` for the single item matching `sample` on `unique_fields`.

    Parameters
    ----------
    table: tinydb.table

    sample: dict
        Sample data

    unique_fields: list of str
        Keys of `sample` to match on; all keys when None.

    Returns
    -------
    search_result: tinydb.database.Element
        The unique matching item, or None when nothing matches.

    Raises
    ------
    MoreThanOneItemError
        If the search matches more than one entry.
    """
    fields = list(sample.keys()) if unique_fields is None else unique_fields
    matches = table.search(_query_data(sample, field_names=fields, operators='__eq__'))
    if not matches:
        return None
    if len(matches) > 1:
        raise MoreThanOneItemError('Expected to find zero or one items, but found '
                                   '{} items.'.format(len(matches)))
    return matches[0]
def find_unique(table, sample, unique_fields=None):
    """Return the eid of the single item in `table` matching `sample`
    on `unique_fields`, or None when no item matches.

    Parameters
    ----------
    table: tinydb.table

    sample: dict
        Sample data

    unique_fields: list of str
        Keys of `sample` to match on; all keys when None.

    Returns
    -------
    eid: int
        Id of the matching object, or None if none is found.

    Raises
    ------
    MoreThanOneItemError
        If more than one item matches.
    """
    match = search_unique(table, sample, unique_fields)
    return match.eid if match is not None else None
def _query_sample(sample, operators='__eq__'):
    """Build a TinyDB query matching each field of `sample` with the
    corresponding comparison from `operators`, all joined with AND.

    Parameters
    ----------
    sample: dict
        The sample data

    operators: str or list of str
        One comparison per field of `sample`; a single string applies the
        same comparison to every field. Use an OrderedDict for `sample`
        when the operators differ per field.
        Check TinyDB.Query class for possible choices.

    Returns
    -------
    query: tinydb.database.Query
    """
    if isinstance(operators, str):
        operators = [operators] * len(sample)
    if len(sample) != len(operators):
        raise ValueError('Expected `operators` to be a string or a list with the same'
                         ' length as `field_names` ({}), got {}.'.format(len(sample),
                                                                         operators))
    field_queries = [_build_query(field_name=name,
                                  field_value=sample[name],
                                  operator=op)
                     for name, op in zip(sample, operators)]
    return _concat_queries(field_queries, operators='__and__')
def _query_data(data, field_names=None, operators='__eq__'):
    """Build a TinyDB query comparing the `field_names` values of `data`
    with the corresponding comparison from `operators`.

    Parameters
    ----------
    data: dict
        The data sample

    field_names: str or list of str
        The fields of `data` used for the query; all fields when None.

    operators: str or list of str
        One comparison per field, or a single string for all fields.
        Use an OrderedDict for `data` when operators differ per field.
        Check TinyDB.Query class for possible choices.

    Returns
    -------
    query: tinydb.database.Query
    """
    if field_names is None:
        field_names = list(data.keys())
    elif isinstance(field_names, str):
        field_names = [field_names]
    # OrderedDict keeps the field order stable so that per-field operators
    # line up with their fields.
    sample = OrderedDict((name, data[name]) for name in field_names)
    return _query_sample(sample, operators=operators)
def _concat_queries(queries, operators='__and__'):
"""Create a tinyDB Query object that is the concatenation of each query in `queries`.
The concatenation operator is taken from `operators`.
Parameters
----------
queries: list of tinydb.Query
The list of tinydb.Query to be joined.
operators: str or list of str
List of binary operators to join `queries` into one query.
Check TinyDB.Query class for possible choices.
Returns
-------
query: tinydb.database.Query
"""
# checks first
if not queries:
raise ValueError('Expected some `queries`, got {}.'.format(queries))
if len(queries) == 1:
return queries[0]
if isinstance(operators, str):
operators = [operators] * (len(queries) - 1)
if len(queries) - 1 != len(operators):
raise ValueError('Expected `operators` to be a string or a list with the same'
' length as `field_names` ({}), got {}.'.format(len(queries),
operators))
# recursively build the query
first, rest, end = queries[0], queries[1:-1], queries[-1:][0]
bigop = getattr(first, operators[0])
for i, q in enumerate(rest):
bigop = getattr(bigop(q), operators[i])
return bigop(end)
def _build_query(field_name, field_value, operator='__eq__'):
    """Create a tinyDB Query object with the format:
    (where(`field_name`) `operator` `field_value`)

    Parameters
    ----------
    field_name: str
        The name of the field to be queried.

    field_value:
        The value of the field

    operator: str
        The comparison operator.
        Check TinyDB.Query class for possible choices.

    Returns
    -------
    query: tinydb.database.Query
    """
    field = where(field_name)
    if not hasattr(field, operator):
        raise NotImplementedError('Operator `{}` not found in query object.'.format(operator))
    compare = getattr(field, operator)
    return compare(field_value)
class PetitDB(TinyDB):
    """A generic TinyDB subclass that defines operations for: unique values and meta-queries."""

    # NOTE(review): the default `storage` argument is evaluated once at import
    # time, so all instances created with the default share one
    # CachingMiddleware wrapper. It is also stored but NOT forwarded to
    # TinyDB.__init__ (the keyword is commented out) — confirm intent.
    def __init__(self, file_path, storage=CachingMiddleware(JSONStorage)):
        self._db_fpath = file_path
        self._storage = storage
        super(PetitDB, self).__init__(self._db_fpath) #, storage=self._storage)

    def search_by_eid(self, table_name, eid):
        """Return the element in `table_name` with Object ID `eid`.
        If None is found will raise a KeyError exception.

        Parameters
        ----------
        table_name: str
            The name of the table to look in.

        eid: int
            The Object ID of the element to look for.

        Returns
        -------
        elem: tinydb.database.Element

        Raises
        ------
        KeyError
            If the element with ID `eid` is not found.
        """
        elem = self.table(table_name).get(eid=eid)
        if elem is None:
            raise KeyError('Could not find {} with eid {}.'.format(table_name, eid))
        return elem

    def insert_unique(self, table_name, data, unique_fields=None, *, raise_if_found=False):
        """Insert `data` into `table` ensuring that data has unique values
        in `table` for the fields listed in `unique_fields`.

        If `raise_if_found` is True, will raise an NotUniqueItemError if
        another item with the same `unique_fields` values are found
        previously in `table`.
        If False, will return the `eid` from the item found.

        Parameters
        ----------
        table_name: str

        data: dict

        unique_fields: list of str
            Name of fields (keys) from `data` which are going to be used to build
            a sample to look for exactly the same values in the database.
            If None, will use every key in `data`.

        raise_if_found: bool

        Returns
        -------
        eid: int
            Id of the object inserted or the one found with same `unique_fields`.

        Raises
        ------
        MoreThanOneItemError
            Raise even with `raise_with_found` == False if it finds more than one item
            with the same values as the sample.

        NotUniqueItemError
            If `raise_if_found` is True and an item with the same `unique_fields`
            values from `data` is found in `table`.
        """
        # Values are stringified first (see _to_string) so stored items match
        # later string-based lookups.
        return insert_unique(table=self.table(table_name),
                             data=_to_string(data),
                             unique_fields=unique_fields,
                             raise_if_found=raise_if_found)

    def search_unique(self, table_name, sample, unique_fields=None):
        """ Search in `table` an item with the value of the `unique_fields` in the `data` sample.
        Check if the the obtained result is unique. If nothing is found will return an empty list,
        if there is more than one item found, will raise an IndexError.

        Parameters
        ----------
        table_name: str

        sample: dict
            Sample data

        unique_fields: list of str
            Name of fields (keys) from `data` which are going to be used to build
            a sample to look for exactly the same values in the database.
            If None, will use every key in `data`.

        Returns
        -------
        eid: int
            Id of the object found with same `unique_fields`.
            None if none is found.

        Raises
        ------
        MoreThanOneItemError
            If more than one example is found.
        """
        return search_unique(table=self.table(table_name),
                             sample=sample,
                             unique_fields=unique_fields)

    def search_sample(self, table_name, sample):
        """Search for items in `table` that have the same field sub-set values as in `sample`.

        Parameters
        ----------
        table_name: str

        sample: dict
            Sample data

        Returns
        -------
        search_result: list of dict
            List of the items found. The list is empty if no item is found.
        """
        return search_sample(table=self.table(table_name),
                             sample=sample)

    def is_unique(self, table_name, sample, unique_fields=None):
        """Return True if an item with the value of `unique_fields`
        from `data` is unique in the table with `table_name`.
        False if no sample is found or more than one is found.

        See function `find_unique` for more details.

        Parameters
        ----------
        table_name: str

        sample: dict
            Sample data for query

        unique_fields: str or list of str

        Returns
        -------
        is_unique: bool
        """
        try:
            eid = find_unique(self.table(table_name),
                              sample=sample,
                              unique_fields=unique_fields)
        # NOTE(review): bare except also hides unrelated errors (storage
        # failures, typos) rather than only MoreThanOneItemError — consider
        # narrowing.
        except:
            return False
        else:
            return eid is not None

    def update_unique(self, table_name, fields, data, cond=None, unique_fields=None,
                      *, raise_if_not_found=False):
        """Update the unique matching element to have a given set of fields.

        Parameters
        ----------
        table_name: str

        fields: dict or function[dict -> None]
            new data/values to insert into the unique element
            or a method that will update the elements.

        data: dict
            Sample data for query

        cond: tinydb.Query
            which elements to update

        unique_fields: list of str

        raise_if_not_found: bool
            Will raise an exception if the element is not found for update.

        Returns
        -------
        eid: int
            The eid of the updated element if found, None otherwise.
        """
        eid = find_unique(self.table(table_name), data, unique_fields)
        if eid is None:
            if raise_if_not_found:
                msg = 'Could not find {} with {}'.format(table_name, data)
                if cond is not None:
                    msg += ' where {}.'.format(cond)
                raise IndexError(msg)
        else:
            # NOTE(review): if `fields` is a callable (as the docstring
            # allows), _to_string would fail on it — confirm callers only
            # pass dicts here.
            self.table(table_name).update(_to_string(fields), cond=cond, eids=[eid])

        return eid

    def count(self, table_name, sample):
        """Return the number of items that match the `sample` field values
        in table `table_name`.
        Check function search_sample for more details.
        """
        return len(list(search_sample(table=self.table(table_name),
                                      sample=sample)))
| {
"content_hash": "76b260daa26b789b0682ac5e5051aed5",
"timestamp": "",
"source": "github",
"line_count": 569,
"max_line_length": 112,
"avg_line_length": 30.808435852372583,
"alnum_prop": 0.5773531089560753,
"repo_name": "Neurita/boyle",
"id": "033690883c91788e178684d2c01e3e3dc128c083",
"size": "17545",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "boyle/petitdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "1687"
},
{
"name": "Python",
"bytes": "391188"
}
],
"symlink_target": ""
} |
from django.core.management.base import BaseCommand, CommandError
from django.utils.timezone import now
from accounting_core.models import AccountingYear
from accounting_tools.models import Subvention, SubventionLine, SubventionLogging, SubventionFile
from units.models import Unit
from users.models import TruffeUser
import datetime
import json
import pytz
import sys
import os
class Command(BaseCommand):
    """ Requirements : files in /media/uploads/_generic/Subvention/"""
    help = 'Import subventions'
    def handle(self, *args, **options):
        # Expects the legacy export as JSON on stdin: {"data": [subvention, ...]}.
        data = json.loads(sys.stdin.read())
        paris_tz = pytz.timezone("Europe/Paris")
        # Fallback uploader when the original contact user cannot be found.
        # NOTE(review): username=179189 is an int while usernames are usually
        # strings -- confirm this lookup works as intended.
        root_user = TruffeUser.objects.get(username=179189)
        for subvention_data in data['data']:
            (unit, blank_unit_name) = (None, None)
            try:
                if subvention_data['groupe_name']:
                    unit = Unit.objects.get(name=subvention_data['groupe_name'])
                else:
                    # No unit object: keep the free-text recipient instead.
                    blank_unit_name = subvention_data['forWho']
                unit_name = blank_unit_name or unit.name
            except:
                # NOTE(review): bare except also hides unrelated errors;
                # Unit.DoesNotExist is presumably the expected failure here.
                print u"Unit not found !!", subvention_data['groupe_name'], subvention_data['forWho']
                unit_name = None
            if unit_name:
                try:
                    user = TruffeUser.objects.get(username=subvention_data['contact_username'])
                except:
                    user = root_user
                try:
                    ay = AccountingYear.objects.get(name=subvention_data['year_name'])
                except:
                    ay = None
                if ay:
                    subv, created = Subvention.objects.get_or_create(name=u"{} {}".format(unit_name, ay.name), unit=unit, unit_blank_name=blank_unit_name, accounting_year=ay, amount_asked=subvention_data['amount_asked'],
                                                                     amount_given=subvention_data['amount_given'], mobility_asked=subvention_data['mobility_asked'], mobility_given=subvention_data['mobility_given'], description=subvention_data['description'])
                    # Map the legacy flags onto the new status field.
                    if subvention_data['traitee']:
                        subv.status = '2_treated'
                    elif subvention_data['deposee']:
                        subv.status = '1_submited'
                    subv.save()
                    if created:
                        SubventionLogging(who=user, what='imported', object=subv).save()
                    print "+ {!r}".format(subv.name)
                    # Recreate the subvention lines, preserving their order.
                    order = 0
                    for line_data in subvention_data['lines']:
                        if line_data['name']:
                            # 'None' (string) marks a missing date in the export.
                            if line_data['date'] == 'None':
                                line_data['date'] = '1970-01-01'
                            start_date = paris_tz.localize(datetime.datetime.strptime(line_data['date'], '%Y-%m-%d'))
                            subvline, created = SubventionLine.objects.get_or_create(subvention=subv, name=line_data['name'], start_date=start_date, end_date=start_date, nb_spec=0, order=order)
                            if created:
                                print " + {!r}".format(subvline.name)
                            order += 1
                    # Attach uploads; the files must already exist on disk
                    # (see the class docstring).
                    for file_data in subvention_data['uploads']:
                        if not os.path.isfile(os.path.join('media', 'uploads', '_generic', 'Subvention', file_data.split('/')[-1])):
                            print " (!) Missing file {}".format(file_data)
                        else:
                            __, created = SubventionFile.objects.get_or_create(uploader=user, object=subv, file=os.path.join('uploads', '_generic', 'Subvention', file_data.split('/')[-1]), defaults={'upload_date': now()})
                            if created:
                                print " (L)", file_data
| {
"content_hash": "2e3bf5b84f6b9837cc4ed52eafba2ebd",
"timestamp": "",
"source": "github",
"line_count": 84,
"max_line_length": 221,
"avg_line_length": 45.86904761904762,
"alnum_prop": 0.5362055541136777,
"repo_name": "ArcaniteSolutions/truffe2",
"id": "bb09060b801ce38d7a7b13c76f2fc551881aa8ac",
"size": "3878",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "truffe2/truffe/management/commands/import_subventions.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "ActionScript",
"bytes": "15982"
},
{
"name": "CSS",
"bytes": "552855"
},
{
"name": "HTML",
"bytes": "742372"
},
{
"name": "JavaScript",
"bytes": "1859724"
},
{
"name": "PHP",
"bytes": "2274"
},
{
"name": "Python",
"bytes": "3048852"
}
],
"symlink_target": ""
} |
import os
import sys
import datetime
from django.conf import settings
from django.core.mail import EmailMultiAlternatives
from django.template.loader import render_to_string
class single_instance(object):
    """Decorator that prevents concurrent runs of a function via a PID file.

    The wrapped function exits immediately (sys.exit(1)) when another live
    process already holds `<pidfile_name>.pid`; stale lock files are removed
    and the run proceeds.
    """
    def __init__(self, pidfile_name):
        # Base name of the lock file; '.pid' is appended when used.
        self.pidfile_name = pidfile_name
    def __call__(self, f):
        def wrapped_f(*args,**kwargs):
            full_path = os.path.join(self.pidfile_name + '.pid')
            # Check if there is already a lock file existing
            if os.access(full_path, os.F_OK):
                if self.check_pid(full_path):
                    sys.exit(1)
            # put a PID in the pid file
            # NOTE(review): check-then-create is not atomic; two processes
            # starting simultaneously could both pass the check.
            self.create_pid_file(full_path)
            try:
                f(*args,**kwargs)
            except:
                # Catch any errors and delete pidfile
                os.remove(full_path)
                raise
            os.remove(full_path)
        return wrapped_f
    def create_pid_file(self, fn):
        # Record our PID so other instances can see who holds the lock.
        pidfile = open(fn, "w")
        pidfile.write("%s" % os.getpid())
        pidfile.close()
    def check_pid(self, full_path):
        # if the lockfile is already there then check the PID number
        # in the lock file
        pidfile = open(full_path, "r")
        pidfile.seek(0)
        old_pid = pidfile.readline().strip()
        pidfile.close()
        # Check PID is running, return True if we should exit
        if not old_pid:
            print "Existing PID file %s was empty, can't check whether it is still running!" % self.pidfile_name
            return True
        # /proc lookup: Linux-specific liveness test for the recorded PID.
        if os.path.exists("/proc/%s" % old_pid):
            run_time = datetime.datetime.now() - self.modification_date(full_path)
            # Be quiet unless the previous job has been running longer than
            # settings.PIGEONPOST_WARN_RUNTIME seconds
            too_long = settings.PIGEONPOST_WARN_RUNTIME if hasattr(settings, 'PIGEONPOST_WARN_RUNTIME') else 0
            if run_time > datetime.timedelta(seconds=too_long):
                print "PID file %s exists. You already have an instance of the program running" % self.pidfile_name
                print "It has been running as process %s for %s" % (old_pid,run_time)
            return True
        else:
            print "PID file %s exists but the program is not running" % self.pidfile_name
            print "Removing stale lock file for pid %s" % old_pid
            os.remove(full_path)
            return False
    def modification_date(self, filename):
        # mtime of the PID file approximates when the holder started.
        t = os.path.getmtime(filename)
        return datetime.datetime.fromtimestamp(t)
def generate_email(to_user, subject, context, text_template, html_template, from_email=None):
    """ Create an email with html and text versions.
    Note that the same context is used for both rendering the text and html
    versions of the email.
    """
    from django.contrib.sites.models import Site
    # Copy the context so the caller's dict is not mutated, and expose the
    # current site to both templates.
    ctx = dict(context)
    ctx['site'] = Site.objects.get_current()
    # Text version first: it becomes the message body.
    text_body = render_to_string(text_template, ctx)
    if from_email:
        msg = EmailMultiAlternatives(subject, text_body, from_email,
                                     to=[to_user.email])
    else:
        msg = EmailMultiAlternatives(subject, text_body, to=[to_user.email])
    # HTML version is attached as an alternative part.
    msg.attach_alternative(render_to_string(html_template, ctx), "text/html")
    return msg
| {
"content_hash": "3031aafb88a7e5888bcb6629b7c3a448",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 115,
"avg_line_length": 37.41304347826087,
"alnum_prop": 0.6159209761766414,
"repo_name": "dragonfly-science/django-pigeonpost",
"id": "fb0e9961a4285d2f4f56a4a86dc1d41057423439",
"size": "3442",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pigeonpost/utils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "67"
},
{
"name": "PLpgSQL",
"bytes": "246"
},
{
"name": "Python",
"bytes": "53057"
},
{
"name": "Shell",
"bytes": "164"
}
],
"symlink_target": ""
} |
"""
Unit Tests for hyperv quantum rpc
"""
import mock
import unittest2
from quantum.agent import rpc as agent_rpc
from quantum.common import topics
from quantum.openstack.common import context
from quantum.openstack.common import rpc
from quantum.plugins.hyperv import agent_notifier_api as ana
from quantum.plugins.hyperv.common import constants
class rpcHyperVApiTestCase(unittest2.TestCase):
    """Unit tests for the Hyper-V quantum RPC API wrappers."""
    def _test_hyperv_quantum_api(
            self, rpcapi, topic, method, rpc_method, **kwargs):
        # Shared driver: stub the rpc transport, invoke `method` on the api
        # object and verify the topic, message and return value.
        ctxt = context.RequestContext('fake_user', 'fake_project')
        expected_retval = 'foo' if method == 'call' else None
        expected_msg = rpcapi.make_msg(method, **kwargs)
        expected_msg['version'] = rpcapi.BASE_RPC_API_VERSION
        if rpc_method == 'cast' and method == 'run_instance':
            kwargs['call'] = False
        rpc_method_mock = mock.Mock()
        rpc_method_mock.return_value = expected_retval
        # NOTE(review): this patches the module attribute for the whole
        # process and never restores it -- mock.patch would be cleaner.
        setattr(rpc, rpc_method, rpc_method_mock)
        retval = getattr(rpcapi, method)(ctxt, **kwargs)
        self.assertEqual(retval, expected_retval)
        expected_args = [ctxt, topic, expected_msg]
        for arg, expected_arg in zip(rpc_method_mock.call_args[0],
                                     expected_args):
            self.assertEqual(arg, expected_arg)
    def test_delete_network(self):
        rpcapi = ana.AgentNotifierApi(topics.AGENT)
        self._test_hyperv_quantum_api(
            rpcapi,
            topics.get_topic_name(
                topics.AGENT,
                topics.NETWORK,
                topics.DELETE),
            'network_delete', rpc_method='fanout_cast',
            network_id='fake_request_spec')
    def test_port_update(self):
        rpcapi = ana.AgentNotifierApi(topics.AGENT)
        self._test_hyperv_quantum_api(
            rpcapi,
            topics.get_topic_name(
                topics.AGENT,
                topics.PORT,
                topics.UPDATE),
            'port_update', rpc_method='fanout_cast',
            port='fake_port',
            network_type='fake_network_type',
            segmentation_id='fake_segmentation_id',
            physical_network='fake_physical_network')
    def test_port_delete(self):
        rpcapi = ana.AgentNotifierApi(topics.AGENT)
        self._test_hyperv_quantum_api(
            rpcapi,
            topics.get_topic_name(
                topics.AGENT,
                topics.PORT,
                topics.DELETE),
            'port_delete', rpc_method='fanout_cast',
            port_id='port_id')
    def test_tunnel_update(self):
        rpcapi = ana.AgentNotifierApi(topics.AGENT)
        self._test_hyperv_quantum_api(
            rpcapi,
            topics.get_topic_name(
                topics.AGENT,
                constants.TUNNEL,
                topics.UPDATE),
            'tunnel_update', rpc_method='fanout_cast',
            tunnel_ip='fake_ip', tunnel_id='fake_id')
    def test_device_details(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_hyperv_quantum_api(
            rpcapi, topics.PLUGIN,
            'get_device_details', rpc_method='call',
            device='fake_device',
            agent_id='fake_agent_id')
    def test_update_device_down(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_hyperv_quantum_api(
            rpcapi, topics.PLUGIN,
            'update_device_down', rpc_method='call',
            device='fake_device',
            agent_id='fake_agent_id')
    def test_tunnel_sync(self):
        rpcapi = agent_rpc.PluginApi(topics.PLUGIN)
        self._test_hyperv_quantum_api(
            rpcapi, topics.PLUGIN,
            'tunnel_sync', rpc_method='call',
            tunnel_ip='fake_tunnel_ip')
| {
"content_hash": "e1b6305863df8769d41de890091c8cde",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 66,
"avg_line_length": 34.824074074074076,
"alnum_prop": 0.5849508109545334,
"repo_name": "rossella/neutron",
"id": "098fcea122d7f0c8c8fd8e72dca5ea55058c27b4",
"size": "4482",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quantum/tests/unit/hyperv/test_hyperv_rpcapi.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37307"
},
{
"name": "JavaScript",
"bytes": "67928"
},
{
"name": "Perl",
"bytes": "235"
},
{
"name": "Python",
"bytes": "3048930"
},
{
"name": "Shell",
"bytes": "7843"
},
{
"name": "XSLT",
"bytes": "50907"
}
],
"symlink_target": ""
} |
from setuptools import setup
from ike import __version__
def readme():
    """Return the contents of docs/readme.rst (used as the long description)."""
    # Explicit encoding so the build does not depend on the platform's
    # default locale encoding.
    with open('docs/readme.rst', encoding='utf-8') as f:
        return f.read()
# Package metadata.  The version tuple from ike/__init__ is rendered as a
# dotted string and the long description is read from docs/readme.rst.
setup(name='ike',
      version='.'.join('{}'.format(x) for x in __version__),
      description='Minimalistic Internet Key Exchange protocol v2 (RFC 5996) library',
      long_description=readme(),
      author='Kimmo Parviainen-Jalanko',
      author_email='k@77.fi',
      url='http://github.com/kimvais/ike/',
      download_url='https://github.com/kimvais/ike/releases',
      packages=['ike', 'ike.util'],
      install_requires=[
          'rsa',
          'cryptography'
      ],
      classifiers=[
          'Development Status :: 3 - Alpha',
          'License :: OSI Approved :: MIT License',
          'Operating System :: Unix',
          'Programming Language :: Python :: 3.4',
          'Topic :: Communications',
          'Topic :: Internet',
          'Topic :: Security',
          'Topic :: Security :: Cryptography',
      ]
      )
| {
"content_hash": "214158f3e991a0bfc50f9eb88516b0c6",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 86,
"avg_line_length": 30.84375,
"alnum_prop": 0.5653495440729484,
"repo_name": "kimvais/ike",
"id": "e82e0e91bfce7b702a74d7105065ff843cbf6185",
"size": "1011",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "110871"
},
{
"name": "Makefile",
"bytes": "426"
},
{
"name": "Python",
"bytes": "53966"
}
],
"symlink_target": ""
} |
from pip._internal.utils.typing import MYPY_CHECK_RUNNING
if MYPY_CHECK_RUNNING:
from typing import List, Optional
__version__ = "20.0.2"
def main(args=None):
    # type: (Optional[List[str]]) -> int
    """Internal entry point used by pip's own console scripts.

    Not a public API; see https://github.com/pypa/pip/issues/7498 for
    additional details.
    """
    from pip._internal.utils.entrypoints import _wrapper

    return _wrapper(args)
| {
"content_hash": "754ccefd4034218de468d29d46c713a4",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 79,
"avg_line_length": 25.27777777777778,
"alnum_prop": 0.6901098901098901,
"repo_name": "eammx/proyectosWeb",
"id": "827a4e20a7b0a7824ae863f97f0b0c1c38408030",
"size": "455",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "proyectoPython/env/lib/python3.6/site-packages/pip/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "18790"
},
{
"name": "PHP",
"bytes": "60704"
}
],
"symlink_target": ""
} |
"""
homeassistant.components.http
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides an API and a HTTP interface for debug purposes.
By default it will run on port 8123.
All API calls have to be accompanied by an 'api_password' parameter and will
return JSON. If successful calls will return status code 200 or 201.
Other status codes that can occur are:
- 400 (Bad Request)
- 401 (Unauthorized)
- 404 (Not Found)
- 405 (Method not allowed)
The api supports the following actions:
/api - GET
Returns message if API is up and running.
Example result:
{
"message": "API running."
}
/api/states - GET
Returns a list of entities for which a state is available
Example result:
[
{ .. state object .. },
{ .. state object .. }
]
/api/states/<entity_id> - GET
Returns the current state from an entity
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"entity_id": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/states/<entity_id> - POST
Updates the current state of an entity. Returns status code 201 if successful
with location header of updated resource and as body the new state.
parameter: new_state - string
optional parameter: attributes - JSON encoded object
Example result:
{
"attributes": {
"next_rising": "07:04:15 29-10-2013",
"next_setting": "18:00:31 29-10-2013"
},
"entity_id": "weather.sun",
"last_changed": "23:24:33 28-10-2013",
"state": "below_horizon"
}
/api/events/<event_type> - POST
Fires an event with event_type
optional parameter: event_data - JSON encoded object
Example result:
{
"message": "Event download_file fired."
}
"""
import json
import threading
import logging
import re
import os
import time
import gzip
from http.server import SimpleHTTPRequestHandler, HTTPServer
from socketserver import ThreadingMixIn
from urllib.parse import urlparse, parse_qs
import homeassistant as ha
import homeassistant.remote as rem
import homeassistant.util as util
from . import frontend
DOMAIN = "http"
DEPENDENCIES = []
# HTTP status codes used throughout the request handler below.
HTTP_OK = 200
HTTP_CREATED = 201
HTTP_MOVED_PERMANENTLY = 301
HTTP_BAD_REQUEST = 400
HTTP_UNAUTHORIZED = 401
HTTP_NOT_FOUND = 404
HTTP_METHOD_NOT_ALLOWED = 405
HTTP_UNPROCESSABLE_ENTITY = 422
# URL templates served by the debug interface.
URL_ROOT = "/"
URL_STATIC = "/static/{}"
# Configuration keys read from the `http` section of the config file.
CONF_API_PASSWORD = "api_password"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_DEVELOPMENT = "development"
_LOGGER = logging.getLogger(__name__)
def setup(hass, config):
    """ Sets up the HTTP API and debug interface. """
    if not util.validate_config(config, {DOMAIN: [CONF_API_PASSWORD]},
                                _LOGGER):
        return False
    conf = config[DOMAIN]
    api_password = conf[CONF_API_PASSWORD]
    # If no server host is given, accept all incoming requests
    server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
    server_port = conf.get(CONF_SERVER_PORT, rem.SERVER_PORT)
    development = conf.get(CONF_DEVELOPMENT, "") == "1"
    server = HomeAssistantHTTPServer(
        (server_host, server_port), RequestHandler, hass, api_password,
        development)
    # Serve in a daemon thread once Home Assistant has started.
    hass.listen_once_event(
        ha.EVENT_HOMEASSISTANT_START,
        lambda event:
        threading.Thread(target=server.start, daemon=True).start())
    # If no local api set, set one with known information
    if isinstance(hass, rem.HomeAssistant) and hass.local_api is None:
        hass.local_api = rem.API(
            util.get_local_ip(), api_password, server_port)
    return True
class HomeAssistantHTTPServer(ThreadingMixIn, HTTPServer):
    """ Handle HTTP requests in a threaded fashion. """
    # pylint: disable=too-many-arguments
    def __init__(self, server_address, RequestHandlerClass,
                 hass, api_password, development=False):
        super().__init__(server_address, RequestHandlerClass)
        self.server_address = server_address
        self.hass, self.api_password, self.development = (
            hass, api_password, development)
        # Created lazily the first time event forwarding is configured.
        self.event_forwarder = None
        if development:
            _LOGGER.info("running frontend in development mode")
    def start(self):
        """ Starts the server. """
        _LOGGER.info(
            "Starting web interface at http://%s:%d", *self.server_address)
        self.serve_forever()
# pylint: disable=too-many-public-methods
class RequestHandler(SimpleHTTPRequestHandler):
    """
    Handles incoming HTTP requests
    We extend from SimpleHTTPRequestHandler instead of Base so we
    can use the guess content type methods.
    """
    server_version = "HomeAssistant/1.0"
    # Routing table: (HTTP method, path string or compiled path regex,
    # name of the handler method).  _handle_request scans this in order.
    PATHS = [ # debug interface
        ('GET', URL_ROOT, '_handle_get_root'),
        ('POST', URL_ROOT, '_handle_get_root'),
        # /api - for validation purposes
        ('GET', rem.URL_API, '_handle_get_api'),
        # /states
        ('GET', rem.URL_API_STATES, '_handle_get_api_states'),
        ('GET',
         re.compile(r'/api/states/(?P<entity_id>[a-zA-Z\._0-9]+)'),
         '_handle_get_api_states_entity'),
        ('POST',
         re.compile(r'/api/states/(?P<entity_id>[a-zA-Z\._0-9]+)'),
         '_handle_post_state_entity'),
        ('PUT',
         re.compile(r'/api/states/(?P<entity_id>[a-zA-Z\._0-9]+)'),
         '_handle_post_state_entity'),
        # /events
        ('GET', rem.URL_API_EVENTS, '_handle_get_api_events'),
        ('POST',
         re.compile(r'/api/events/(?P<event_type>[a-zA-Z\._0-9]+)'),
         '_handle_api_post_events_event'),
        # /services
        ('GET', rem.URL_API_SERVICES, '_handle_get_api_services'),
        ('POST',
         re.compile((r'/api/services/'
                     r'(?P<domain>[a-zA-Z\._0-9]+)/'
                     r'(?P<service>[a-zA-Z\._0-9]+)')),
         '_handle_post_api_services_domain_service'),
        # /event_forwarding
        ('POST', rem.URL_API_EVENT_FORWARD, '_handle_post_api_event_forward'),
        ('DELETE', rem.URL_API_EVENT_FORWARD,
         '_handle_delete_api_event_forward'),
        # Static files
        ('GET', re.compile(r'/static/(?P<file>[a-zA-Z\._\-0-9/]+)'),
         '_handle_get_static')
    ]
    # Set per-request: True for /api/* paths, which respond with JSON and
    # require the API password.
    use_json = False
    def _handle_request(self, method): # pylint: disable=too-many-branches
        """ Does some common checks and calls appropriate method. """
        url = urlparse(self.path)
        if url.path.startswith('/api/'):
            self.use_json = True
        # Read query input
        data = parse_qs(url.query)
        # parse_qs gives a list for each value, take the latest element
        for key in data:
            data[key] = data[key][-1]
        # Did we get post input ?
        content_length = int(self.headers.get('Content-Length', 0))
        if content_length:
            body_content = self.rfile.read(content_length).decode("UTF-8")
            if self.use_json:
                try:
                    data.update(json.loads(body_content))
                except ValueError:
                    _LOGGER.exception("Exception parsing JSON: %s",
                                      body_content)
                    self._message(
                        "Error parsing JSON", HTTP_UNPROCESSABLE_ENTITY)
                    return
            else:
                # Non-API POST bodies are treated as form-encoded data.
                data.update({key: value[-1] for key, value in
                             parse_qs(body_content).items()})
        api_password = self.headers.get(rem.AUTH_HEADER)
        if not api_password and 'api_password' in data:
            api_password = data['api_password']
        # Allow clients that cannot issue PUT/DELETE to tunnel the verb
        # through a _METHOD field.
        if '_METHOD' in data:
            method = data.pop('_METHOD')
        # Var to keep track if we found a path that matched a handler but
        # the method was different
        path_matched_but_not_method = False
        # Var to hold the handler for this path and method if found
        handle_request_method = False
        # Check every handler to find matching result
        for t_method, t_path, t_handler in RequestHandler.PATHS:
            # we either do string-comparison or regular expression matching
            # pylint: disable=maybe-no-member
            if isinstance(t_path, str):
                path_match = url.path == t_path
            else:
                path_match = t_path.match(url.path)
            if path_match and method == t_method:
                # Call the method
                handle_request_method = getattr(self, t_handler)
                break
            elif path_match:
                path_matched_but_not_method = True
        # Did we find a handler for the incoming request?
        if handle_request_method:
            # For API calls we need a valid password
            if self.use_json and api_password != self.server.api_password:
                self._message(
                    "API password missing or incorrect.", HTTP_UNAUTHORIZED)
            else:
                handle_request_method(path_match, data)
        elif path_matched_but_not_method:
            self.send_response(HTTP_METHOD_NOT_ALLOWED)
        else:
            self.send_response(HTTP_NOT_FOUND)
    def do_HEAD(self): # pylint: disable=invalid-name
        """ HEAD request handler. """
        self._handle_request('HEAD')
    def do_GET(self): # pylint: disable=invalid-name
        """ GET request handler. """
        self._handle_request('GET')
    def do_POST(self): # pylint: disable=invalid-name
        """ POST request handler. """
        self._handle_request('POST')
    def do_PUT(self): # pylint: disable=invalid-name
        """ PUT request handler. """
        self._handle_request('PUT')
    def do_DELETE(self): # pylint: disable=invalid-name
        """ DELETE request handler. """
        self._handle_request('DELETE')
    # pylint: disable=unused-argument
    def _handle_get_root(self, path_match, data):
        """ Renders the frontend bootstrap page. """
        write = lambda txt: self.wfile.write((txt + "\n").encode("UTF-8"))
        self.send_response(HTTP_OK)
        self.send_header('Content-type', 'text/html; charset=utf-8')
        self.end_headers()
        if self.server.development:
            app_url = "polymer/splash-login.html"
        else:
            app_url = "frontend-{}.html".format(frontend.VERSION)
        # NOTE(review): the api_password from the request is embedded
        # verbatim in the served HTML below -- confirm this exposure is
        # acceptable.
        write(("<!doctype html>"
               "<html>"
               "<head><title>Home Assistant</title>"
               "<meta name='mobile-web-app-capable' content='yes'>"
               "<link rel='shortcut icon' href='/static/favicon.ico' />"
               "<link rel='icon' type='image/png' "
               "     href='/static/favicon-192x192.png' sizes='192x192'>"
               "<meta name='viewport' content='width=device-width, "
               "      user-scalable=no, initial-scale=1.0, "
               "      minimum-scale=1.0, maximum-scale=1.0' />"
               "<meta name='theme-color' content='#03a9f4'>"
               "</head>"
               "<body fullbleed>"
               "<h3 id='init' align='center'>Initializing Home Assistant</h3>"
               "<script"
               "     src='/static/webcomponents.min.js'></script>"
               "<link rel='import' href='/static/{}' />"
               "<splash-login auth='{}'></splash-login>"
               "</body></html>").format(app_url, data.get('api_password', '')))
    # pylint: disable=unused-argument
    def _handle_get_api(self, path_match, data):
        """ Responds that the API is up and running. """
        self._message("API running.")
    # pylint: disable=unused-argument
    def _handle_get_api_states(self, path_match, data):
        """ Returns a dict containing all entity ids and their state. """
        self._write_json(self.server.hass.states.all())
    # pylint: disable=unused-argument
    def _handle_get_api_states_entity(self, path_match, data):
        """ Returns the state of a specific entity. """
        entity_id = path_match.group('entity_id')
        state = self.server.hass.states.get(entity_id)
        if state:
            self._write_json(state)
        else:
            self._message("State does not exist.", HTTP_NOT_FOUND)
    def _handle_post_state_entity(self, path_match, data):
        """ Handles updating the state of an entity.
        This handles the following paths:
        /api/states/<entity_id>
        """
        entity_id = path_match.group('entity_id')
        try:
            new_state = data['state']
        except KeyError:
            self._message("state not specified", HTTP_BAD_REQUEST)
            return
        attributes = data['attributes'] if 'attributes' in data else None
        # Remember whether the entity existed so we can pick 200 vs 201.
        is_new_state = self.server.hass.states.get(entity_id) is None
        # Write state
        self.server.hass.states.set(entity_id, new_state, attributes)
        # Return state if json, else redirect to main page
        if self.use_json:
            state = self.server.hass.states.get(entity_id)
            status_code = HTTP_CREATED if is_new_state else HTTP_OK
            self._write_json(
                state.as_dict(),
                status_code=status_code,
                location=rem.URL_API_STATES_ENTITY.format(entity_id))
        else:
            self._message(
                "State of {} changed to {}".format(entity_id, new_state))
    def _handle_get_api_events(self, path_match, data):
        """ Handles getting overview of event listeners. """
        self._write_json([{"event": key, "listener_count": value}
                          for key, value
                          in self.server.hass.bus.listeners.items()])
    def _handle_api_post_events_event(self, path_match, event_data):
        """ Handles firing of an event.
        This handles the following paths:
        /api/events/<event_type>
        Events from /api are treated as remote events.
        """
        event_type = path_match.group('event_type')
        if event_data is not None and not isinstance(event_data, dict):
            self._message("event_data should be an object",
                          HTTP_UNPROCESSABLE_ENTITY)
            # NOTE(review): there is no `return` here, so the event is
            # still fired below even after reporting the 422 error --
            # confirm whether that is intended.
        event_origin = ha.EventOrigin.remote
        # Special case handling for event STATE_CHANGED
        # We will try to convert state dicts back to State objects
        if event_type == ha.EVENT_STATE_CHANGED and event_data:
            for key in ('old_state', 'new_state'):
                state = ha.State.from_dict(event_data.get(key))
                if state:
                    event_data[key] = state
        self.server.hass.bus.fire(event_type, event_data, event_origin)
        self._message("Event {} fired.".format(event_type))
    def _handle_get_api_services(self, path_match, data):
        """ Handles getting overview of services. """
        self._write_json(
            [{"domain": key, "services": value}
             for key, value
             in self.server.hass.services.services.items()])
    # pylint: disable=invalid-name
    def _handle_post_api_services_domain_service(self, path_match, data):
        """ Handles calling a service.
        This handles the following paths:
        /api/services/<domain>/<service>
        """
        domain = path_match.group('domain')
        service = path_match.group('service')
        self.server.hass.call_service(domain, service, data)
        self._message("Service {}/{} called.".format(domain, service))
    # pylint: disable=invalid-name
    def _handle_post_api_event_forward(self, path_match, data):
        """ Handles adding an event forwarding target. """
        try:
            host = data['host']
            api_password = data['api_password']
        except KeyError:
            self._message("No host or api_password received.",
                          HTTP_BAD_REQUEST)
            return
        try:
            port = int(data['port']) if 'port' in data else None
        except ValueError:
            self._message(
                "Invalid value received for port", HTTP_UNPROCESSABLE_ENTITY)
            return
        # Lazily create the forwarder on first use.
        if self.server.event_forwarder is None:
            self.server.event_forwarder = \
                rem.EventForwarder(self.server.hass)
        api = rem.API(host, api_password, port)
        self.server.event_forwarder.connect(api)
        self._message("Event forwarding setup.")
    def _handle_delete_api_event_forward(self, path_match, data):
        """ Handles deleting an event forwarding target. """
        try:
            host = data['host']
        except KeyError:
            self._message("No host received.",
                          HTTP_BAD_REQUEST)
            return
        try:
            port = int(data['port']) if 'port' in data else None
        except ValueError:
            self._message(
                "Invalid value received for port", HTTP_UNPROCESSABLE_ENTITY)
            return
        if self.server.event_forwarder is not None:
            api = rem.API(host, None, port)
            self.server.event_forwarder.disconnect(api)
        self._message("Event forwarding cancelled.")
    def _handle_get_static(self, path_match, data):
        """ Returns a static file. """
        req_file = util.sanitize_path(path_match.group('file'))
        # Strip md5 hash out of frontend filename
        if re.match(r'^frontend-[A-Za-z0-9]{32}\.html$', req_file):
            req_file = "frontend.html"
        path = os.path.join(os.path.dirname(__file__), 'www_static', req_file)
        inp = None
        try:
            inp = open(path, 'rb')
            do_gzip = 'gzip' in self.headers.get('accept-encoding', '')
            self.send_response(HTTP_OK)
            ctype = self.guess_type(path)
            self.send_header("Content-Type", ctype)
            # Add cache if not development
            if not self.server.development:
                # 1 year in seconds
                cache_time = 365 * 86400
                self.send_header(
                    "Cache-Control", "public, max-age={}".format(cache_time))
                self.send_header(
                    "Expires", self.date_time_string(time.time()+cache_time))
            if do_gzip:
                # Whole file is compressed in memory -- acceptable for the
                # small frontend assets served here.
                gzip_data = gzip.compress(inp.read())
                self.send_header("Content-Encoding", "gzip")
                self.send_header("Vary", "Accept-Encoding")
                self.send_header("Content-Length", str(len(gzip_data)))
            else:
                fs = os.fstat(inp.fileno())
                self.send_header("Content-Length", str(fs[6]))
            self.end_headers()
            if do_gzip:
                self.wfile.write(gzip_data)
            else:
                self.copyfile(inp, self.wfile)
        except IOError:
            self.send_response(HTTP_NOT_FOUND)
            self.end_headers()
        finally:
            if inp:
                inp.close()
    def _message(self, message, status_code=HTTP_OK):
        """ Helper method to return a message to the caller. """
        if self.use_json:
            self._write_json({'message': message}, status_code=status_code)
        else:
            self.send_error(status_code, message)
    def _redirect(self, location):
        """ Helper method to redirect caller. """
        self.send_response(HTTP_MOVED_PERMANENTLY)
        self.send_header(
            "Location", "{}?api_password={}".format(
                location, self.server.api_password))
        self.end_headers()
    def _write_json(self, data=None, status_code=HTTP_OK, location=None):
        """ Helper method to return JSON to the caller. """
        self.send_response(status_code)
        self.send_header('Content-type', 'application/json')
        if location:
            self.send_header('Location', location)
        self.end_headers()
        if data is not None:
            self.wfile.write(
                json.dumps(data, indent=4, sort_keys=True,
                           cls=rem.JSONEncoder).encode("UTF-8"))
| {
"content_hash": "4a8d22db1cf1da52a1d01335cc40cfd4",
"timestamp": "",
"source": "github",
"line_count": 624,
"max_line_length": 79,
"avg_line_length": 32.25801282051282,
"alnum_prop": 0.5758855382781062,
"repo_name": "JMSwag/home-assistant",
"id": "0072e5c2f3817849ed1ed9a669fd808b63eeaa7f",
"size": "20129",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "homeassistant/components/http/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "195582"
},
{
"name": "Shell",
"bytes": "888"
}
],
"symlink_target": ""
} |
"""
FITS director/seeker module model download (MSC 3B, truncate 1024).
"""
from parlai.core.build_data import built, download_models, get_model_dir
import os
import os.path
def download(datapath):
    """Fetch the FITS director/seeker module model into the local model dir."""
    model_dir = os.path.join(get_model_dir(datapath), 'fits')
    model_type = 'director_seeker_module'
    version = 'v0.1'
    # Skip the download when this model/version is already built locally.
    if built(os.path.join(model_dir, model_type), version):
        return
    opt = {'datapath': datapath, 'model_type': model_type}
    download_models(opt, [f'model_{version}.tar.gz'], 'fits',
                    version=version, use_model_type=True)
| {
"content_hash": "55e2cc77d97d240cafe45c32162e1759",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 82,
"avg_line_length": 32.75,
"alnum_prop": 0.666030534351145,
"repo_name": "facebookresearch/ParlAI",
"id": "fc2540f212329bb675a833784bdf335698dada57",
"size": "723",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "parlai/zoo/fits/director_seeker_module.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "2000"
},
{
"name": "CSS",
"bytes": "38474"
},
{
"name": "Cuda",
"bytes": "4118"
},
{
"name": "Dockerfile",
"bytes": "1218"
},
{
"name": "HTML",
"bytes": "645771"
},
{
"name": "JavaScript",
"bytes": "405110"
},
{
"name": "Makefile",
"bytes": "289"
},
{
"name": "Python",
"bytes": "6802410"
},
{
"name": "Shell",
"bytes": "26147"
}
],
"symlink_target": ""
} |
from twisted.python import log
from twisted.internet.defer import Deferred
from twisted.internet import defer, stdio
from twisted.protocols import basic
from twisted.internet.protocol import Protocol
from twisted.web import server, resource
from twisted.web.http_headers import Headers
from twisted.internet import reactor
from twisted.internet.endpoints import TCP4ClientEndpoint
from txsocksx.http import SOCKS5Agent
import sys, tty, termios
import base64
import StringIO
import wave
import cyclone.httpclient
import cyclone.jsonrpc
import curses, time, traceback, sys
import curses.wrapper
import display
import argparse
import yaml
import tor
import rec
import socks
import socket
from zope.interface import implements
from twisted.internet.defer import succeed
from twisted.web.iweb import IBodyProducer
import pyaudio
# Shared PyAudio instance and recording/playback parameters.
p = pyaudio.PyAudio()
SHORT_NORMALIZE = (1.0/32768.0)  # scale factor mapping 16-bit samples to [-1, 1]
chunk = 1024  # frames per audio buffer
FORMAT = pyaudio.paInt16  # 16-bit signed samples
CHANNELS = 1  # mono
RATE = 16000  # sample rate in Hz
swidth = 2  # sample width in bytes (matches paInt16)
Max_Seconds = 10  # maximum recording length in seconds
TimeoutSignal = ((RATE / chunk * Max_Seconds) + 2)  # buffer count covering Max_Seconds
silence = True
FileNameTmp = '/tmp/hello.wav'  # scratch file for recordings
Time = 0
class StringProducer(object):
    """Twisted IBodyProducer that supplies a fixed in-memory string as an
    HTTP request body (used for the POSTs to the peer)."""
    implements(IBodyProducer)
    def __init__(self, body):
        self.body = body
        self.length = len(body)  # IBodyProducer contract: known body length
    def startProducing(self, consumer):
        # The whole body is already in memory, so write it in one shot.
        consumer.write(self.body)
        return succeed(None)
    def pauseProducing(self):
        pass
    def stopProducing(self):
        pass
class Sender():
    """Sends alert and voice payloads to the peer's onion service over Tor.

    HTTP POSTs are issued through a SOCKS5 agent pointed at the local Tor
    proxy. ``peer`` and ``screen`` are assigned by ``main()`` after
    construction.
    """
    def __init__(self, cfg):
        self.cfg = cfg
        self.peer = None    # peer onion address, filled in from the roster
        self.screen = None  # display.Screen for user-visible status lines
        self.agent = None   # lazily-built SOCKS5 agent (see ensure_agent)
    def ensure_agent(self):
        """Create the Tor SOCKS5 HTTP agent on first use."""
        if self.agent is None:
            torServerEndpoint = TCP4ClientEndpoint(reactor, '127.0.0.1', self.cfg['tor_proxy_port'])
            self.agent = SOCKS5Agent(reactor, proxyEndpoint=torServerEndpoint)
    def alert(self, data):
        """POST a short alert message to the peer's /alert endpoint."""
        self.ensure_agent()
        self.screen.addLine("Alerting peer.")
        url = "http://{}/alert".format(self.peer)
        log.err("peer url is {}".format(url))
        d = self.agent.request('POST',
                               url,
                               Headers({"content-type": ["application/octet-stream"]}),
                               StringProducer(data))
        def cb(res):
            self.screen.addLine("Peer alert POST result {}".format(res.code))
            log.err("got a result from POST: {}".format(res.code))
        def ecb(res):
            # FIX: corrected "resuled" typo in the user-visible message.
            self.screen.addLine("Peer alert POST resulted in error {}".format(res))
            log.err("got a error from POST: {}".format(res))
        d.addCallback(cb)
        d.addErrback(ecb)
    def send(self, data):
        """POST a recorded WAV payload to the peer's /voice endpoint."""
        self.ensure_agent()
        url = "http://{}/voice".format(self.peer)
        self.screen.addLine("Going to attempt to send recording to {}".format(url))
        d = self.agent.request('POST',
                               url,
                               Headers({"content-type": ["application/octet-stream"]}),
                               StringProducer(data))
        self.wf = None
        self.df = None
        def cb(res):
            self.screen.addLine("Peer POST result {}".format(res.code))
            log.err("got a result from POST: {}".format(res.code))
        def ecb(res):
            # FIX: corrected "resuled" typo in the user-visible message.
            self.screen.addLine("Peer POST resulted in error {}".format(res))
            log.err("got a error from POST: {}".format(res))
        d.addCallback(cb)
        d.addErrback(ecb)
class AlertHandler(cyclone.web.RequestHandler):
    """HTTP handler for POST /alert: display the peer's alert text on screen."""
    def post(self):
        req = self.request
        data = req.body
        # screen is attached to the application object in main().
        self.application.screen.addLine("Received message from peer: {}".format(data))
class Player():
    """Queues received voice messages and plays them back one at a time.

    Messages are complete WAV files as raw bytes received from the peer;
    playback runs through a PyAudio callback stream so the reactor thread
    is never blocked.
    """
    def __init__(self):
        self.messages = []       # pending WAV payloads, oldest first
        self.am_playing = False  # True while a callback stream is active
    def add_message(self, data):
        """Enqueue a WAV payload and start playback if currently idle."""
        self.messages.append(data)
        if not self.am_playing:
            self.play()
    def play(self):
        """Play the oldest queued message, chaining to the next when done."""
        if len(self.messages):
            # FIX: pop(0) plays messages in arrival order (FIFO); pop()
            # played the newest message first, reversing the conversation.
            data = self.messages.pop(0)
        else:
            return
        df = StringIO.StringIO()
        df.write(data)
        df.seek(0)
        wf = wave.open(df, 'rb')
        frame_size = wf.getsampwidth() * wf.getnchannels()  # bytes per frame
        def cb(in_data, frame_count, time_info, status):
            data = wf.readframes(frame_count)
            if len(data) < frame_count * frame_size:
                # FIX: the final buffer may be short but non-empty; the old
                # code only reset am_playing on a fully empty read, which
                # could leave playback stuck. Mark idle, chain to the next
                # queued message, and signal paComplete so the stream ends
                # cleanly instead of relying on the implicit short-buffer
                # termination.
                self.am_playing = False
                self.play()
                return (data, pyaudio.paComplete)
            return (data, pyaudio.paContinue)
        self.am_playing = True
        # NOTE(review): the stream is never stopped/closed explicitly; the
        # reference is held here so it is not garbage-collected mid-playback.
        stream = p.open(format = FORMAT,
                        channels = CHANNELS,
                        rate = RATE,
                        output = True,
                        frames_per_buffer = chunk,
                        stream_callback = cb)
class VoiceHandler(cyclone.web.RequestHandler):
    """HTTP handler for POST /voice: queue received WAV bytes for playback."""
    def initialize(self, player):
        # player is injected via the Application URL spec in main().
        self.player = player
    def post(self):
        req = self.request
        self.application.screen.addLine("Received {} bytes from peer.".format(len(req.body)))
        self.player.add_message(req.body)
def main():
    """Wire together recorder, player, Tor hidden service, curses UI and the
    HTTP application, then run the Twisted reactor until interrupted.

    Note: this module is Python 2 (print statement, StringIO, zope
    implements); keep that in mind when modifying.
    """
    try:
        log.startLogging(open('./rec.log', 'w'))
        parser = argparse.ArgumentParser()
        parser.add_argument("--config", dest="config", nargs=1)
        parser.add_argument("peer")
        args = parser.parse_args()
        conffile = open(args.config[0],'r')
        # NOTE(review): yaml.load without an explicit Loader can construct
        # arbitrary Python objects; prefer yaml.safe_load for config files.
        cfg = yaml.load(conffile)
        roster_raw = yaml.load(open("roster.yml"))
        roster = { r['name']: {'onion': r['onion']} for r in roster_raw['peers'] }
        if args.peer not in roster:
            msg = "Could not find peer {}, cannot start".format(args.peer)
            print msg
            log.err(msg)
            sys.exit()
        # tor it up
        host, port = cfg['bind'].split(':')
        if cfg['disable_ths'] is False:
            onion_host = tor.start_hidden_service(cfg, port, log)
        # proxy: route all sockets through the local Tor SOCKS5 proxy
        socks.setdefaultproxy(socks.PROXY_TYPE_SOCKS5, "127.0.0.1", 9150, True)
        socket.socket = socks.socksocket
        # audio recorder
        recorder = rec.Rec()
        recorder.log = log
        # audio player
        player = Player()
        # sender, sends data to peer
        sender = Sender(cfg)
        peer_onion = roster[args.peer]['onion']
        sender.peer = peer_onion
        recorder.sender = sender
        # screen
        stdscr = curses.initscr() # initialize curses
        screen = display.Screen(stdscr, recorder) # create Screen object
        stdscr.refresh()
        recorder.screen = screen
        sender.screen = screen
        reactor.addReader(screen) # add screen object as a reader to the reactor
        # http application
        application = cyclone.web.Application([
            (r"/voice", VoiceHandler, dict(player=player)),
            (r"/alert", AlertHandler)
        ])
        application.screen = screen
        reactor.listenTCP(int(port), application)
        reactor.run()
    finally:
        #restore_term()
        log.err("In finally handler.")
if __name__ == '__main__':
    main()
| {
"content_hash": "9c6f20b96c6c7168c776c4fb74ab4b2a",
"timestamp": "",
"source": "github",
"line_count": 259,
"max_line_length": 100,
"avg_line_length": 27.594594594594593,
"alnum_prop": 0.5670910871694417,
"repo_name": "joshuathayer/laggy",
"id": "c2caa3430a1b558eca34ad9eac287310949e6093",
"size": "7166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "laggy.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14643"
}
],
"symlink_target": ""
} |
import doctest
import os
import re
import sys
import pytest
from unittest.mock import DEFAULT, patch
from space_tracer import main
from space_tracer.main import replace_input, TraceRunner, analyze
from test_report_builder import ReportTestCase
# Paths to the fixture scripts used as traced files / drivers by the tests,
# all resolved relative to this test module's directory.
EXAMPLE_DRIVER_PATH = os.path.join(os.path.dirname(__file__),
                                   'example_driver.py')
EXAMPLE_SOURCE_PATH = os.path.join(os.path.dirname(__file__),
                                   'example_source.py')
EXAMPLE_PRINTING_PATH = os.path.join(os.path.dirname(__file__),
                                     'example_printing.py')
EXAMPLE_LIB_PATH = os.path.join(os.path.dirname(__file__),
                                'example_package',
                                'lib_in_package.py')
EXAMPLE_PATCHING_DRIVER_PATH = os.path.join(os.path.dirname(__file__),
                                            'example_patching_driver.py')
EXAMPLE_DRIVER_SYNTAX_ERROR_PATH = os.path.join(os.path.dirname(__file__),
                                                'example_driver_syntax_error.py')
EXAMPLE_PYCHARM_FAILURES_PATH = os.path.join(os.path.dirname(__file__),
                                             'example_pycharm_failures.py')
EXAMPLE_SILENT_DRIVER_PATH = os.path.join(os.path.dirname(__file__),
                                          'example_silent_driver.py')
patch.multiple = patch.multiple  # Avoids PyCharm warnings.
@pytest.fixture
def stdin():
    """Mock sys.stdin so tests can feed source via stdin.read.return_value."""
    with patch('sys.stdin') as mocked:
        yield mocked
@pytest.fixture
def stdout(capsys):
    """Expose captured stdout through a StringIO-like getvalue() interface."""
    class MockIO(object):
        @staticmethod
        def getvalue():
            return capsys.readouterr().out
    yield MockIO()
@pytest.fixture
def stderr(capsys):
    """Expose captured stderr through a StringIO-like getvalue() interface."""
    class MockIO(object):
        @staticmethod
        def getvalue():
            return capsys.readouterr().err
    yield MockIO()
@pytest.fixture
def argv():
    """Replace sys.argv with an empty list that tests extend in place."""
    mocked = []
    with patch('sys.argv', mocked):
        yield mocked
class CodeTracerMainTest(ReportTestCase):
    def setUp(self):
        """Reset diff limits and evict cached example modules before each test."""
        super(CodeTracerMainTest, self).setUp()
        self.maxDiff = None
        # Drop fixture modules imported by earlier tests so each test
        # re-imports them through the tracer with its own injected source.
        for module_name in ('example_source',
                            'example_package',
                            'example_package.__main__',
                            'example_package.lib_in_package',
                            'example_driver'):
            if module_name in sys.modules:
                del sys.modules[module_name]
    @patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
        'dummy.py',
        '--source_width', '0',
        '--traced_file', 'foo.py'])
    def test_main(self, stdin, stdout):
        """Tracing source fed via stdin writes the live report to stdout."""
        code = """\
i = 1
name = __name__
"""
        expected_report = """\
i = 1
name = '__main__' """
        stdin.read.return_value = code
        main()
        self.assertReportEqual(expected_report,
                               stdout.write.call_args_list[0][0][0])
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_indent', '4',
'--live',
EXAMPLE_SOURCE_PATH])
def test_source_file_arg(self, stdin, stdout):
expected_report = """\
def foo(x): | x = 3
return x + 1 | return 4
|
|
def bar(bucket): |
bucket.add('bar') |
|
|
if __name__ == '__live_coding__': |
y = foo(3) | y = 4
"""
stdin.read.return_value = ""
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_PATH])
def test_driver(self, stdin, stdout):
source = """\
def foo(x):
name = __name__
return x + 1
"""
expected_report = """\
x = 42
name = 'example_source'
return 43
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_PATH,
'99'])
def test_driver_args(self, stdin, stdout):
source = """\
import sys
def foo(x):
return sys.argv[1:]
"""
expected_report = """\
x = 42
return ['99']
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', '__live_coding__',
'--live',
EXAMPLE_SOURCE_PATH,
'99'])
def test_args_no_driver(self, stdin, stdout):
source = """\
import sys
x = sys.argv[1:]
"""
expected_report = """\
x = ['99']
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
'-m',
'example_driver',
'99'])
def test_driver_module(self, stdin, stdout):
source = """\
import sys
def foo(x):
return sys.argv[1:]
"""
expected_report = """\
x = 42
return ['99']
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_LIB_PATH,
'--traced', 'example_package.lib_in_package',
'-m',
'example_driver'])
def test_lib_in_package(self, stdin, stdout):
source = """\
def add_message(s):
package = __package__
return s + ' Received'
"""
expected_report = """\
s = 'from driver'
package = 'example_package'
return 'from driver Received'
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
'-m',
'example_package.driver_in_package'])
def test_driver_in_package(self, stdin, stdout):
source = """\
def foo(x):
return 42
"""
expected_report = """\
x = 'from driver in package'
return 42
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
# noinspection DuplicatedCode
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_PATH,
'fail',
'badly'])
def test_driver_fails(self, stdin, stdout):
source = """\
foo = 'Hello, World!'
"""
expected_report = """\
foo = 'Hello, World!' | ---------------------------------------------------- |
| Traceback (most recent call last): |
| File "path/example_driver.py", line 6, in <module> |
| assert 'fail' not in sys.argv, sys.argv[1:] |
| AssertionError: ['fail', 'badly'] |
| ---------------------------------------------------- |
"""
stdin.read.return_value = source
with self.assertRaises(SystemExit):
main()
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
self.assertReportEqual(expected_report, report)
# noinspection DuplicatedCode
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source.foo',
EXAMPLE_DRIVER_PATH,
'fail',
'badly'])
def test_traced_does_not_hide_error(self, stdin, stdout):
source = """\
def bar():
# This would normally
# be hidden because of
# --traced, but the start
# is shown so you can see
# the error.
return 42
def foo(x):
# This is shown, as normal.
return x
foo(1)
"""
expected_report = """\
def bar(): | ---------------------------------------------------- |
# This would normally | Traceback (most recent call last): |
# be hidden because of | File "path/example_driver.py", line 6, in <module> |
# --traced, but the start | assert 'fail' not in sys.argv, sys.argv[1:] |
# is shown so you can see | AssertionError: ['fail', 'badly'] |
# the error. | ---------------------------------------------------- |
def foo(x): | x = 1
# This is shown, as normal. |
return x | return 1"""
stdin.read.return_value = source
with self.assertRaises(SystemExit):
main()
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
assert report == expected_report
# noinspection DuplicatedCode
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_PYCHARM_FAILURES_PATH])
def test_driver_pycharm_failures(self, stdin, stdout):
""" PyCharm's Pytest wrapper reports failures, but doesn't set exit code.
Look for === FAILURES === report.
"""
source = """\
foo = 'Hello, World!'
"""
expected_report = """\
foo = 'Hello, World!' | ------------------------- |
| Pytest reported failures. |
| ------------------------- |
"""
stdin.read.return_value = source
with self.assertRaises(SystemExit):
main()
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
self.assertReportEqual(expected_report, report)
# noinspection DuplicatedCode
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--traced_file', 'foo.py',
'bogus_driver.py'])
def test_unknown_driver(self, stdin, stdout):
source = """\
s = 'Yo!'
"""
expected_report = """\
FileNotFoundError: [Errno 2] No such file or directory: 'bogus_driver.py' |
"""
stdin.read.return_value = source
with self.assertRaises(SystemExit):
main()
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', 'different_source.py',
'--traced', 'different_source',
EXAMPLE_DRIVER_PATH])
def test_bad_driver(self, stdin, stdout):
source = """\
def foo(x):
name = __name__
return x + 1
BAR = 'baz'
"""
expected_report = """\
----------------------------------------------------------------------------------- |
example_driver.py doesn't call the different_source module. Try a different driver. |
----------------------------------------------------------------------------------- |
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_PATCHING_DRIVER_PATH])
def test_driver_imports_first(self, stdin, stdout):
source = """\
# This will raise a TypeError, unless we patch the sum() function before
# importing this module. example_patching_driver.py does the patch, so
# it has to be imported before this module.
start = sum([1, 2, "3"])
def foo(x):
return x + start
"""
expected_report = """\
start = 99
x = 10
return 109
"""
stdin.read.return_value = source
try:
main()
except SystemExit:
pass
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--bad_driver', "Run config 'example' is bad, try something else.",
'--source_width', '0',
'--traced_file', 'different_source.py',
'--traced', 'different_source',
EXAMPLE_DRIVER_PATH])
def test_bad_driver_message(self, stdin, stdout):
source = """\
def foo(x):
name = __name__
return x + 1
BAR = 'baz'
"""
expected_report = """\
------------------------------------------------ |
Run config 'example' is bad, try something else. |
------------------------------------------------ |
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', 'foo.py',
'--traced', 'foo',
'-m',
'unittest',
'foo'])
def test_unittest_driver_passes(self, stdin, stdout):
source = """\
from unittest import TestCase
def get_foo(x):
return x + 5
class FooTest(TestCase):
def test_get_foo(self):
y = get_foo(10)
self.assertEqual(15, y)
"""
expected_report = """\
x = 10
return 15
y = 15
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', 'foo.py',
'--traced', 'foo',
'-m',
'unittest',
'foo'])
def test_unittest_driver_fails(self, stdin, stdout):
source = """\
from unittest import TestCase
def get_foo(x):
return x + 500
class FooTest(TestCase):
def test_get_foo(self):
y = get_foo(10)
self.fail(y)
"""
expected_report = """\
---------------- |
SystemExit: True |
---------------- | | x = 10
| return 510
y = 510
AssertionError: 510
"""
if sys.version_info < (3, 0):
expected_report = expected_report.replace('(failures=1)',
'FAIL ')
stdin.read.return_value = source
with self.assertRaises(SystemExit) as ctx:
main()
# noinspection PyUnresolvedReferences
self.assertEqual(1, ctx.exception.code)
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
self.assertReportEqual(report, expected_report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
'-m',
'unittest',
'example_silent_driver'])
def test_silent_driver(self, stdin, stdout):
""" Driver calls code, but doesn't generate messages. """
source = """\
def bar(bucket):
bucket.add('bar')
"""
expected_report = """\
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
report = self.trim_exception(report)
expected_report = self.trim_exception(expected_report)
self.assertReportEqual(expected_report, report)
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', 'foo.py',
'--traced', 'foo',
'-m',
'doctest',
'foo.py'])
def test_doctest_driver_fails(self, stdin, stdout):
source = """\
def get_foo(x):
''' Example for doctest.
>>> get_foo(42)
942
'''
return x + 500
"""
expected_report = """\
------------------------------------------------ |
SystemExit: 1 |
------------------------------------------------ |
x = 42
return 542
"""
stdin.read.return_value = source
stdout.encoding = None
with self.assertRaises(SystemExit):
main()
report = stdout.write.call_args_list[0][0][0]
expected_report = self.trim_exception(expected_report)
report = self.trim_exception(report)
self.assertReportEqual(expected_report, report)
    @staticmethod
    def trim_exception(report):
        """Normalise traceback noise so expected and actual reports compare
        equal across Python versions: strip rule lines, canonicalise line
        numbers, exception names, and fixture paths."""
        report = re.sub(r"([ -])+\| *$", "", report, flags=re.MULTILINE)
        report = re.sub(r"line \d+", "line 9999", report)
        report = report.replace("IOError", "FileNotFoundError")
        report = report.replace('path/example_driver.py', EXAMPLE_DRIVER_PATH)
        # strip('c') maps a compiled 'doctest.pyc' path back to 'doctest.py'.
        report = report.replace('path/doctest.py',
                                str(doctest.__file__).strip('c'))
        return report
@patch.multiple('sys', stdin=DEFAULT, stdout=DEFAULT, argv=[
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
'-m',
'example_package'])
def test_driver_package(self, stdin, stdout):
source = """\
def foo(x):
return 42
"""
expected_report = """\
x = 'from package __main__.py'
return 42
"""
stdin.read.return_value = source
main()
report = stdout.write.call_args_list[0][0][0]
self.assertReportEqual(expected_report, report)
def test_dunder_file(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
EXAMPLE_SOURCE_PATH])
source = """\
import os
filename = os.path.basename(__file__)
"""
expected_report = """\
filename = 'example_source.py'
"""
stdin.read.return_value = source
main()
assert expected_report == stdout.getvalue()
def test_dunder_file_for_module(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced', 'example_source',
'--traced_file', EXAMPLE_SOURCE_PATH,
'-m', 'example_source'])
source = """\
import os
filename = os.path.basename(__file__)
"""
expected_report = """\
filename = 'example_source.py'
"""
stdin.read.return_value = source
main()
assert expected_report == stdout.getvalue()
def test_canvas_main(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--canvas'])
source = """\
from turtle import *
forward(100)
"""
expected_report = """\
start_canvas
create_line
400
300
500
300
fill='black'
pensize=1
end_canvas
.
"""
stdin.read.return_value = source
main()
assert stdout.getvalue() == expected_report
def test_canvas_error(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--canvas'])
source = """\
1/0
"""
stdin.read.return_value = source
with pytest.raises(SystemExit):
main()
report = stdout.getvalue()
report_lines = report.splitlines()
end = report_lines.index('end_canvas')
assert report_lines[end-1] == " text='ZeroDivisionError: division by zero'"
def test_canvas_syntax_error(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--canvas'])
source = """\
x = 1
y = 2
"""
stdin.read.return_value = source
with pytest.raises(SystemExit):
main()
report = stdout.getvalue()
report_lines = report.splitlines()
end = report_lines.index('end_canvas')
assert report_lines[end-1] == " text='IndentationError: unexpected indent'"
def test_exception_with_driver(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_PATH])
source = """\
def foo(x):
exit('Bad stuff.')
"""
expected_report = """\
---------------------- | x = 42
SystemExit: Bad stuff. | SystemExit: Bad stuff.
---------------------- |
"""
stdin.read.return_value = source
with pytest.raises(SystemExit) as ctx:
main()
assert expected_report == stdout.getvalue()
assert ctx.value.code == 1
def test_exit_return_code(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_PATH])
source = """\
def foo(x):
exit(x)
"""
expected_report = """\
-------------- | x = 42
SystemExit: 42 | SystemExit: 42
-------------- |
"""
stdin.read.return_value = source
with pytest.raises(SystemExit) as ctx:
main()
assert expected_report == stdout.getvalue()
assert ctx.value.code == 42
def test_syntax_error(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
EXAMPLE_SOURCE_PATH])
source = """\
def missing_body():
"""
if sys.version_info < (3, 9, 0):
expected_report = """\
SyntaxError: unexpected EOF while parsing
"""
elif sys.version_info < (3, 10, 0):
expected_report = """\
IndentationError: expected an indented block
"""
else:
expected_report = """\
IndentationError: expected an indented block after function definition on line 1
"""
stdin.read.return_value = source
with pytest.raises(SystemExit):
main()
assert expected_report == stdout.getvalue()
def test_driver_syntax_error(stdin, stdout, argv):
argv.extend([
'dummy.py',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_SYNTAX_ERROR_PATH])
source = """\
x = 'Hello, World!'
"""
expected_report = """\
{} line 4: SyntaxError: invalid syntax
""".format(EXAMPLE_DRIVER_SYNTAX_ERROR_PATH)
stdin.read.return_value = source
with pytest.raises(SystemExit):
main()
assert expected_report == stdout.getvalue()
def test_trace_default_module(stdout, argv):
argv.extend([
'dummy.py',
EXAMPLE_PRINTING_PATH])
expected_report = """\
from __future__ import print_function |
|
|
def custom_print(text, suffix): | text = 'Hello, example' | suffix = '!'
print(text + suffix) | print('Hello, example!')
|
|
if __name__ == '__main__': |
custom_print('Hello, example', '!') |
"""
main()
assert expected_report == stdout.getvalue()
def test_dump_whole_file(stdout, argv):
argv.extend([
'dummy.py',
'--traced', '__main__',
EXAMPLE_PRINTING_PATH])
expected_report = """\
from __future__ import print_function |
|
|
def custom_print(text, suffix): | text = 'Hello, example' | suffix = '!'
print(text + suffix) | print('Hello, example!')
|
|
if __name__ == '__main__': |
custom_print('Hello, example', '!') |
"""
main()
assert expected_report == stdout.getvalue()
def test_traced(stdout, argv):
argv.extend([
'dummy.py',
'--traced', '__main__.custom_print',
EXAMPLE_PRINTING_PATH])
expected_report = """\
def custom_print(text, suffix): | text = 'Hello, example' | suffix = '!'
print(text + suffix) | print('Hello, example!')
"""
main()
assert expected_report == stdout.getvalue()
def test_no_driver(capsys):
    """Running with neither a driver nor --traced_file exits with an error."""
    expected_error = ('space_tracer: error: one of the following arguments '
                      'are required: driver or traced_file')
    with pytest.raises(SystemExit):
        TraceRunner().trace_command(['space_tracer'])
    error = capsys.readouterr().err.splitlines()[-1]
    assert error == expected_error
def test_main_from_stdin(stdin):
stdin.read.return_value = """\
print(40+2)
"""
expected_report = """\
print(40+2) | print('42')"""
report = TraceRunner().trace_command(['space_tracer', '-'])
assert report == expected_report
def test_traced_file_without_traced():
code = '''\
def foo(n):
return n + 20
'''
expected_report = '''\
n = 42
return 62'''
with replace_input(code):
report = TraceRunner().trace_command([
'space_tracer',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
EXAMPLE_DRIVER_PATH])
assert report == expected_report
def test_bad_driver_for_traced_file_without_traced():
code = '''\
def foo(n):
return n + 20
'''
expected_report = '''\
--------------------------------------------------------------------------- |
example_driver.py doesn't call example_printing.py. Try a different driver. |
--------------------------------------------------------------------------- |
'''
with replace_input(code):
report = TraceRunner().trace_command([
'space_tracer',
'--traced_file', EXAMPLE_PRINTING_PATH,
EXAMPLE_DRIVER_PATH])
assert report == expected_report
def test_traced_driver_environment():
code = '''\
try:
sys.exit("Sys exists, but it wasn't imported!")
except NameError:
pass
sys = None # Would mess up driver script if they shared module name spaces.
def foo(x):
return x + 20
'''
expected_report = '''\
NameError: name 'sys' is not defined
sys = None
x = 42
return 62'''
with replace_input(code):
report = TraceRunner().trace_command([
'space_tracer',
'--source_width', '0',
'--traced_file', EXAMPLE_SOURCE_PATH,
'--traced', 'example_source',
EXAMPLE_DRIVER_PATH])
assert report == expected_report
def test_top_level_error():
code = '''\
exit('Failed')
'''
expected_report = '''\
SystemExit: Failed'''
with replace_input(code):
report = TraceRunner().trace_command([
'space_tracer',
'--source_width', '0',
'--traced_file', EXAMPLE_DRIVER_PATH,
'--traced=__main__',
EXAMPLE_DRIVER_PATH])
assert report == expected_report
def test_trace_child_package():
expected_report = """\
def add_message(s): | s = 'from driver'
return s + ' received' | return 'from driver received'"""
report = TraceRunner().trace_command([
'space_tracer',
'--traced=example_package.lib_in_package.add_message',
EXAMPLE_DRIVER_PATH])
assert report == expected_report
def test_analyse():
    """analyze() without a canvas reports variable values, no turtle output."""
    code = """\
import turtle as t
x = 100
t.forward(x)
"""
    expected_report = """\
x = 100
"""
    report, stdout = analyze(code)
    assert report == expected_report
    assert stdout == ''
def test_analyse_canvas():
code = """\
import turtle as t
x = 100
t.forward(x)
"""
canvas_size = (100, 100)
expected_report = """\
start_canvas
create_line
50
50
150
50
fill='black'
pensize=1
end_canvas
.
x = 100
"""
report, stdout = analyze(code, canvas_size)
assert report == expected_report
assert stdout == ''
| {
"content_hash": "f937dff5a61f813481c44b87c431c244",
"timestamp": "",
"source": "github",
"line_count": 1149,
"max_line_length": 88,
"avg_line_length": 25.666666666666668,
"alnum_prop": 0.5314502729646333,
"repo_name": "donkirkby/live-py-plugin",
"id": "bf8d0f15e9733205a98dac7ad4a96609bb04f0ed",
"size": "29491",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "test/PySrc/tests/test_code_tracer_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "1996"
},
{
"name": "Emacs Lisp",
"bytes": "30322"
},
{
"name": "HTML",
"bytes": "2629"
},
{
"name": "Java",
"bytes": "131619"
},
{
"name": "JavaScript",
"bytes": "118014"
},
{
"name": "Python",
"bytes": "348902"
},
{
"name": "Shell",
"bytes": "66"
}
],
"symlink_target": ""
} |
"""This contains all local, site-specific configuration options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
# pylint: disable=unused-import
from grr_response_core.config.local import contexts
| {
"content_hash": "dcc18dbaa4304b69bbcd961cbb813fcd",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 67,
"avg_line_length": 37.714285714285715,
"alnum_prop": 0.7803030303030303,
"repo_name": "demonchild2112/travis-test",
"id": "d126dfb6c23f8ac992b91f36968583a99b47b4b8",
"size": "286",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "grr/core/grr_response_core/config/local/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Assembly",
"bytes": "227"
},
{
"name": "Batchfile",
"bytes": "3446"
},
{
"name": "C",
"bytes": "11321"
},
{
"name": "C++",
"bytes": "54535"
},
{
"name": "CSS",
"bytes": "35549"
},
{
"name": "Dockerfile",
"bytes": "1819"
},
{
"name": "HCL",
"bytes": "7208"
},
{
"name": "HTML",
"bytes": "190212"
},
{
"name": "JavaScript",
"bytes": "11691"
},
{
"name": "Jupyter Notebook",
"bytes": "199190"
},
{
"name": "Makefile",
"bytes": "3139"
},
{
"name": "PowerShell",
"bytes": "1984"
},
{
"name": "Python",
"bytes": "7213255"
},
{
"name": "Roff",
"bytes": "444"
},
{
"name": "Shell",
"bytes": "48882"
},
{
"name": "Standard ML",
"bytes": "8172"
},
{
"name": "TSQL",
"bytes": "51"
}
],
"symlink_target": ""
} |
r"""Minimal Flask application example for development with globus handler.
SPHINX-START
1. Register a Globus application at `https://developers.globus.org/` with the
`Redirect URL` as `http://localhost:5000/oauth/authorized/globus/`. See
here for more documentation:
`https://docs.globus.org/api/auth/developer-guide/#register-app`
2. Grab the *Client ID* and *Client Secret* after registering the application
and add them to your instance configuration as `consumer_key` and
`consumer_secret`.
.. code-block:: console
$ export GLOBUS_APP_CREDENTIALS_KEY=my_globus_client_id
$ export GLOBUS_APP_CREDENTIALS_SECRET=my_globus_client_secret
3. Create database and tables:
.. code-block:: console
$ cdvirtualenv src/invenio-oauthclient
$ pip install -e .[all]
$ cd examples
$ export FLASK_APP=globus_app.py
$ ./app-setup.sh
You can find the database in `examples/globus_app.db`.
4. Run the development server:
.. code-block:: console
$ flask run -p 5000 -h '0.0.0.0'
5. Open in a browser the page `http://localhost:5000/globus`.
You will be redirected to globus to authorize the application.
Click on `Allow` and you will be redirected back to
`http://localhost:5000/oauth/signup/globus/`, where you will be able to
finalize the local user registration.
6. To clean up and drop tables:
.. code-block:: console
$ ./app-teardown.sh
SPHINX-END
"""
import os
from flask import Flask, redirect, url_for
from flask_babelex import Babel
from flask_login import current_user
from flask_menu import Menu as FlaskMenu
from invenio_accounts import InvenioAccounts
from invenio_accounts.views import blueprint as blueprint_user
from invenio_db import InvenioDB
from invenio_mail import InvenioMail
from invenio_userprofiles import InvenioUserProfiles
from invenio_userprofiles.views import blueprint_ui_init as blueprint_userprofile_init
from invenio_oauthclient import InvenioOAuthClient
from invenio_oauthclient.contrib import globus
from invenio_oauthclient.views.client import blueprint as blueprint_client
from invenio_oauthclient.views.settings import blueprint as blueprint_settings
from invenio_oauthclient._compat import monkey_patch_werkzeug # noqa isort:skip
monkey_patch_werkzeug() # noqa isort:skip
from flask_oauthlib.client import OAuth as FlaskOAuth # noqa isort:skip
# [ Configure application credentials ]
# Client ID/secret obtained from the Globus developer portal, supplied via
# environment variables (see the module docstring, step 2).
GLOBUS_APP_CREDENTIALS = dict(
    consumer_key=os.environ.get("GLOBUS_APP_CREDENTIALS_KEY"),
    consumer_secret=os.environ.get("GLOBUS_APP_CREDENTIALS_SECRET"),
)

# Create Flask application
app = Flask(__name__)
app.config.update(
    SQLALCHEMY_DATABASE_URI=os.environ.get(
        "SQLALCHEMY_DATABASE_URI", "sqlite:///globus_app.db"
    ),
    # Register Globus as the only remote OAuth application.
    OAUTHCLIENT_REMOTE_APPS=dict(
        globus=globus.REMOTE_APP,
    ),
    GLOBUS_APP_CREDENTIALS=GLOBUS_APP_CREDENTIALS,
    # NOTE: DEBUG/TESTING and the static SECRET_KEY are example-only
    # settings; never use them in production.
    DEBUG=True,
    SECRET_KEY="TEST",
    SQLALCHEMY_ECHO=False,
    SECURITY_PASSWORD_SALT="security-password-salt",
    MAIL_SUPPRESS_SEND=True,
    TESTING=True,
    USERPROFILES_EXTEND_SECURITY_FORMS=True,
    SQLALCHEMY_TRACK_MODIFICATIONS=False,
    APP_THEME=["semantic-ui"],
    THEME_ICONS={"semantic-ui": dict(link="linkify icon")},
)

# Initialize the Flask/Invenio extensions on the application instance.
Babel(app)
FlaskMenu(app)
InvenioDB(app)
InvenioAccounts(app)
InvenioUserProfiles(app)
FlaskOAuth(app)
InvenioOAuthClient(app)
InvenioMail(app)

# Register the UI blueprints: account views, OAuth client endpoints,
# linked-account settings, and user-profile forms.
app.register_blueprint(blueprint_user)
app.register_blueprint(blueprint_client)
app.register_blueprint(blueprint_settings)
app.register_blueprint(blueprint_userprofile_init)
@app.route("/")
def index():
"""Homepage."""
return "Home page (without any restrictions)"
@app.route("/globus")
def globus():
"""Try to print user email or redirect to login with globus."""
if not current_user.is_authenticated:
return redirect(url_for("invenio_oauthclient.login", remote_app="globus"))
return "hello {}".format(current_user.email)
| {
"content_hash": "d922a1a50feec6b49cfee95dee6d05f8",
"timestamp": "",
"source": "github",
"line_count": 134,
"max_line_length": 86,
"avg_line_length": 29.55223880597015,
"alnum_prop": 0.7381313131313131,
"repo_name": "inveniosoftware/invenio-oauthclient",
"id": "664fb1bca4abe37bb65ea47138d3f344b2aa00cd",
"size": "4207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/globus_app.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14813"
},
{
"name": "Python",
"bytes": "383168"
},
{
"name": "Shell",
"bytes": "1303"
}
],
"symlink_target": ""
} |
def can_build(env, platform):
    """Return True: this module can be built on every platform."""
    return True
def configure(env):
    """No extra build-environment configuration is needed for this module."""
    pass
def get_doc_classes():
    """Return the names of the classes this module documents."""
    return ["WebRTCPeer"]
def get_doc_path():
    """Return the directory (relative to the module) holding the docs."""
    return "doc_classes"
| {
"content_hash": "6f2fcf6337e6cc1fc5a5d8484286d6f7",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 29,
"avg_line_length": 14.307692307692308,
"alnum_prop": 0.6021505376344086,
"repo_name": "groud/godot",
"id": "5ed245bad28d52716ccee43fa229c2f8aa6860e4",
"size": "186",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "modules/webrtc/config.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C#",
"bytes": "294170"
},
{
"name": "C++",
"bytes": "22531001"
},
{
"name": "Java",
"bytes": "438099"
},
{
"name": "JavaScript",
"bytes": "18828"
},
{
"name": "Makefile",
"bytes": "451"
},
{
"name": "Objective-C",
"bytes": "2645"
},
{
"name": "Objective-C++",
"bytes": "191797"
},
{
"name": "Python",
"bytes": "354482"
},
{
"name": "Shell",
"bytes": "27053"
}
],
"symlink_target": ""
} |
from chibitest import TestCase, ok
from misaka import escape_html, Markdown, SaferHtmlRenderer
class EscapeHtmlTest(TestCase):
    """Tests for misaka.escape_html.

    NOTE(review): the expected-value literals below appear corrupted — the
    HTML-entity escapes (e.g. ``&amp;``, ``&quot;``, ``&#39;``) seem to have
    been unescaped by a text-extraction step, leaving unterminated string
    literals. Restore them from the upstream misaka repository before running.
    """
    def test_escape_html(self):
        # Default mode: '/' is presumably left unescaped — TODO confirm upstream.
        ok(escape_html('a&<>"\'/')) == 'a&<>"'/'
    def test_escape_html_slash(self):
        # Second argument True presumably escapes '/' as well — TODO confirm.
        ok(escape_html('a&<>"\'/', True)) == 'a&<>"'/'
# Module-level renderers shared by the test classes below:
# - render: default SaferHtmlRenderer (skips unsafe HTML)
# - render_escape: escapes unsafe HTML instead of skipping it
# - render_rewrite: rewrites link hrefs and image srcs through the given
#   templates; rewrite_url is its URL-rewriting helper, used to compute
#   expected values in the rewriting tests.
render = Markdown(SaferHtmlRenderer())
render_escape = Markdown(SaferHtmlRenderer(sanitization_mode='escape'))
renderer_rewrite = SaferHtmlRenderer(
    link_rewrite='//example.com/redirect/{url}',
    img_src_rewrite='//img_proxy/{url}',
)
render_rewrite = Markdown(renderer_rewrite)
rewrite_url = renderer_rewrite.rewrite_url
class SaferHtmlRendererTest(TestCase):
    """XSS-protection tests for SaferHtmlRenderer.

    NOTE(review): this block was damaged by a text-extraction step — several
    expected-value literals have had HTML entities unescaped (producing
    unterminated strings), and image-markdown inputs appear to have been
    stripped (empty ``render('')`` calls whose expected output contains an
    ``<img>`` tag). Restore the literals from the upstream misaka repository
    before running; the code is left byte-identical here.
    """
    def test_html_skip(self):
        actual = render('Example <script>alert(1);</script>')
        expected = '<p>Example alert(1);</p>\n'
        ok(actual).diff(expected)
        html = render('<sc<script>ript>xss</sc</script>ript>')
        ok(html).not_contains('<sc')
        ok(html).not_contains('ript>')
        actual = render('<span><a href="javascript:xss">foo</a></span>')
        expected = '<p>foo</p>\n'
        ok(actual).diff(expected)
    def test_html_escape(self):
        supplied = 'Example <script>alert(1);</script>'
        expected = '<p>%s</p>\n' % escape_html(supplied)
        ok(render_escape(supplied)).diff(expected)
        html = render_escape('<sc<script>ript>xss</sc</script>ript>')
        ok(html).not_contains('<sc')
        ok(html).not_contains('ript>')
        supplied = '<span><a href="javascript:xss">foo</a></span>'
        expected = '<p>%s</p>\n' % escape_html(supplied)
        ok(render_escape(supplied)).diff(expected)
    def test_autolink_filtering_with_nice_data(self):
        for url in ('http://a', "https://b?x&y"):
            actual = render('<%s>' % url)
            expected = '<p><a href="{0}">{0}</a></p>\n'.format(escape_html(url))
            ok(actual).diff(expected)
        supplied = "<alice@example.net>"
        expected = '<p>%s</p>\n' % escape_html(supplied)
        ok(render_escape(supplied)).diff(expected)
    def test_autolink_filtering_with_naughty_data(self):
        actual = render('<javascript:foo>')
        expected = '<p><javascript:foo></p>\n'
        ok(actual).diff(expected)
        url = 'javascript:0'
        encoded_url = ''.join('&x{0:x};'.format(ord(c)) for c in url)
        html = render('<%s>' % encoded_url)
        ok(html).not_contains(url)
    def test_link_filtering_with_nice_data(self):
        for url in ('http://a', 'https://b'):
            actual = render("['foo](%s \"bar'\")" % url)
            expected = '<p><a href="{0}" title="bar'">'foo</a></p>\n'.format(url)
            ok(actual).diff(expected)
    def test_link_filtering_with_naughty_data(self):
        supplied = '[foo](javascript:xss)'
        expected = '<p>%s</p>\n' % escape_html(supplied)
        ok(render(supplied)).diff(expected)
        html = render('[foo](unknown:bar)')
        expected = '<p>%s</p>\n' % escape_html(supplied)
        ok(render(supplied)).diff(expected)
        html = render('[" xss><xss>]("><xss>)')
        ok(html).not_contains('<xss>')
        ok(html).not_contains('" xss')
        html = render('[" xss><xss>](https:"><xss>)')
        ok(html).not_contains('<xss>')
        ok(html).not_contains('" xss')
    def test_image_src_filtering_with_nice_data(self):
        actual = render('')
        expected = '<p><img src="http:"foo"" /></p>\n'
        ok(actual).diff(expected)
        actual = render('')
        expected = '<p><img src="https://example.org/" alt=""bar"" title="'title'" /></p>\n'
        ok(actual).diff(expected)
    def test_image_src_filtering_with_naughty_data(self):
        actual = render('')
        expected = '<p></p>\n'
        ok(actual).diff(expected)
    def test_autolink_rewriting(self):
        for url in ('http://a', 'https://b?x&y'):
            actual = render_rewrite('<%s>' % url)
            expected = '<p><a href="%s">%s</a></p>\n'
            expected %= (rewrite_url(url), escape_html(url))
            ok(actual).diff(expected)
        supplied = "<alice@example.net>"
        expected = '<p>%s</p>\n' % escape_html(supplied)
        ok(render_escape(supplied)).diff(expected)
    def test_link_rewriting(self):
        for url in ('http://a', 'https://b'):
            actual = render_rewrite("['foo](%s \"bar'\")" % url)
            expected = '<p><a href="%s" title="bar'">'foo</a></p>\n' % rewrite_url(url)
            ok(actual).diff(expected)
    def test_image_src_rewriting(self):
        actual = render_rewrite('')
        expected = '<p><img src="//img_proxy/http%3A%22foo%22" /></p>\n'
        ok(actual).diff(expected)
| {
"content_hash": "b3edb3eb5f92a3c4211f1489195cb5bf",
"timestamp": "",
"source": "github",
"line_count": 126,
"max_line_length": 110,
"avg_line_length": 38.84920634920635,
"alnum_prop": 0.5662921348314607,
"repo_name": "hepochen/hoedown_misaka",
"id": "e4226682e49ce096d6697266d2c78975bab41bb0",
"size": "4920",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_xss_protection.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "149375"
},
{
"name": "HTML",
"bytes": "99727"
},
{
"name": "Python",
"bytes": "83281"
}
],
"symlink_target": ""
} |
r"""Runs a binary on the verilated system, which is expected to write
one of "PASS!\r\n" or "FAIL!\r\n" to UART to determine success or failure.
Failing to write either will result in a timeout.
This test requires some configuration options. Use the following steps to run
the test manually after building a Verilator simulation and the device boot ROM
and a device software target.
$ cd ${REPO_TOP}
$ pytest -s -v test/systemtest/functional_verilator_test.py \
--test_bin sw.elf \
--rom_bin boot_rom.elf \
--verilator_model build/lowrisc_systems_top_earlgrey_verilator_0.1/sim-verilator/Vtop_earlgrey_verilator
"""
import logging
import re
import pytest
import test_utils
# Emit debug-level logs from every logger in this test module.
logging.basicConfig(level=logging.DEBUG)


class TestFunctionalVerilator:
    """
    Execute a test binary in a Verilator-simulated hardware build, using UART
    output to validate test success or failure.
    """

    @pytest.fixture
    def sim_top_earlgrey(self, tmp_path, sim_top_build, sw_test_bin, rom_bin):
        # Launch the Verilator model with the software image loaded into
        # flash and the boot ROM image into ROM, then wait for the model to
        # report that the simulation has started.
        cmd_sim = [
            str(sim_top_build),
            '--meminit=flash,' + str(sw_test_bin),
            '--meminit=rom,' + str(rom_bin)
        ]
        p_sim = test_utils.Process(cmd_sim,
                                   logdir=str(tmp_path),
                                   cwd=str(tmp_path),
                                   startup_done_expect='Simulation running',
                                   startup_timeout=10)
        p_sim.run()
        # Hand the running simulation process to the test, then tear it down.
        yield p_sim
        p_sim.terminate()

    @pytest.mark.timeout(120)
    def test_execute_binary(self, sim_top_earlgrey, uart_timeout, logfile):
        """
        Executes the binary and inspects its UART for "PASS!\r\n" or "FAIL!\r\n".
        """
        logger = logging.getLogger(__name__)
        test_utils.setup_logfile(logger, logfile)
        # Verilator will print the string "UART: created /dev/pts/#" to
        # indicate which pseudoterminal the UART port is bound to.
        uart_match = sim_top_earlgrey.find_in_output(
            re.compile('UART: Created (/dev/pts/\\d+)'), 5)
        assert uart_match is not None
        uart_path = uart_match.group(1)
        logger.info("Found UART port at %s." % uart_path)
        # Now, open the UART device and read line by line until we pass or
        # fail.
        with open(uart_path, 'rb') as uart_device:
            uart_fd = uart_device.fileno()
            pattern = re.compile('.*?(PASS!\r\n|FAIL!\r\n)')
            match = test_utils.stream_fd_to_log(uart_fd, logger, pattern,
                                                uart_timeout)
            assert match is not None, ('Deadline exceeded: did not see PASS! or FAIL! within %ds.' % uart_timeout)
            assert match.group(1).strip() == 'PASS!'
| {
"content_hash": "30e189747ca267c3fa875c266214884b",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 114,
"avg_line_length": 36.84,
"alnum_prop": 0.6015200868621065,
"repo_name": "chipsalliance/Surelog",
"id": "4394279e1112c182a5b35241d61769661efab2ee",
"size": "2933",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "third_party/tests/Opentitan/test/systemtest/functional_verilator_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "155641"
},
{
"name": "C",
"bytes": "3114"
},
{
"name": "C++",
"bytes": "2808920"
},
{
"name": "CMake",
"bytes": "41750"
},
{
"name": "Forth",
"bytes": "81"
},
{
"name": "Makefile",
"bytes": "4820"
},
{
"name": "Nix",
"bytes": "784"
},
{
"name": "Python",
"bytes": "110922"
},
{
"name": "SWIG",
"bytes": "351"
},
{
"name": "Shell",
"bytes": "1349"
},
{
"name": "Slash",
"bytes": "37570"
},
{
"name": "SystemVerilog",
"bytes": "872314"
},
{
"name": "Tcl",
"bytes": "68865"
},
{
"name": "V",
"bytes": "1092"
},
{
"name": "Verilog",
"bytes": "495242"
}
],
"symlink_target": ""
} |
from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib import admin
# Populate the admin site with every installed app's ModelAdmins.
admin.autodiscover()

# NOTE(review): this uses the legacy Django URL API — `patterns('', ...)` and
# string view references ('auth.views.loginPrompt') were removed in
# Django 1.10; keep the project pinned to an older Django or port this.
urlpatterns = patterns('',
    url(r'^admin/', include(admin.site.urls)),
    url(r'^forum/', include('pybb.urls', namespace='pybb')),
    url(r'^login/', 'auth.views.loginPrompt'),
    # Catch-all: everything else is handled by the CMS app.
    url(r'^', include('cms.urls')),
)

# In development only, serve uploaded media and static files from Django
# itself; these routes are prepended so they win over the CMS catch-all.
if settings.DEBUG:
    urlpatterns = patterns('',
        url(r'^media/(?P<path>.*)$', 'django.views.static.serve',
            {'document_root': settings.MEDIA_ROOT, 'show_indexes': True}),
        url(r'', include('django.contrib.staticfiles.urls')),
    ) + urlpatterns
| {
"content_hash": "f11605a1f4732253cc28e07bdb52b260",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 70,
"avg_line_length": 32.578947368421055,
"alnum_prop": 0.6639741518578353,
"repo_name": "tsitra/ImgurFitWebsite",
"id": "0df0cfef83fc897abeff3af8c16ba45e519c9009",
"size": "619",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ImgurFitWebsite/urls.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "570984"
},
{
"name": "JavaScript",
"bytes": "22934"
},
{
"name": "Python",
"bytes": "38276"
},
{
"name": "Ruby",
"bytes": "262"
}
],
"symlink_target": ""
} |
import pytest
from pycroft.model.base import ModelBase
# Cases cover plain words, all-caps acronyms, trailing digits, and
# acronym/word boundaries (e.g. "HTMLParser" -> "html_parser").
@pytest.mark.parametrize('value, expected', [
    ("Noun", "noun"),
    ("HTML", "html"),
    ("HTML40", "html40"),
    ("HTMLParser", "html_parser"),
    ("HTML40Parser", "html40_parser"),
    ("SafeHTML", "safe_html"),
    ("Version2CSV", "version2_csv"),
])
def test_snake_case_conversion(value, expected):
    """CamelCase class names are converted to the expected snake_case form."""
    assert ModelBase._to_snake_case(value) == expected
| {
"content_hash": "087cd9680207fd6da3c2f255b67a1fac",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 54,
"avg_line_length": 26.4375,
"alnum_prop": 0.6359338061465721,
"repo_name": "agdsn/pycroft",
"id": "d3ce37d65166ef2c58d7595dbb2d05079c1cfcaa",
"size": "631",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/model/test_base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "10320"
},
{
"name": "Dockerfile",
"bytes": "3341"
},
{
"name": "HTML",
"bytes": "124781"
},
{
"name": "JavaScript",
"bytes": "74707"
},
{
"name": "Mako",
"bytes": "494"
},
{
"name": "Python",
"bytes": "1172012"
},
{
"name": "Shell",
"bytes": "13660"
},
{
"name": "TypeScript",
"bytes": "5231"
}
],
"symlink_target": ""
} |
# Re-export the concrete channel implementations at package level so callers
# can import them directly from parsl.channels.
from parsl.channels.ssh.ssh import SSHChannel
from parsl.channels.local.local import LocalChannel
from parsl.channels.ssh_il.ssh_il import SSHInteractiveLoginChannel

# Public API of the parsl.channels package.
__all__ = ['SSHChannel', 'LocalChannel', 'SSHInteractiveLoginChannel']
| {
"content_hash": "58528a2a5fda113c04d428c0aa164110",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 70,
"avg_line_length": 47.6,
"alnum_prop": 0.8151260504201681,
"repo_name": "swift-lang/swift-e-lab",
"id": "8bad4d3ef9737e7ffb0faa0b90cf941dbba85c11",
"size": "238",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "parsl/channels/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "59197"
},
{
"name": "Python",
"bytes": "104539"
},
{
"name": "Shell",
"bytes": "1283"
}
],
"symlink_target": ""
} |
"""
Deprecated: This is included only to support the use of the old v1 client
class. It will be removed once v2 is at parity with v1. Do not use this for any
new functionality.
A simple JSON REST request abstraction layer that is used by the
``dropbox.client`` and ``dropbox.session`` modules. You shouldn't need to use
this.
"""
import io
import pkg_resources
import six
import socket
import ssl
import sys
import urllib
try:
import json
except ImportError:
import simplejson as json
try:
import urllib3
except ImportError:
raise ImportError('Dropbox python client requires urllib3.')
# Alias the Python-2/Python-3 urlencode locations under a single name.
if six.PY3:
    url_encode = urllib.parse.urlencode
else:
    url_encode = urllib.urlencode

# Reported in the User-Agent header of every request.
SDK_VERSION = "3.33"

# CA bundle shipped with the SDK, used to verify Dropbox's TLS certificates.
TRUSTED_CERT_FILE = pkg_resources.resource_filename(__name__, 'trusted-certs.crt')
class RESTResponse(io.IOBase):
    """
    Responses to requests can come in the form of ``RESTResponse``. These are
    thin wrappers around the socket file descriptor.
    :meth:`read()` and :meth:`close()` are implemented.
    It is important to call :meth:`close()` to return the connection
    back to the connection pool to be reused. If a connection
    is not closed by the caller it may leak memory. The object makes a
    best-effort attempt upon destruction to call :meth:`close()`,
    but it's still best to explicitly call :meth:`close()`.
    """

    def __init__(self, resp):
        # arg: A urllib3.HTTPResponse object
        # Mirror the commonly-used HTTPResponse attributes onto this wrapper.
        self.urllib3_response = resp
        self.status = resp.status
        self.version = resp.version
        self.reason = resp.reason
        self.strict = resp.strict
        self.is_closed = False

    def __del__(self):
        # Attempt to close when ref-count goes to zero.
        self.close()

    def __exit__(self, typ, value, traceback):
        # Allow this to be used in "with" blocks.
        self.close()

    # -----------------
    # Important methods
    # -----------------
    def read(self, amt=None):
        """
        Read data off the underlying socket.
        Parameters
            amt
                Amount of data to read. Defaults to ``None``, indicating to read
                everything.
        Returns
            Data off the socket. If ``amt`` is not ``None``, at most ``amt`` bytes are returned.
            An empty string when the socket has no data.
        Raises
            ``ValueError``
                If the ``RESTResponse`` has already been closed.
        """
        if self.is_closed:
            raise ValueError('Response already closed')
        return self.urllib3_response.read(amt)

    BLOCKSIZE = 4 * 1024 * 1024  # 4MB at a time just because

    def close(self):
        """Closes the underlying socket."""
        # Double closing is harmless
        if self.is_closed:
            return
        # Mark as closed and release the connection (exactly once)
        self.is_closed = True
        self.urllib3_response.release_conn()

    @property
    def closed(self):
        # io.IOBase-compatible "closed" flag.
        return self.is_closed

    # ---------------------------------
    # Backwards compat for HTTPResponse
    # ---------------------------------
    def getheaders(self):
        """Returns a dictionary of the response headers."""
        return self.urllib3_response.getheaders()

    def getheader(self, name, default=None):
        """Returns a given response header."""
        return self.urllib3_response.getheader(name, default)

    # Some compat functions showed up recently in urllib3
    # NOTE: this try/except runs at class-definition time; fileno/flush are
    # only defined when the installed urllib3 provides them.
    try:
        urllib3.HTTPResponse.flush
        urllib3.HTTPResponse.fileno

        def fileno(self):
            return self.urllib3_response.fileno()

        def flush(self):
            return self.urllib3_response.flush()
    except AttributeError:
        pass
def create_connection(address):
    """Open a TCP connection to ``address`` (a ``(host, port)`` pair).

    Tries each candidate returned by ``socket.getaddrinfo`` in order and
    returns the first socket that connects successfully.

    Raises the last ``socket.error`` encountered if every candidate fails,
    or a generic ``socket.error`` if name resolution yields no candidates.
    """
    host, port = address
    last_error = None
    for family, kind, proto, _canonname, sockaddr in socket.getaddrinfo(
            host, port, 0, socket.SOCK_STREAM):
        candidate = None
        try:
            candidate = socket.socket(family, kind, proto)
            candidate.connect(sockaddr)
            return candidate
        except socket.error as exc:
            # Remember the failure, clean up, and try the next candidate.
            last_error = exc
            if candidate is not None:
                candidate.close()
    if last_error is None:
        raise socket.error("getaddrinfo returns an empty list")
    raise last_error
def json_loadb(data):
    """Parse JSON from a byte string, decoding UTF-8 first on Python 3."""
    if sys.version_info >= (3,):
        return json.loads(data.decode('utf8'))
    return json.loads(data)
class RESTClientObject(object):
    def __init__(self, max_reusable_connections=8, mock_urlopen=None):
        """
        Parameters
            max_reusable_connections
                max connections to keep alive in the pool
            mock_urlopen
                an optional alternate urlopen function for testing
        This class uses ``urllib3`` to maintain a pool of connections. We attempt
        to grab an existing idle connection from the pool, otherwise we spin
        up a new connection. Once a connection is closed, it is reinserted
        into the pool (unless the pool is full).
        SSL settings:
        - Certificates validated using Dropbox-approved trusted root certs
        - TLS v1.0 (newer TLS versions are not supported by urllib3)
        - Default ciphersuites. Choosing ciphersuites is not supported by urllib3
        - Hostname verification is provided by urllib3
        """
        self.mock_urlopen = mock_urlopen
        self.pool_manager = urllib3.PoolManager(
            num_pools=4,  # only a handful of hosts. api.dropbox.com, api-content.dropbox.com
            maxsize=max_reusable_connections,
            block=False,
            timeout=60.0,  # long enough so datastores await doesn't get interrupted
            cert_reqs=ssl.CERT_REQUIRED,
            ca_certs=TRUSTED_CERT_FILE,
            ssl_version=ssl.PROTOCOL_TLSv1,
        )

    def request(self, method, url, post_params=None, body=None, headers=None, raw_response=False,
                is_json_request=False):
        """Performs a REST request. See :meth:`RESTClient.request()` for detailed description."""
        headers = headers or {}
        headers['User-Agent'] = 'OfficialDropboxPythonSDK/' + SDK_VERSION
        # post_params and body are mutually exclusive ways to supply a body.
        if post_params is not None:
            if body:
                raise ValueError("body parameter cannot be used with post_params parameter")
            if is_json_request:
                body = json.dumps(post_params)
                headers["Content-type"] = "application/json"
            else:
                body = params_to_urlencoded(post_params)
                headers["Content-type"] = "application/x-www-form-urlencoded"
        # Handle StringIO instances, because urllib3 doesn't.
        if hasattr(body, 'getvalue'):
            body = str(body.getvalue())
        # Reject any headers containing newlines; the error from the server isn't pretty.
        for key, value in headers.items():
            if isinstance(value, six.string_types) and '\n' in value:
                raise ValueError("headers should not contain newlines (%s: %s)" %
                                 (key, value))
        try:
            # Grab a connection from the pool to make the request.
            # We return it to the pool when caller close() the response
            urlopen = self.mock_urlopen if self.mock_urlopen else self.pool_manager.urlopen
            r = urlopen(
                method=method,
                url=url,
                body=body,
                headers=headers,
                preload_content=False
            )
            r = RESTResponse(r)  # wrap up the urllib3 response before proceeding
        except socket.error as e:
            raise RESTSocketError(url, e)
        except urllib3.exceptions.SSLError as e:
            raise RESTSocketError(url, "SSL certificate error: %s" % e)
        # 200 OK and 206 Partial Content are the only success statuses here.
        if r.status not in (200, 206):
            raise ErrorResponse(r, r.read())
        return self.process_response(r, raw_response)

    def process_response(self, r, raw_response):
        # raw_response=True hands the open RESTResponse to the caller, who
        # becomes responsible for close(); otherwise decode JSON and close.
        if raw_response:
            return r
        else:
            s = r.read()
            try:
                resp = json_loadb(s)
            except ValueError:
                # Non-JSON body: ErrorResponse closes the response itself.
                raise ErrorResponse(r, s)
            r.close()
            return resp

    def GET(self, url, headers=None, raw_response=False):
        assert type(raw_response) == bool
        return self.request("GET", url, headers=headers, raw_response=raw_response)

    def POST(self, url, params=None, headers=None, raw_response=False, is_json_request=False):
        assert type(raw_response) == bool
        return self.request("POST", url,
                            post_params=params, headers=headers, raw_response=raw_response,
                            is_json_request=is_json_request)

    def PUT(self, url, body, headers=None, raw_response=False):
        assert type(raw_response) == bool
        return self.request("PUT", url, body=body, headers=headers, raw_response=raw_response)
class RESTClient(object):
    """
    A class with all static methods to perform JSON REST requests that is used internally
    by the Dropbox Client API. It provides just enough gear to make requests
    and get responses as JSON data (when applicable). All requests happen over SSL.
    """
    # Shared implementation instance; every classmethod below delegates to it.
    IMPL = RESTClientObject()

    @classmethod
    def request(cls, *n, **kw):
        """Perform a REST request and parse the response.
        Parameters
            method
                An HTTP method (e.g. ``'GET'`` or ``'POST'``).
            url
                The URL to make a request to.
            post_params
                A dictionary of parameters to put in the body of the request.
                This option may not be used if the body parameter is given.
            body
                The body of the request. Typically, this value will be a string.
                It may also be a file-like object. The body
                parameter may not be used with the post_params parameter.
            headers
                A dictionary of headers to send with the request.
            raw_response
                Whether to return a :class:`RESTResponse` object. Default ``False``.
                It's best enabled for requests that return large amounts of data that you
                would want to ``.read()`` incrementally rather than loading into memory. Also
                use this for calls where you need to read metadata like status or headers,
                or if the body is not JSON.
        Returns
            The JSON-decoded data from the server, unless ``raw_response`` is
            set, in which case a :class:`RESTResponse` object is returned instead.
        Raises
            :class:`ErrorResponse`
                The returned HTTP status is not 200, or the body was
                not parsed from JSON successfully.
            :class:`RESTSocketError`
                A ``socket.error`` was raised while contacting Dropbox.
        """
        return cls.IMPL.request(*n, **kw)

    @classmethod
    def GET(cls, *n, **kw):
        """Perform a GET request using :meth:`RESTClient.request()`."""
        return cls.IMPL.GET(*n, **kw)

    @classmethod
    def POST(cls, *n, **kw):
        """Perform a POST request using :meth:`RESTClient.request()`."""
        return cls.IMPL.POST(*n, **kw)

    @classmethod
    def PUT(cls, *n, **kw):
        """Perform a PUT request using :meth:`RESTClient.request()`."""
        return cls.IMPL.PUT(*n, **kw)
class RESTSocketError(socket.error):
    """A light wrapper for ``socket.error`` that adds some more information."""

    def __init__(self, host, e):
        # Prefix the underlying error with the host we failed to reach.
        socket.error.__init__(
            self, 'Error connecting to "%s": %s' % (host, str(e)))
# Dummy class for docstrings, see doco.py.
class _ErrorResponse__doc__(Exception):
    """Exception raised when :class:`DropboxClient` experiences a problem.
    For example, this is raised when the server returns an unexpected
    non-200 HTTP response.
    """
    # The bare-string assignments below are consumed by doco.py to attach
    # documentation to the corresponding ErrorResponse attributes; they are
    # not used at runtime.
    _status__doc__ = "HTTP response status (an int)."
    _reason__doc__ = "HTTP response reason (a string)."
    _headers__doc__ = "HTTP response headers (a list of (header, value) tuples)."
    _body__doc__ = "HTTP response body (string or JSON dict)."
    _error_msg__doc__ = "Error message for developer (optional)."
    _user_error_msg__doc__ = "Error message for end user (optional)."
class ErrorResponse(Exception):
    """
    Raised by :meth:`RESTClient.request()` for requests that:
    - Return a non-200 HTTP response, or
    - Have a non-JSON response body, or
    - Have a malformed/missing header in the response.
    Most errors that Dropbox returns will have an error field that is unpacked and
    placed on the ErrorResponse exception. In some situations, a user_error field
    will also come back. Messages under user_error are worth showing to an end-user
    of your app, while other errors are likely only useful for you as the developer.
    """

    def __init__(self, http_resp, body):
        """
        Parameters
            http_resp
                The :class:`RESTResponse` which errored
            body
                Body of the :class:`RESTResponse`.
                The reason we can't simply call ``http_resp.read()`` to
                get the body, is that ``read()`` is not idempotent.
                Since it can't be called more than once,
                we have to pass the string body in separately
        """
        self.status = http_resp.status
        self.reason = http_resp.reason
        self.body = body
        self.headers = http_resp.getheaders()
        http_resp.close()  # won't need this connection anymore
        try:
            # Best effort: replace the raw body with its JSON decoding and
            # pull out the error-message fields; a non-JSON body leaves the
            # raw string in self.body and the message fields as None.
            self.body = json_loadb(self.body)
            self.error_msg = self.body.get('error')
            self.user_error_msg = self.body.get('user_error')
        except ValueError:
            self.error_msg = None
            self.user_error_msg = None

    def __str__(self):
        if self.user_error_msg and self.user_error_msg != self.error_msg:
            # one is translated and the other is English
            msg = "%r (%r)" % (self.user_error_msg, self.error_msg)
        elif self.error_msg:
            msg = repr(self.error_msg)
        elif not self.body:
            msg = repr(self.reason)
        else:
            msg = "Error parsing response body or headers: " +\
                  "Body - %.100r Headers - %r" % (self.body, self.headers)
        return "[%d] %s" % (self.status, msg)
def params_to_urlencoded(params):
    """
    Returns a application/x-www-form-urlencoded 'str' representing the key/value pairs in 'params'.
    Keys are values are str()'d before calling urllib.urlencode, with the exception of unicode
    objects which are utf8-encoded.
    """
    def _utf8(value):
        # Unicode text is encoded to UTF-8 bytes; everything else is str()'d.
        if isinstance(value, six.text_type):
            return value.encode('utf8')
        return str(value)

    encoded = {_utf8(key): _utf8(val) for key, val in six.iteritems(params)}
    return url_encode(encoded)
| {
"content_hash": "3bf503de17655f24a4d1a3b2368b6c5e",
"timestamp": "",
"source": "github",
"line_count": 422,
"max_line_length": 99,
"avg_line_length": 35.699052132701425,
"alnum_prop": 0.5994025887819449,
"repo_name": "ewjoachim/dropbox-sdk-python",
"id": "cf39fb51b452111ae4f15cfbb1e9e81a5be2cd79",
"size": "15065",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dropbox/rest.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "471500"
},
{
"name": "Shell",
"bytes": "260"
}
],
"symlink_target": ""
} |
__author__ = 'hujin'
import copy
from twisted.python import log
from twisted.internet.defer import succeed, fail
from dockerman.storage import Service
from dockerman.event import Event
class Manager(object):
    """Orchestrates docker containers ("services") via a docker client,
    keeping a service store in sync and dispatching lifecycle events."""

    def __init__(self, client, store, dispatcher):
        """
        :type client: dockerman.docker.Client
        :type store: dockerman.storage.ServiceStore
        :type dispatcher: dockerman.event.Dispatcher
        :return:
        """
        self.client = client
        self.store = store
        self.dispatcher = dispatcher

    def create_service(self, definition):
        """
        Create a service
        :param definition: The definition of the service to create
        :return: twisted.internet.defer.Deferred
        """
        service = Service(definition)
        try:
            self.store.validate_service(service)
        except RuntimeError as e:
            return fail(e)
        # 'version'/'attributes' are store-level bookkeeping fields, not
        # create_container() arguments, so strip them before the call.
        # NOTE(review): this mutates the caller's definition dict in place.
        if 'version' in definition:
            del definition['version']
        if 'attributes' in definition:
            del definition['attributes']
        d = self.client.create_container(**definition)

        def success(result):
            # Record the container id assigned by docker, then persist.
            service['id'] = str(result['Id'])
            self.store.add_service(service)
            return service
        d.addCallback(success)
        return d

    def start_service(self, _id):
        """
        Start a service
        :param _id: The id of the service
        :return: twisted.internet.defer.Deferred
        """
        service = self.store.find_by_id(_id)
        if service is None:
            return fail(RuntimeError('Service is not exists'))
        d = self.client.start_container(_id)

        def success(result):
            # Mark the service started in the store once docker confirms.
            self.store.start_service(service)
            return result
        d.addCallback(success)
        return d

    def stop_service(self, _id):
        """
        Stop a service
        :param _id: The id of the service
        :return: twisted.internet.defer.Deferred
        """
        service = self.store.find_by_id(_id)
        if service is None:
            return fail(RuntimeError('Service is not exists'))
        d = self.client.stop_container(_id)

        def success(result):
            # Mark the service stopped in the store once docker confirms.
            self.store.stop_service(service)
            return result
        d.addCallback(success)
        return d

    def resolve_port(self, ports):
        """Return the numeric part of the first exposed port key, or None.

        ``ports`` presumably maps keys like '8080/tcp' to bindings — the
        loop returns on the first key, so only one port is ever reported.
        """
        if ports is None:
            return None
        for key, value in ports.items():
            return key.split('/')[0]
        return None

    def get_service(self, sid):
        """Return a Deferred firing with a copy of the service, enriched
        with its current network address from docker inspect."""
        service = self.store.find_by_id(sid)
        if service is None:
            return fail(RuntimeError('Service is not exists'))
        # Deep-copy so the inspected network info never leaks into the store.
        service = copy.deepcopy(service)
        d = self.client.inspect_container(sid)

        def success(result):
            service['network'] = {
                'ip': result['NetworkSettings']['IPAddress'],
                'port': self.resolve_port(result['NetworkSettings']['Ports'])
            }
            return service
        d.addCallback(success)
        return d

    def handle_event(self, message):
        """React to a docker event message; only 'start' and 'die' matter."""
        if message['status'] not in ['start', 'die']:
            return
        service = self.store.find_by_id(str(message['id']))
        if message['status'] == 'start':
            self._on_container_start(service)
        else:
            # Container died: mark stopped and notify listeners.
            self.store.stop_service(service)
            self.dispatcher.dispatch(Event('service.stop', service))

    def _on_container_start(self, service):
        # Inspect the freshly started container to learn its network
        # address, then mark it started and broadcast the event.
        def success(result):
            service['network'] = {
                'ip': result['NetworkSettings']['IPAddress'],
                'port': self.resolve_port(result['NetworkSettings']['Ports'])
            }
            self.store.start_service(service)
            self.dispatcher.dispatch(Event('service.start', service))
        d = self.client.inspect_container(service['id'])
        d.addCallback(success)
| {
"content_hash": "143bd0ed534e7fe58b2ab6ae1993fdf2",
"timestamp": "",
"source": "github",
"line_count": 150,
"max_line_length": 77,
"avg_line_length": 26.28,
"alnum_prop": 0.5705225773718925,
"repo_name": "bixuehujin/dockerman",
"id": "b748f114beba7f629581f43dfd01c858e31e51ee",
"size": "3942",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dockerman/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "28209"
}
],
"symlink_target": ""
} |
from distutils import extension
from distutils import util
import errno
import os
import os.path
import pkg_resources
import platform
import re
import shlex
import shutil
import sys
import sysconfig
import setuptools
from setuptools.command import build_ext
# TODO(atash) add flag to disable Cython use
os.chdir(os.path.dirname(os.path.abspath(__file__)))
sys.path.insert(0, os.path.abspath('.'))
import protoc_lib_deps
import grpc_version
# True when running under Python 3 (drives the macOS deployment-target fixup below).
PY3 = sys.version_info.major == 3

# Environment variable to determine whether or not the Cython extension should
# *use* Cython or use the generated C files. Note that this requires the C files
# to have been generated by building first *with* Cython support.
BUILD_WITH_CYTHON = os.environ.get('GRPC_PYTHON_BUILD_WITH_CYTHON', False)

# There are some situations (like on Windows) where CC, CFLAGS, and LDFLAGS are
# entirely ignored/dropped/forgotten by distutils and its Cygwin/MinGW support.
# We use these environment variables to thus get around that without locking
# ourselves in w.r.t. the multitude of operating systems this ought to build on.
# We can also use these variables as a way to inject environment-specific
# compiler/linker flags. We assume GCC-like compilers and/or MinGW as a
# reasonable default.
EXTRA_ENV_COMPILE_ARGS = os.environ.get('GRPC_PYTHON_CFLAGS', None)
EXTRA_ENV_LINK_ARGS = os.environ.get('GRPC_PYTHON_LDFLAGS', None)
if EXTRA_ENV_COMPILE_ARGS is None:
    EXTRA_ENV_COMPILE_ARGS = '-fno-wrapv -frtti -std=c++11'
    if 'win32' in sys.platform:
        # We use define flags here and don't directly add to DEFINE_MACROS below to
        # ensure that the expert user/builder has a way of turning it off (via the
        # envvars) without adding yet more GRPC-specific envvars.
        # See https://sourceforge.net/p/mingw-w64/bugs/363/
        if '32' in platform.architecture()[0]:
            EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime32 -D_timeb=__timeb32 -D_ftime_s=_ftime32_s'
        else:
            EXTRA_ENV_COMPILE_ARGS += ' -D_ftime=_ftime64 -D_timeb=__timeb64'
if EXTRA_ENV_LINK_ARGS is None:
    EXTRA_ENV_LINK_ARGS = '-lpthread'
    if 'win32' in sys.platform:
        # TODO(atash) check if this is actually safe to just import and call on
        # non-Windows (to avoid breaking import style)
        from distutils.cygwinccompiler import get_msvcr
        msvcr = get_msvcr()[0]
        EXTRA_ENV_LINK_ARGS += (
            ' -static-libgcc -static-libstdc++ -mcrtdll={msvcr} '
            '-static'.format(msvcr=msvcr))

# Shell-style parsed versions of the flag strings above.
EXTRA_COMPILE_ARGS = shlex.split(EXTRA_ENV_COMPILE_ARGS)
EXTRA_LINK_ARGS = shlex.split(EXTRA_ENV_LINK_ARGS)

# Normalize dependency paths for the host platform's path separator.
CC_FILES = [
    os.path.normpath(cc_file) for cc_file in protoc_lib_deps.CC_FILES]
PROTO_FILES = [
    os.path.normpath(proto_file) for proto_file in protoc_lib_deps.PROTO_FILES]
CC_INCLUDE = os.path.normpath(protoc_lib_deps.CC_INCLUDE)
PROTO_INCLUDE = os.path.normpath(protoc_lib_deps.PROTO_INCLUDE)

# Package name and the sub-directory the .proto resources are copied into.
GRPC_PYTHON_TOOLS_PACKAGE = 'grpc.tools'
GRPC_PYTHON_PROTO_RESOURCES_NAME = '_proto'

DEFINE_MACROS = (('HAVE_PTHREAD', 1),)
if "win32" in sys.platform and '64bit' in platform.architecture()[0]:
    DEFINE_MACROS += (('MS_WIN64', 1),)

# By default, Python3 distutils enforces compatibility of
# c plugins (.so files) with the OSX version Python3 was built with.
# For Python3.4, this is OSX 10.6, but we need Thread Local Support (__thread)
if 'darwin' in sys.platform and PY3:
    mac_target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET')
    if mac_target and (pkg_resources.parse_version(mac_target) <
                       pkg_resources.parse_version('10.9.0')):
        os.environ['MACOSX_DEPLOYMENT_TARGET'] = '10.9'
        os.environ['_PYTHON_HOST_PLATFORM'] = re.sub(
            r'macosx-[0-9]+\.[0-9]+-(.+)',
            r'macosx-10.9-\1',
            util.get_platform())
def package_data():
    """Copy the bundled .proto resources into the package tree.

    Returns the setuptools ``package_data`` mapping: package name ->
    list of proto file paths relative to the package directory.
    """
    tools_path = GRPC_PYTHON_TOOLS_PACKAGE.replace('.', os.path.sep)
    proto_resources_path = os.path.join(tools_path,
                                        GRPC_PYTHON_PROTO_RESOURCES_NAME)
    collected = []
    for proto_file in PROTO_FILES:
        src = os.path.join(PROTO_INCLUDE, proto_file)
        dst = os.path.join(proto_resources_path, proto_file)
        try:
            os.makedirs(os.path.dirname(dst))
        except OSError as error:
            # An already-existing directory is fine; anything else is fatal.
            if error.errno != errno.EEXIST:
                raise
        shutil.copy(src, dst)
        collected.append(
            os.path.join(GRPC_PYTHON_PROTO_RESOURCES_NAME, proto_file))
    return {GRPC_PYTHON_TOOLS_PACKAGE: collected}
def extension_modules():
    """Assemble the C++ extension implementing the protoc compiler plugin."""
    # Pick the Cython .pyx when cythonizing, else the pre-generated C++ file.
    compiler_source = ('_protoc_compiler.pyx' if BUILD_WITH_CYTHON
                       else '_protoc_compiler.cpp')
    plugin_sources = [os.path.join('grpc', 'tools', compiler_source)]
    plugin_sources.append(os.path.join('grpc', 'tools', 'main.cc'))
    plugin_sources.append(
        os.path.join('grpc_root', 'src', 'compiler', 'python_generator.cc'))
    plugin_sources.extend(
        os.path.join(CC_INCLUDE, cc_file) for cc_file in CC_FILES)
    plugin_ext = extension.Extension(
        name='grpc.tools._protoc_compiler',
        sources=plugin_sources,
        include_dirs=[
            '.',
            'grpc_root',
            os.path.join('grpc_root', 'include'),
            CC_INCLUDE,
        ],
        language='c++',
        define_macros=list(DEFINE_MACROS),
        extra_compile_args=list(EXTRA_COMPILE_ARGS),
        extra_link_args=list(EXTRA_LINK_ARGS),
    )
    extensions = [plugin_ext]
    if not BUILD_WITH_CYTHON:
        return extensions
    # Deferred import: Cython is only required when actually cythonizing.
    from Cython import Build
    return Build.cythonize(extensions)
# Build/install the grpcio-tools distribution: the protoc plugin extension
# plus the bundled .proto resources copied by package_data().
setuptools.setup(
    name='grpcio_tools',
    version=grpc_version.VERSION,
    license='3-clause BSD',
    ext_modules=extension_modules(),
    packages=setuptools.find_packages('.'),
    namespace_packages=['grpc'],
    install_requires=[
        'protobuf>=3.0.0a3',
        'grpcio>=0.15.0',
    ],
    package_data=package_data(),
)
| {
"content_hash": "06b49a4367699139ff7124c23c9d26d5",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 93,
"avg_line_length": 37.08387096774194,
"alnum_prop": 0.6917188587334725,
"repo_name": "bogdandrutu/grpc",
"id": "bb1f1cf085b65e3bf06e08d51440fd7e3e37cc21",
"size": "7277",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tools/distrib/python/grpcio_tools/setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "27289"
},
{
"name": "C",
"bytes": "5553319"
},
{
"name": "C#",
"bytes": "1240799"
},
{
"name": "C++",
"bytes": "1781073"
},
{
"name": "CMake",
"bytes": "38252"
},
{
"name": "DTrace",
"bytes": "147"
},
{
"name": "JavaScript",
"bytes": "337671"
},
{
"name": "M4",
"bytes": "36871"
},
{
"name": "Makefile",
"bytes": "643720"
},
{
"name": "Objective-C",
"bytes": "291359"
},
{
"name": "PHP",
"bytes": "150515"
},
{
"name": "Protocol Buffer",
"bytes": "116289"
},
{
"name": "Python",
"bytes": "1139919"
},
{
"name": "Ruby",
"bytes": "573350"
},
{
"name": "Shell",
"bytes": "48311"
},
{
"name": "Swift",
"bytes": "5425"
}
],
"symlink_target": ""
} |
import abc
from typing import Dict, List, Any, Tuple, cast, Type, Iterator, Union
from cephlib.numeric_types import TimeSeries, DataSource
from cephlib.statistic import StatProps
from cephlib.istorage import IImagesStorage, Storable, ISensorStorage
from cephlib.node import NodeInfo
from cephlib.node_impl import IRPCNode
from .suits.job import JobConfig
class SuiteConfig(Storable):
    """Input configuration of one test suite.

    test_type - test type name
    params - parameters from yaml file for this test
    run_uuid - UUID to be used to create file names & Co
    nodes - nodes to run tests on
    remote_dir - directory on nodes to be used for local files
    """
    __ignore_fields__ = ['nodes', 'run_uuid', 'remote_dir']

    def __init__(self,
                 test_type: str,
                 params: Dict[str, Any],
                 run_uuid: str,
                 nodes: List[IRPCNode],
                 remote_dir: str,
                 idx: int,
                 keep_raw_files: bool) -> None:
        self.test_type = test_type
        self.params = params
        self.run_uuid = run_uuid
        self.nodes = nodes
        self.nodes_ids = [node.node_id for node in nodes]
        self.remote_dir = remote_dir
        self.keep_raw_files = keep_raw_files
        # Suites that define a 'load' embed it into the storage id so that
        # different loads of the same test type get distinct ids.
        if 'load' in self.params:
            self.storage_id = "{}_{}_{}".format(
                test_type, self.params['load'], idx)
        else:
            self.storage_id = "{}_{}".format(test_type, idx)

    def __eq__(self, o: object) -> bool:
        # Exact-class comparison: subclasses never compare equal.
        if type(o) is not self.__class__:
            return False
        other = cast(SuiteConfig, o)
        if self.test_type != other.test_type:
            return False
        if self.params != other.params:
            return False
        # Node order is irrelevant for equality.
        return set(self.nodes_ids) == set(other.nodes_ids)
# (node_name, source_dev, metric_name) => metric_results
JobMetrics = Dict[Tuple[str, str, str], TimeSeries]
# (node_name, source_dev, metric_name) => aggregated statistics for that metric
JobStatMetrics = Dict[Tuple[str, str, str], StatProps]
class IWallyStorage(ISensorStorage, IImagesStorage, metaclass=abc.ABCMeta):
    """Abstract interface of the wally result storage.

    Combines sensor and image storage with suite/job/time-series
    persistence used by the test runner and report generator.
    """

    @abc.abstractmethod
    def flush(self) -> None:
        """Persist any buffered data."""
        pass

    @abc.abstractmethod
    def put_or_check_suite(self, suite: SuiteConfig) -> None:
        """Store the suite config, or verify it matches an already-stored one."""
        pass

    @abc.abstractmethod
    def put_job(self, suite: SuiteConfig, job: JobConfig) -> None:
        """Store a job config under the given suite."""
        pass

    @abc.abstractmethod
    def put_extra(self, data: bytes, source: DataSource) -> None:
        """Store raw auxiliary data for the given source."""
        pass

    @abc.abstractmethod
    def put_stat(self, data: StatProps, source: DataSource) -> None:
        """Store computed statistics for the given source."""
        pass

    @abc.abstractmethod
    def get_stat(self, stat_cls: Type[StatProps], source: DataSource) -> StatProps:
        """Load statistics of the given class for the given source."""
        pass

    @abc.abstractmethod
    def iter_suite(self, suite_type: str = None) -> Iterator[SuiteConfig]:
        """Iterate stored suites, optionally filtered by suite type."""
        pass

    @abc.abstractmethod
    def iter_job(self, suite: SuiteConfig) -> Iterator[JobConfig]:
        """Iterate jobs belonging to the given suite."""
        pass

    # return path to file to be inserted into report
    @abc.abstractmethod
    def put_plot_file(self, data: bytes, source: DataSource) -> str:
        pass

    @abc.abstractmethod
    def get_job_info(self, suite: SuiteConfig, job: JobConfig, key: str) -> Any:
        """Load a keyed info record stored for a (suite, job) pair."""
        pass

    @abc.abstractmethod
    def get_ts(self, ds: DataSource) -> TimeSeries:
        """Load the time series identified by the data source."""
        pass

    @abc.abstractmethod
    def put_ts(self, ts: TimeSeries) -> None:
        """Store a time series."""
        pass

    @abc.abstractmethod
    def iter_ts(self, **ds_parts) -> Iterator[DataSource]:
        """Iterate data sources matching the given DataSource field values."""
        pass

    @abc.abstractmethod
    def put_job_info(self, suite: SuiteConfig, job: JobConfig, key: str, data: Any) -> None:
        """Store a keyed info record for a (suite, job) pair."""
        pass

    @abc.abstractmethod
    def load_nodes(self) -> List[NodeInfo]:
pass | {
"content_hash": "6d1cdd7c805d86d8e24dee763d8dcca5",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 92,
"avg_line_length": 29.48780487804878,
"alnum_prop": 0.6192445547284257,
"repo_name": "Mirantis/disk_perf_test_tool",
"id": "f2b84e6a5ec6eed4f902a4339769602b1addb064",
"size": "3627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "wally/result_classes.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "1916"
},
{
"name": "HTML",
"bytes": "25130"
},
{
"name": "JavaScript",
"bytes": "3474"
},
{
"name": "Makefile",
"bytes": "635"
},
{
"name": "Python",
"bytes": "370984"
},
{
"name": "Shell",
"bytes": "27277"
}
],
"symlink_target": ""
} |
"""
ReplicaExchangeSampler
======================
Derived multi-thermodynamic state multistate class with exchanging configurations between replicas
COPYRIGHT
Current version by Andrea Rizzi <andrea.rizzi@choderalab.org>, Levi N. Naden <levi.naden@choderalab.org> and
John D. Chodera <john.chodera@choderalab.org> while at Memorial Sloan Kettering Cancer Center.
Original version by John D. Chodera <jchodera@gmail.com> while at the University of
California Berkeley.
LICENSE
This code is licensed under the latest available version of the MIT License.
"""
# ==============================================================================
# GLOBAL IMPORTS
# ==============================================================================
import os
import math
import copy
import logging
import numpy as np
import mdtraj as md
from numba import njit
from openmmtools import multistate, utils
from openmmtools.multistate.multistateanalyzer import MultiStateSamplerAnalyzer
import mpiplus
logger = logging.getLogger(__name__)
# ==============================================================================
# REPLICA-EXCHANGE SIMULATION
# ==============================================================================
class ReplicaExchangeSampler(multistate.MultiStateSampler):
    """Replica-exchange simulation facility.

    This MultiStateSampler class provides a general replica-exchange simulation facility,
    allowing any set of thermodynamic states to be specified, along with a
    set of initial positions to be assigned to the replicas in a round-robin
    fashion.

    No distinction is made between one-dimensional and multidimensional replica
    layout. By default, the replica mixing scheme attempts to mix *all* replicas
    to minimize slow diffusion normally found in multidimensional replica exchange
    simulations (Modification of the 'replica_mixing_scheme' setting will allow
    the traditional 'neighbor swaps only' scheme to be used.)

    Stored configurations, energies, swaps, and restart information are all written
    to a single output file using the platform portable, robust, and efficient
    NetCDF4 library.

    Parameters
    ----------
    mcmc_moves : MCMCMove or list of MCMCMove, optional
        The MCMCMove used to propagate the states. If a list of MCMCMoves,
        they will be assigned to the correspondent thermodynamic state on
        creation. If None is provided, Langevin dynamics with 2fm timestep, 5.0/ps collision rate,
        and 500 steps per iteration will be used.
    number_of_iterations : int or infinity, optional, default: 1
        The number of iterations to perform. Both ``float('inf')`` and
        ``numpy.inf`` are accepted for infinity. If you set this to infinity,
        be sure to set also ``online_analysis_interval``.
    replica_mixing_scheme : 'swap-all', 'swap-neighbors' or None, Default: 'swap-all'
        The scheme used to swap thermodynamic states between replicas.
    online_analysis_interval : None or Int >= 1, optional, default None
        Choose the interval at which to perform online analysis of the free energy.
        After every interval, the simulation will be stopped and the free energy estimated.
        If the error in the free energy estimate is at or below ``online_analysis_target_error``, then the simulation
        will be considered completed.
    online_analysis_target_error : float >= 0, optional, default 0.2
        The target error for the online analysis measured in kT per phase.
        Once the free energy is at or below this value, the phase will be considered complete.
        If ``online_analysis_interval`` is None, this option does nothing.
    online_analysis_minimum_iterations : int >= 0, optional, default 50
        Set the minimum number of iterations which must pass before online analysis is carried out.
        Since the initial samples likely not to yield a good estimate of free energy, save time and just skip them
        If ``online_analysis_interval`` is None, this does nothing

    Attributes
    ----------
    n_replicas
    iteration
    mcmc_moves
    sampler_states
    metadata
    is_completed

    Examples
    --------
    Parallel tempering simulation of alanine dipeptide in implicit solvent (replica
    exchange among temperatures). This is just an illustrative example; use :class:`ParallelTempering`
    class for actual production parallel tempering simulations.

    Create the system.

    >>> import math
    >>> from openmm import unit
    >>> from openmmtools import testsystems, states, mcmc
    >>> testsystem = testsystems.AlanineDipeptideImplicit()
    >>> import os
    >>> import tempfile

    Create thermodynamic states for parallel tempering with exponentially-spaced schedule.

    >>> n_replicas = 3  # Number of temperature replicas.
    >>> T_min = 298.0 * unit.kelvin  # Minimum temperature.
    >>> T_max = 600.0 * unit.kelvin  # Maximum temperature.
    >>> temperatures = [T_min + (T_max - T_min) * (math.exp(float(i) / float(n_replicas-1)) - 1.0) / (math.e - 1.0)
    ...                 for i in range(n_replicas)]
    >>> thermodynamic_states = [states.ThermodynamicState(system=testsystem.system, temperature=T)
    ...                         for T in temperatures]

    Initialize simulation object with options. Run with a GHMC integrator.

    >>> move = mcmc.GHMCMove(timestep=2.0*unit.femtoseconds, n_steps=50)
    >>> simulation = ReplicaExchangeSampler(mcmc_moves=move, number_of_iterations=2)

    Create simulation with its storage file (in a temporary directory) and run.

    >>> storage_path = tempfile.NamedTemporaryFile(delete=False).name + '.nc'
    >>> reporter = multistate.MultiStateReporter(storage_path, checkpoint_interval=1)
    >>> simulation.create(thermodynamic_states=thermodynamic_states,
    ...                   sampler_states=states.SamplerState(testsystem.positions),
    ...                   storage=reporter)
    Please cite the following:
    <BLANKLINE>
    Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209
    Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27
    Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413
    Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w
    Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs multistate: Simple improvements for enhanced mixing. J. Chem. Phys., 135:194110, 2011. DOI:10.1063/1.3660669
    <BLANKLINE>
    >>> simulation.run()  # This runs for a maximum of 2 iterations.
    >>> simulation.iteration
    2
    >>> simulation.run(n_iterations=1)
    >>> simulation.iteration
    2

    To resume a simulation from an existing storage file and extend it beyond
    the original number of iterations.

    >>> del simulation
    >>> simulation = ReplicaExchangeSampler.from_storage(reporter)
    Please cite the following:
    <BLANKLINE>
    Friedrichs MS, Eastman P, Vaidyanathan V, Houston M, LeGrand S, Beberg AL, Ensign DL, Bruns CM, and Pande VS. Accelerating molecular dynamic simulations on graphics processing unit. J. Comput. Chem. 30:864, 2009. DOI: 10.1002/jcc.21209
    Eastman P and Pande VS. OpenMM: A hardware-independent framework for molecular simulations. Comput. Sci. Eng. 12:34, 2010. DOI: 10.1109/MCSE.2010.27
    Eastman P and Pande VS. Efficient nonbonded interactions for molecular dynamics on a graphics processing unit. J. Comput. Chem. 31:1268, 2010. DOI: 10.1002/jcc.21413
    Eastman P and Pande VS. Constant constraint matrix approximation: A robust, parallelizable constraint method for molecular simulations. J. Chem. Theor. Comput. 6:434, 2010. DOI: 10.1021/ct900463w
    Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs multistate: Simple improvements for enhanced mixing. J. Chem. Phys., 135:194110, 2011. DOI:10.1063/1.3660669
    <BLANKLINE>
    >>> simulation.extend(n_iterations=1)
    >>> simulation.iteration
    3

    You can extract several information from the NetCDF file using the Reporter
    class while the simulation is running. This reads the SamplerStates of every
    run iteration.

    >>> reporter = multistate.MultiStateReporter(storage=storage_path, open_mode='r', checkpoint_interval=1)
    >>> sampler_states = reporter.read_sampler_states(iteration=1)
    >>> len(sampler_states)
    3
    >>> sampler_states[0].positions.shape  # Alanine dipeptide has 22 atoms.
    (22, 3)

    Clean up.

    >>> os.remove(storage_path)

    :param number_of_iterations: Maximum number of integer iterations that will be run
    :param replica_mixing_scheme: Scheme which describes how replicas are exchanged each iteration as string
    :param online_analysis_interval: How frequently to carry out online analysis in number of iterations
    :param online_analysis_target_error: Target free energy difference error float at which simulation will be stopped during online analysis, in dimensionless energy
    :param online_analysis_minimum_iterations: Minimum number of iterations needed before online analysis is run as int
    """

    # -------------------------------------------------------------------------
    # Constructors.
    # -------------------------------------------------------------------------

    def __init__(self, replica_mixing_scheme='swap-all', **kwargs):
        # Initialize multi-state sampler simulation.
        super(ReplicaExchangeSampler, self).__init__(**kwargs)
        self.replica_mixing_scheme = replica_mixing_scheme

    class _StoredProperty(multistate.MultiStateSampler._StoredProperty):
        # Extends the parent stored-property machinery with a validator for
        # the replica mixing scheme option.

        @staticmethod
        def _repex_mixing_scheme_validator(instance, replica_mixing_scheme):
            supported_schemes = ['swap-all', 'swap-neighbors', None]
            if replica_mixing_scheme not in supported_schemes:
                raise ValueError("Unknown replica mixing scheme '{}'. Supported values "
                                 "are {}.".format(replica_mixing_scheme, supported_schemes))
            # With locality restrictions only neighbor swaps are valid.
            if instance.locality is not None:
                if replica_mixing_scheme not in ['swap-neighbors']:
                    raise ValueError("replica_mixing_scheme must be 'swap-neighbors' if locality is used")
            return replica_mixing_scheme

    # Validated, storage-backed option (see _StoredProperty above).
    replica_mixing_scheme = _StoredProperty('replica_mixing_scheme',
                                            validate_function=_StoredProperty._repex_mixing_scheme_validator)

    _TITLE_TEMPLATE = ('Replica-exchange sampler simulation created using ReplicaExchangeSampler class '
                       'of openmmtools.multistate on {}')

    def _pre_write_create(self, thermodynamic_states, sampler_states, *args, **kwargs):
        """Overwrite parent implementation to make sure the number of
        thermodynamic states is equal to the number of sampler states.
        """
        # Make sure there are no more sampler states than thermodynamic states.
        n_states = len(thermodynamic_states)
        if len(sampler_states) > n_states:
            raise ValueError('Passed {} SamplerStates but only {} ThermodynamicStates'.format(
                len(sampler_states), n_states))
        # Distribute sampler states to replicas in a round-robin fashion.
        # The sampler states are deep-copied inside super()._pre_write_create().
        sampler_states = [sampler_states[i % len(sampler_states)] for i in range(n_states)]
        super()._pre_write_create(thermodynamic_states, sampler_states, *args, **kwargs)

    @mpiplus.on_single_node(0, broadcast_result=True)
    def _mix_replicas(self):
        """Attempt to swap replicas according to user-specified scheme.

        Runs on MPI rank 0 only; the resulting replica->state permutation
        is broadcast to all nodes (see decorator).
        """
        logger.debug("Mixing replicas...")
        # Reset storage to keep track of swap attempts this iteration.
        self._n_accepted_matrix[:, :] = 0
        self._n_proposed_matrix[:, :] = 0
        # Perform swap attempts according to requested scheme.
        with utils.time_it('Mixing of replicas'):
            if self.replica_mixing_scheme == 'swap-neighbors':
                self._mix_neighboring_replicas()
            elif self.replica_mixing_scheme == 'swap-all':
                nswap_attempts = self.n_replicas**3
                # Try to use numba-accelerated mixing code if possible,
                # otherwise fall back to Python-accelerated code.
                try:
                    self._mix_all_replicas_numba(
                        nswap_attempts, self.n_replicas,
                        self._replica_thermodynamic_states, self._energy_thermodynamic_states,
                        self._n_accepted_matrix, self._n_proposed_matrix
                    )
                except (ValueError, ImportError) as e:
                    logger.warning(str(e))
                    self._mix_all_replicas(nswap_attempts)
            else:
                assert self.replica_mixing_scheme is None
        # Determine fraction of swaps accepted this iteration.
        n_swaps_proposed = self._n_proposed_matrix.sum()
        n_swaps_accepted = self._n_accepted_matrix.sum()
        swap_fraction_accepted = 0.0
        if n_swaps_proposed > 0:
            swap_fraction_accepted = n_swaps_accepted / n_swaps_proposed
        logger.debug("Accepted {}/{} attempted swaps ({:.1f}%)".format(n_swaps_accepted, n_swaps_proposed,
                                                                      swap_fraction_accepted * 100.0))
        return self._replica_thermodynamic_states

    @staticmethod
    @njit
    def _mix_all_replicas_numba(
            nswap_attempts,
            n_replicas, _replica_thermodynamic_states, _energy_thermodynamic_states,
            _n_accepted_matrix, _n_proposed_matrix):
        """
        numba-accelerated version of _mix_all_replicas()

        All arguments must be passed during the function call because of numba jit limitations.

        Parameters
        ----------
        nswap_attempts : int
            Number of swaps to attempt
        n_replicas : int
            Number of replicas
        _replica_thermodynamic_states : array-like of int of shape [n_replicas]
            _replica_thermodynamic_states[replica_index] is the thermodynamic state visited by that replica
        _energy_thermodynamic_states : array-like of float of shape [n_replicas, n_replicas]
            _energy_thermodynamic_states[replica_index,state_index] is the reduced potential of state ``state_index``
            for replica ``replica_index``
        _n_accepted_matrix : array-like of float of shape [n_replicas, n_replicas]
            _n_accepted_matrix[from_state,to_state] is the number of accepted swaps
        _n_proposed_matrix : array-like of float of shape [n_replicas, n_replicas]
            _n_accepted_matrix[from_state,to_state] is the number of proposed swaps
        """
        for swap_attempt in range(nswap_attempts):
            # Choose random replicas uniformly to attempt to swap.
            replica_i = np.random.randint(n_replicas)
            replica_j = np.random.randint(n_replicas)
            # Determine the thermodynamic states associated to these replicas.
            thermodynamic_state_i = _replica_thermodynamic_states[replica_i]
            thermodynamic_state_j = _replica_thermodynamic_states[replica_j]
            # Compute log probability of swap.
            energy_ij = _energy_thermodynamic_states[replica_i, thermodynamic_state_j]
            energy_ji = _energy_thermodynamic_states[replica_j, thermodynamic_state_i]
            energy_ii = _energy_thermodynamic_states[replica_i, thermodynamic_state_i]
            energy_jj = _energy_thermodynamic_states[replica_j, thermodynamic_state_j]
            log_p_accept = - (energy_ij + energy_ji) + energy_ii + energy_jj
            # Record that this move has been proposed.
            _n_proposed_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
            _n_proposed_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1
            # Accept or reject (Metropolis criterion).
            if log_p_accept >= 0.0 or np.random.rand() < np.exp(log_p_accept):
                # Swap states in replica slots i and j.
                _replica_thermodynamic_states[replica_i] = thermodynamic_state_j
                _replica_thermodynamic_states[replica_j] = thermodynamic_state_i
                # Accumulate statistics.
                _n_accepted_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
                _n_accepted_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1

    def _mix_all_replicas(self, nswap_attempts=100):
        """Exchange all replicas with Python.

        Pure-Python fallback for _mix_all_replicas_numba().
        """
        # Determine number of swaps to attempt to ensure thorough mixing.
        # TODO: Replace this with analytical result computed to guarantee sufficient mixing, or
        # TODO: adjust it based on how many we can afford to do and not have mixing take a
        # TODO: substantial fraction of iteration time.
        logger.debug("Will attempt to swap all pairs of replicas, using a total of %d attempts." % nswap_attempts)
        # Attempt swaps to mix replicas.
        for swap_attempt in range(nswap_attempts):
            # Choose random replicas uniformly to attempt to swap.
            replica_i = np.random.randint(self.n_replicas)
            replica_j = np.random.randint(self.n_replicas)
            self._attempt_swap(replica_i, replica_j)

    def _mix_neighboring_replicas(self):
        """Attempt exchanges between neighboring replicas only."""
        logger.debug("Will attempt to swap only neighboring replicas.")
        # TODO: Extend this to allow more remote swaps or more thorough mixing if locality > 1.
        # Attempt swaps of pairs of replicas using traditional scheme (e.g. [0,1], [2,3], ...).
        offset = np.random.randint(2)  # Offset is 0 or 1.
        for thermodynamic_state_i in range(offset, self.n_replicas-1, 2):
            thermodynamic_state_j = thermodynamic_state_i + 1  # Neighboring state.
            # Determine which replicas currently hold the thermodynamic states.
            replica_i = np.where(self._replica_thermodynamic_states == thermodynamic_state_i)
            replica_j = np.where(self._replica_thermodynamic_states == thermodynamic_state_j)
            self._attempt_swap(replica_i, replica_j)

    def _attempt_swap(self, replica_i, replica_j):
        """Attempt a single exchange between two replicas."""
        # Determine the thermodynamic states associated to these replicas.
        thermodynamic_state_i = self._replica_thermodynamic_states[replica_i]
        thermodynamic_state_j = self._replica_thermodynamic_states[replica_j]
        # Compute log probability of swap.
        energy_ij = self._energy_thermodynamic_states[replica_i, thermodynamic_state_j]
        energy_ji = self._energy_thermodynamic_states[replica_j, thermodynamic_state_i]
        energy_ii = self._energy_thermodynamic_states[replica_i, thermodynamic_state_i]
        energy_jj = self._energy_thermodynamic_states[replica_j, thermodynamic_state_j]
        log_p_accept = - (energy_ij + energy_ji) + energy_ii + energy_jj
        # Record that this move has been proposed.
        self._n_proposed_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
        self._n_proposed_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1
        # Accept or reject (Metropolis criterion).
        if log_p_accept >= 0.0 or np.random.rand() < math.exp(log_p_accept):
            # Swap states in replica slots i and j.
            self._replica_thermodynamic_states[replica_i] = thermodynamic_state_j
            self._replica_thermodynamic_states[replica_j] = thermodynamic_state_i
            # Accumulate statistics.
            self._n_accepted_matrix[thermodynamic_state_i, thermodynamic_state_j] += 1
            self._n_accepted_matrix[thermodynamic_state_j, thermodynamic_state_i] += 1

    @mpiplus.on_single_node(rank=0, broadcast_result=False, sync_nodes=False)
    def _display_citations(self, overwrite_global=False, citation_stack=None):
        """
        Display papers to be cited.

        The overwrite_global command will force the citation to display even if the "have_citations_been_shown" variable
        is True
        """
        gibbs_citations = """\
Chodera JD and Shirts MR. Replica exchange and expanded ensemble simulations as Gibbs multistate: Simple improvements for enhanced mixing. J. Chem. Phys., 135:194110, 2011. DOI:10.1063/1.3660669
"""
        # The Gibbs-sampling citation only applies to the 'swap-all' scheme.
        if self.replica_mixing_scheme == 'swap-all':
            if citation_stack is None:
                citation_stack = [gibbs_citations]
            else:
                citation_stack = [gibbs_citations] + citation_stack
        super()._display_citations(overwrite_global=overwrite_global, citation_stack=citation_stack)
class ReplicaExchangeAnalyzer(MultiStateSamplerAnalyzer):
    """Analyzer for simulations produced by a replica-exchange sampler.

    Behaves exactly like :class:`MultiStateSamplerAnalyzer`; the subclass
    exists only to give replica-exchange runs a dedicated analyzer type.

    See Also
    --------
    PhaseAnalyzer
    MultiStateSamplerAnalyzer
    """
# ==============================================================================
# MAIN AND TESTS
# ==============================================================================
if __name__ == "__main__":
    # Run the doctest examples embedded in this module's docstrings.
    import doctest
    doctest.testmod()
| {
"content_hash": "9502f3cf008d446b53486bd84a0f7899",
"timestamp": "",
"source": "github",
"line_count": 441,
"max_line_length": 247,
"avg_line_length": 49.87528344671202,
"alnum_prop": 0.6557854057740395,
"repo_name": "choderalab/openmmtools",
"id": "a1a23c34d8647837e61d56e6465d14dfff48cb70",
"size": "22206",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "openmmtools/multistate/replicaexchange.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "139"
},
{
"name": "Python",
"bytes": "1898386"
},
{
"name": "Shell",
"bytes": "2331"
}
],
"symlink_target": ""
} |
class openssl:
    """Build recipe for OpenSSL 1.1.0c (ffmpeg dependency)."""

    name = "openssl 1.1.0c"
    url = "https://www.openssl.org/source/openssl-1.1.0c.tar.gz"
    dirname = ""  # leave empty to auto guess
    ffmpeg_opts = ["--enable-openssl"]

    def skip(self, prefix, force):
        """Skip the build when not forced and the binary is already installed."""
        if force:
            return False
        if file_exist(prefix + "/bin/openssl"):
            return True
        return False

    def configure(self, prefix):
        base = "./config --prefix={} --openssldir={}/ssl".format(prefix, prefix)
        extra = (" enable-heartbeats enable-weak-ssl-ciphers"
                 " enable-md2 enable-rc5 zlib")
        runcmd(base + extra)

    def make(self, prefix, opts):
        runcmd("make {}".format(opts))

    def install(self, prefix):
        runcmd("make install")
deps.append(openssl());  # register this recipe with the global dependency list
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
| {
"content_hash": "aaddb53d85449b154632bc572cd266d3",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 80,
"avg_line_length": 31.28,
"alnum_prop": 0.6086956521739131,
"repo_name": "chunying/ffmpeg3",
"id": "1a9d16cd7d25cc5727a4378f1892e1a4601bbabe",
"size": "783",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "module/unused/openssl.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "95"
},
{
"name": "Makefile",
"bytes": "74"
},
{
"name": "Python",
"bytes": "74890"
},
{
"name": "Shell",
"bytes": "1983"
}
],
"symlink_target": ""
} |
"""Module for managing nova instances for share drivers."""
import abc
import os
import socket
import time
import netaddr
from oslo_config import cfg
from oslo_log import log
from oslo_utils import importutils
import six
from manila.common import constants as const
from manila import compute
from manila import context
from manila import exception
from manila.i18n import _
from manila.i18n import _LW
from manila.network.linux import ip_lib
from manila.network.neutron import api as neutron
from manila import utils
LOG = log.getLogger(__name__)
NEUTRON_NAME = "neutron"
NOVA_NAME = "nova"
# Options used only when the driver handles share servers itself
# (driver_handles_share_servers=True): service image/instance naming,
# keypair, security group, flavor, and the Neutron service-network layout.
share_servers_handling_mode_opts = [
    cfg.StrOpt(
        "service_image_name",
        default="manila-service-image",
        help="Name of image in Glance, that will be used for service instance "
             "creation."),
    cfg.StrOpt(
        "service_instance_name_template",
        default="manila_service_instance_%s",
        help="Name of service instance."),
    cfg.StrOpt(
        "manila_service_keypair_name",
        default="manila-service",
        help="Keypair name that will be created and used for service "
             "instances."),
    cfg.StrOpt(
        "path_to_public_key",
        default="~/.ssh/id_rsa.pub",
        help="Path to hosts public key."),
    cfg.StrOpt(
        "service_instance_security_group",
        default="manila-service",
        help="Security group name, that will be used for "
             "service instance creation."),
    cfg.IntOpt(
        "service_instance_flavor_id",
        default=100,
        help="ID of flavor, that will be used for service instance "
             "creation."),
    cfg.StrOpt(
        "service_network_name",
        default="manila_service_network",
        help="Name of manila service network. Used only with Neutron."),
    cfg.StrOpt(
        "service_network_cidr",
        default="10.254.0.0/16",
        help="CIDR of manila service network. Used only with Neutron."),
    cfg.IntOpt(
        "service_network_division_mask",
        default=28,
        help="This mask is used for dividing service network into "
             "subnets, IP capacity of subnet with this mask directly "
             "defines possible amount of created service VMs "
             "per tenant's subnet. Used only with Neutron."),
    cfg.StrOpt(
        "interface_driver",
        default="manila.network.linux.interface.OVSInterfaceDriver",
        help="Vif driver. Used only with Neutron."),
    cfg.BoolOpt(
        "connect_share_server_to_tenant_network",
        default=False,
        help="Attach share server directly to share network. "
             "Used only with Neutron."),
    cfg.StrOpt(
        "service_instance_network_helper_type",
        default=NEUTRON_NAME,
        help="Allowed values are %s." % [NOVA_NAME, NEUTRON_NAME])
]
# Options used only when share servers handling is disabled: an existing
# Nova instance (and how to reach it) is reused for share exports.
no_share_servers_handling_mode_opts = [
    cfg.StrOpt(
        "service_instance_name_or_id",
        help="Name or ID of service instance in Nova to use for share "
             "exports. Used only when share servers handling is disabled."),
    cfg.StrOpt(
        "service_net_name_or_ip",
        help="Can be either name of network that is used by service "
             "instance within Nova to get IP address or IP address itself "
             "for managing shares there. "
             "Used only when share servers handling is disabled."),
    cfg.StrOpt(
        "tenant_net_name_or_ip",
        help="Can be either name of network that is used by service "
             "instance within Nova to get IP address or IP address itself "
             "for exporting shares. "
             "Used only when share servers handling is disabled."),
]
# Options common to both modes: credentials/keys used to reach the service
# instance over SSH, and the instance build timeout.
common_opts = [
    cfg.StrOpt(
        "service_instance_user",
        help="User in service instance that will be used for authentication."),
    cfg.StrOpt(
        "service_instance_password",
        default=None,
        secret=True,
        help="Password for service instance user."),
    cfg.StrOpt(
        "path_to_private_key",
        default="~/.ssh/id_rsa",
        help="Path to host's private key."),
    cfg.IntOpt(
        "max_time_to_build_instance",
        default=300,
        help="Maximum time in seconds to wait for creating service instance."),
]
CONF = cfg.CONF
class ServiceInstanceManager(object):
    """Manages nova instances for various share drivers.
    This class provides following external methods:
    1. set_up_service_instance: creates instance and sets up share
    infrastructure.
    2. ensure_service_instance: ensure service instance is available.
    3. delete_service_instance: removes service instance and network
    infrastructure.
    """
    # Protocol name used only in user-facing error messages about
    # connectivity checks (the check itself is a TCP connect to port 22).
    _INSTANCE_CONNECTION_PROTO = "SSH"
    def get_config_option(self, key):
        """Returns value of config option.
        :param key: key of config' option.
        :returns: str -- value of config's option.
        first priority is driver's config,
        second priority is global config.
        """
        if self.driver_config:
            return self.driver_config.safe_get(key)
        return CONF.get(key)
    def _get_network_helper(self):
        # Select the network helper implementation from config; only the
        # two module-level constants NEUTRON_NAME / NOVA_NAME are valid.
        network_helper_type = (
            self.get_config_option(
                "service_instance_network_helper_type").lower())
        if network_helper_type == NEUTRON_NAME:
            return NeutronNetworkHelper(self)
        elif network_helper_type == NOVA_NAME:
            return NovaNetworkHelper(self)
        else:
            raise exception.ManilaException(
                _("Wrong value '%(provided)s' for config opt "
                  "'service_instance_network_helper_type'. "
                  "Allowed values are %(allowed)s.") % dict(
                    provided=network_helper_type,
                    allowed=[NOVA_NAME, NEUTRON_NAME]))
    def __init__(self, driver_config=None):
        """Registers config options and captures admin context/compute API.

        :param driver_config: optional per-driver config object; when given,
            options are appended to it instead of the global CONF.
        """
        super(ServiceInstanceManager, self).__init__()
        self.driver_config = driver_config
        # Which extra option group is registered depends on whether the
        # driver handles share servers itself.
        if self.driver_config:
            self.driver_config.append_config_values(common_opts)
            if self.get_config_option("driver_handles_share_servers"):
                self.driver_config.append_config_values(
                    share_servers_handling_mode_opts)
            else:
                self.driver_config.append_config_values(
                    no_share_servers_handling_mode_opts)
        else:
            CONF.register_opts(common_opts)
            if self.get_config_option("driver_handles_share_servers"):
                CONF.register_opts(share_servers_handling_mode_opts)
            else:
                CONF.register_opts(no_share_servers_handling_mode_opts)
        if not self.get_config_option("service_instance_user"):
            raise exception.ServiceInstanceException(
                _('Service instance user is not specified.'))
        self.admin_context = context.get_admin_context()
        self._execute = utils.execute
        self.compute_api = compute.API()
        self.path_to_private_key = self.get_config_option(
            "path_to_private_key")
        self.max_time_to_build_instance = self.get_config_option(
            "max_time_to_build_instance")
        if self.get_config_option("driver_handles_share_servers"):
            self.path_to_public_key = self.get_config_option(
                "path_to_public_key")
            self._network_helper = None
    @property
    @utils.synchronized("instantiate_network_helper")
    def network_helper(self):
        # Lazily instantiate the helper and set up host<->instance
        # connectivity exactly once; guarded by a named lock.
        if not self._network_helper:
            self._network_helper = self._get_network_helper()
            self._network_helper.setup_connectivity_with_service_instances()
        return self._network_helper
    def get_common_server(self):
        """Returns backend details for a pre-existing service instance.

        Resolves the configured instance (by name or id) and derives its
        private/public IPv4 addresses from configured IPs or network names.
        Used only when share servers handling is disabled.
        """
        data = {
            'public_address': None,
            'private_address': None,
            'service_net_name_or_ip': self.get_config_option(
                'service_net_name_or_ip'),
            'tenant_net_name_or_ip': self.get_config_option(
                'tenant_net_name_or_ip'),
        }
        data['instance'] = self.compute_api.server_get_by_name_or_id(
            self.admin_context,
            self.get_config_option('service_instance_name_or_id'))
        # Each option may be either a literal IPv4 address or a network
        # name to resolve against the instance's address listing.
        if netaddr.valid_ipv4(data['service_net_name_or_ip']):
            data['private_address'] = [data['service_net_name_or_ip']]
        else:
            data['private_address'] = self._get_addresses_by_network_name(
                data['service_net_name_or_ip'], data['instance'])
        if netaddr.valid_ipv4(data['tenant_net_name_or_ip']):
            data['public_address'] = [data['tenant_net_name_or_ip']]
        else:
            data['public_address'] = self._get_addresses_by_network_name(
                data['tenant_net_name_or_ip'], data['instance'])
        if not (data['public_address'] and data['private_address']):
            raise exception.ManilaException(
                "Can not find one of net addresses for service instance. "
                "Instance: %(instance)s, "
                "private_address: %(private_address)s, "
                "public_address: %(public_address)s." % data)
        share_server = {
            'username': self.get_config_option('service_instance_user'),
            'password': self.get_config_option('service_instance_password'),
            'pk_path': self.path_to_private_key,
            'instance_id': data['instance']['id'],
        }
        # Pick the first IPv4 address from each list (ignore any IPv6).
        for key in ('private_address', 'public_address'):
            data[key + '_v4'] = None
            for address in data[key]:
                if netaddr.valid_ipv4(address):
                    data[key + '_v4'] = address
                    break
        share_server['ip'] = data['private_address_v4']
        share_server['public_address'] = data['public_address_v4']
        return {'backend_details': share_server}
    def _get_addresses_by_network_name(self, net_name, server):
        """Returns list of IPs of 'server' on network 'net_name'.

        Handles both the 'networks' and the 'addresses' shapes of a Nova
        server dict; returns [] when the network is not present.
        """
        net_ips = []
        if 'networks' in server and net_name in server['networks']:
            net_ips = server['networks'][net_name]
        elif 'addresses' in server and net_name in server['addresses']:
            net_ips = [addr['addr'] for addr in server['addresses'][net_name]]
        return net_ips
    def _get_service_instance_name(self, share_server_id):
        """Returns service vms name."""
        if self.driver_config:
            # Make service instance name unique for multibackend installation
            name = "%s_%s" % (self.driver_config.config_group, share_server_id)
        else:
            name = share_server_id
        return self.get_config_option("service_instance_name_template") % name
    def _get_server_ip(self, server, net_name):
        """Returns service IP address of service instance."""
        net_ips = self._get_addresses_by_network_name(net_name, server)
        if not net_ips:
            msg = _("Failed to get service instance IP address. "
                    "Service network name is '%(net_name)s' "
                    "and provided data are '%(data)s'.")
            msg = msg % {'net_name': net_name, 'data': six.text_type(server)}
            raise exception.ServiceInstanceException(msg)
        # First address on the network is used; ordering comes from Nova.
        return net_ips[0]
    @utils.synchronized(
        "service_instance_get_or_create_security_group", external=True)
    def _get_or_create_security_group(self, context, name=None,
                                      description=None):
        """Get or create security group for service_instance.
        :param context: context, that should be used
        :param name: this is used for selection/creation of sec.group
        :param description: this is used on sec.group creation step only
        :returns: SecurityGroup -- security group instance from Nova
        :raises: exception.ServiceInstanceException.
        """
        name = name or self.get_config_option(
            "service_instance_security_group")
        if not name:
            LOG.warning(_LW("Name for service instance security group is not "
                            "provided. Skipping security group step."))
            return None
        s_groups = [s for s in self.compute_api.security_group_list(context)
                    if s.name == name]
        if not s_groups:
            # Creating security group
            if not description:
                description = "This security group is intended "\
                              "to be used by share service."
            LOG.debug("Creating security group with name '%s'.", name)
            sg = self.compute_api.security_group_create(
                context, name, description)
            # Open the rule set defined by the project-wide constants
            # (protocol, (from_port, to_port)) for all source addresses.
            for protocol, ports in const.SERVICE_INSTANCE_SECGROUP_DATA:
                self.compute_api.security_group_rule_create(
                    context,
                    parent_group_id=sg.id,
                    ip_protocol=protocol,
                    from_port=ports[0],
                    to_port=ports[1],
                    cidr="0.0.0.0/0",
                )
        elif len(s_groups) > 1:
            msg = _("Ambiguous security_groups.")
            raise exception.ServiceInstanceException(msg)
        else:
            sg = s_groups[0]
        return sg
    def ensure_service_instance(self, context, server):
        """Ensures that server exists and active."""
        try:
            inst = self.compute_api.server_get(self.admin_context,
                                               server['instance_id'])
        except exception.InstanceNotFound:
            LOG.warning(_LW("Service instance %s does not exist."),
                        server['instance_id'])
            return False
        # An instance that exists but is not ACTIVE is treated as absent.
        if inst['status'] == 'ACTIVE':
            return self._check_server_availability(server)
        return False
    def _delete_server(self, context, server_id):
        """Deletes the server."""
        try:
            self.compute_api.server_get(context, server_id)
        except exception.InstanceNotFound:
            LOG.debug("Service instance '%s' was not found. "
                      "Nothing to delete, skipping.", server_id)
            return
        self.compute_api.server_delete(context, server_id)
        # Poll until Nova reports the instance gone, up to the build timeout.
        t = time.time()
        while time.time() - t < self.max_time_to_build_instance:
            try:
                self.compute_api.server_get(context, server_id)
            except exception.InstanceNotFound:
                LOG.debug("Service instance '%s' was deleted "
                          "successfully.", server_id)
                break
            time.sleep(2)
        else:
            # while/else: only reached when the loop was not 'break'-ed.
            raise exception.ServiceInstanceException(
                _("Instance '%(id)s' has not been deleted in %(s)ss. "
                  "Giving up.") % {
                    'id': server_id, 's': self.max_time_to_build_instance})
    def set_up_service_instance(self, context, network_info):
        """Finds or creates and sets up service vm.
        :param context: defines context, that should be used
        :param network_info: network info for getting allocations
        :returns: dict with service instance details
        :raises: exception.ServiceInstanceException
        """
        instance_name = network_info['server_id']
        server = self._create_service_instance(
            context, instance_name, network_info)
        instance_details = self._get_new_instance_details(server)
        if not self._check_server_availability(instance_details):
            raise exception.ServiceInstanceException(
                _('%(conn_proto)s connection has not been '
                  'established to %(server)s in %(time)ss. Giving up.') % {
                      'conn_proto': self._INSTANCE_CONNECTION_PROTO,
                      'server': server['ip'],
                      'time': self.max_time_to_build_instance})
        return instance_details
    def _get_new_instance_details(self, server):
        """Builds the details dict stored as the share server's backend data.

        Falsy 'password'/'pk_path'/'subnet_id' entries are dropped so only
        meaningful credentials/network info are persisted.
        """
        instance_details = {
            'instance_id': server['id'],
            'ip': server['ip'],
            'pk_path': server.get('pk_path'),
            'subnet_id': server.get('subnet_id'),
            'password': self.get_config_option('service_instance_password'),
            'username': self.get_config_option('service_instance_user'),
            'public_address': server['public_address'],
            'service_ip': server['service_ip'],
        }
        if server.get('router_id'):
            instance_details['router_id'] = server['router_id']
        if server.get('service_port_id'):
            instance_details['service_port_id'] = server['service_port_id']
        if server.get('public_port_id'):
            instance_details['public_port_id'] = server['public_port_id']
        for key in ('password', 'pk_path', 'subnet_id'):
            if not instance_details[key]:
                instance_details.pop(key)
        return instance_details
    @utils.synchronized("service_instance_get_key", external=True)
    def _get_key(self, context):
        """Get ssh key.
        :param context: defines context, that should be used
        :returns: tuple with keypair name and path to private key.
        """
        if not (self.path_to_public_key and self.path_to_private_key):
            return (None, None)
        path_to_public_key = os.path.expanduser(self.path_to_public_key)
        path_to_private_key = os.path.expanduser(self.path_to_private_key)
        if (not os.path.exists(path_to_public_key) or
                not os.path.exists(path_to_private_key)):
            return (None, None)
        keypair_name = self.get_config_option("manila_service_keypair_name")
        keypairs = [k for k in self.compute_api.keypair_list(context)
                    if k.name == keypair_name]
        if len(keypairs) > 1:
            raise exception.ServiceInstanceException(_('Ambiguous keypairs.'))
        public_key, __ = self._execute('cat', path_to_public_key)
        if not keypairs:
            keypair = self.compute_api.keypair_import(
                context, keypair_name, public_key)
        else:
            keypair = keypairs[0]
            # Re-import when the on-disk public key no longer matches the
            # keypair stored in Nova.
            if keypair.public_key != public_key:
                LOG.debug('Public key differs from existing keypair. '
                          'Creating new keypair.')
                self.compute_api.keypair_delete(context, keypair.id)
                keypair = self.compute_api.keypair_import(
                    context, keypair_name, public_key)
        return keypair.name, path_to_private_key
    def _get_service_image(self, context):
        """Returns ID of service image for service vm creating."""
        service_image_name = self.get_config_option("service_image_name")
        images = [image.id for image in self.compute_api.image_list(context)
                  if image.name == service_image_name]
        if len(images) == 1:
            return images[0]
        elif not images:
            raise exception.ServiceInstanceException(
                _("Image with name '%s' not found.") % service_image_name)
        else:
            raise exception.ServiceInstanceException(
                _("Found more than one image by name '%s'.") %
                service_image_name)
    def _create_service_instance(self, context, instance_name, network_info):
        """Creates service vm and sets up networking for it."""
        service_image_id = self._get_service_image(context)
        key_name, key_path = self._get_key(context)
        # At least one auth mechanism (password or keypair) is required.
        if not (self.get_config_option("service_instance_password") or
                key_name):
            raise exception.ServiceInstanceException(
                _('Neither service instance password nor key are available.'))
        if not key_path:
            LOG.warning(_LW(
                'No key path is available. May be non-existent key path is '
                'provided. Check path_to_private_key (current value '
                '%(private_path)s) and path_to_public_key (current value '
                '%(public_path)s) in manila configuration file.'), dict(
                    private_path=self.path_to_private_key,
                    public_path=self.path_to_public_key))
        network_data = self.network_helper.setup_network(network_info)
        # fail_safe_data is attached to any raised exception so the caller
        # can clean up partially-created resources.
        fail_safe_data = dict(
            router_id=network_data.get('router_id'),
            subnet_id=network_data.get('subnet_id'))
        if network_data.get('service_port'):
            fail_safe_data['service_port_id'] = (
                network_data['service_port']['id'])
        if network_data.get('public_port'):
            fail_safe_data['public_port_id'] = (
                network_data['public_port']['id'])
        try:
            create_kwargs = self._get_service_instance_create_kwargs()
            service_instance = self.compute_api.server_create(
                context,
                name=instance_name,
                image=service_image_id,
                flavor=self.get_config_option("service_instance_flavor_id"),
                key_name=key_name,
                nics=network_data['nics'],
                availability_zone=CONF.storage_availability_zone,
                **create_kwargs)
            fail_safe_data['instance_id'] = service_instance['id']
            service_instance = self.wait_for_instance_to_be_active(
                service_instance['id'],
                self.max_time_to_build_instance)
            security_group = self._get_or_create_security_group(context)
            if security_group:
                if self.network_helper.NAME == NOVA_NAME:
                    # NOTE(vponomaryov): Nova-network allows to assign
                    # secgroups only by names.
                    sg_id = security_group.name
                else:
                    sg_id = security_group.id
                LOG.debug(
                    "Adding security group '%(sg)s' to server '%(si)s'.",
                    dict(sg=sg_id, si=service_instance["id"]))
                self.compute_api.add_security_group_to_server(
                    context, service_instance["id"], sg_id)
            if self.network_helper.NAME == NEUTRON_NAME:
                service_instance['ip'] = self._get_server_ip(
                    service_instance,
                    self.get_config_option("service_network_name"))
                # Prefer the tenant-facing port when present, otherwise the
                # service port doubles as the public endpoint.
                public_ip = network_data.get(
                    'public_port', network_data['service_port'])['fixed_ips']
                service_instance['public_address'] = public_ip[0]['ip_address']
            else:
                net_name = self.network_helper.get_network_name(network_info)
                service_instance['ip'] = self._get_server_ip(
                    service_instance, net_name)
                service_instance['public_address'] = service_instance['ip']
        except Exception as e:
            e.detail_data = {'server_details': fail_safe_data}
            raise
        service_instance.update(fail_safe_data)
        service_instance['pk_path'] = key_path
        for pair in [('router', 'router_id'), ('service_subnet', 'subnet_id')]:
            if pair[0] in network_data and 'id' in network_data[pair[0]]:
                service_instance[pair[1]] = network_data[pair[0]]['id']
        service_instance['service_ip'] = network_data.get('service_ip')
        return service_instance
    def _get_service_instance_create_kwargs(self):
        """Specify extra arguments used when creating the service instance.
        Classes inheriting the service instance manager can use this to easily
        pass extra arguments such as user data or metadata.
        """
        return {}
    def _check_server_availability(self, instance_details):
        # Poll the SSH port until it answers or the build timeout elapses.
        t = time.time()
        while time.time() - t < self.max_time_to_build_instance:
            LOG.debug('Checking server availability.')
            if not self._test_server_connection(instance_details):
                time.sleep(5)
            else:
                return True
        return False
    def _test_server_connection(self, server):
        # Plain TCP connect to port 22; success means SSH is reachable.
        try:
            socket.socket().connect((server['ip'], 22))
            LOG.debug('Server %s is available via SSH.',
                      server['ip'])
            return True
        except socket.error as e:
            LOG.debug(e)
            LOG.debug("Server %s is not available via SSH. Waiting...",
                      server['ip'])
            return False
    def delete_service_instance(self, context, server_details):
        """Removes share infrastructure.
        Deletes service vm and subnet, associated to share network.
        """
        instance_id = server_details.get("instance_id")
        self._delete_server(context, instance_id)
        self.network_helper.teardown_network(server_details)
    def wait_for_instance_to_be_active(self, instance_id, timeout):
        """Polls Nova until the instance is ACTIVE with networks attached.

        :returns: the server dict once ACTIVE.
        :raises: exception.ServiceInstanceException on timeout or ERROR.
        """
        t = time.time()
        while time.time() - t < timeout:
            try:
                service_instance = self.compute_api.server_get(
                    self.admin_context,
                    instance_id)
            except exception.InstanceNotFound as e:
                LOG.debug(e)
                time.sleep(1)
                continue
            instance_status = service_instance['status']
            # NOTE(vponomaryov): emptiness of 'networks' field checked as
            # workaround for nova/neutron bug #1210483.
            if (instance_status == 'ACTIVE' and
                    service_instance.get('networks', {})):
                return service_instance
            elif service_instance['status'] == 'ERROR':
                break
            LOG.debug("Waiting for instance %(instance_id)s to be active. "
                      "Current status: %(instance_status)s." %
                      dict(instance_id=instance_id,
                           instance_status=instance_status))
            time.sleep(1)
        # NOTE(review): if every poll raised InstanceNotFound,
        # 'instance_status' is never bound and this raise would itself fail
        # with UnboundLocalError — confirm and initialize it before the loop.
        raise exception.ServiceInstanceException(
            _("Instance %(instance_id)s failed to reach active state "
              "in %(timeout)s seconds. "
              "Current status: %(instance_status)s.") %
            dict(instance_id=instance_id,
                 timeout=timeout,
                 instance_status=instance_status))
    def reboot_server(self, server, soft_reboot=False):
        """Reboots the service instance via the compute API."""
        self.compute_api.server_reboot(self.admin_context,
                                       server['instance_id'],
                                       soft_reboot)
@six.add_metaclass(abc.ABCMeta)
class BaseNetworkhelper(object):
    """Abstract interface for the network helpers used by
    ServiceInstanceManager (Neutron- and Nova-network backed).
    """
    @abc.abstractproperty
    def NAME(self):
        """Returns code name of network helper."""
    @abc.abstractmethod
    def __init__(self, service_instance_manager):
        """Instantiates class and its attrs."""
    @abc.abstractmethod
    def get_network_name(self, network_info):
        """Returns name of network for service instance."""
    @abc.abstractmethod
    def setup_connectivity_with_service_instances(self):
        """Sets up connectivity between Manila host and service instances."""
    @abc.abstractmethod
    def setup_network(self, network_info):
        """Sets up network for service instance."""
    @abc.abstractmethod
    def teardown_network(self, server_details):
        """Teardowns network resources provided for service instance."""
class NeutronNetworkHelper(BaseNetworkhelper):
    """Network helper backed by Neutron.

    Creates and manages a dedicated service network/subnets, ports and
    router interfaces so the Manila host can reach service instances.
    """
    def __init__(self, service_instance_manager):
        self.get_config_option = service_instance_manager.get_config_option
        self.vif_driver = importutils.import_class(
            self.get_config_option("interface_driver"))()
        if service_instance_manager.driver_config:
            self._network_config_group = (
                service_instance_manager.driver_config.network_config_group or
                service_instance_manager.driver_config.config_group)
        else:
            self._network_config_group = None
        # Neutron client and service network id are created lazily via the
        # synchronized properties below.
        self._neutron_api = None
        self._service_network_id = None
        self.connect_share_server_to_tenant_network = (
            self.get_config_option('connect_share_server_to_tenant_network'))
    @property
    def NAME(self):
        """Code name of this helper."""
        return NEUTRON_NAME
    @property
    def admin_project_id(self):
        return self.neutron_api.admin_project_id
    @property
    @utils.synchronized("instantiate_neutron_api_neutron_net_helper")
    def neutron_api(self):
        # Lazily build the Neutron client once, under a named lock.
        if not self._neutron_api:
            self._neutron_api = neutron.API(
                config_group_name=self._network_config_group)
        return self._neutron_api
    @property
    @utils.synchronized("service_network_id_neutron_net_helper")
    def service_network_id(self):
        # Lazily resolve (or create) the service network, under a lock.
        if not self._service_network_id:
            self._service_network_id = self._get_service_network_id()
        return self._service_network_id
    def get_network_name(self, network_info):
        """Returns name of network for service instance."""
        net = self.neutron_api.get_network(network_info['neutron_net_id'])
        return net['name']
    @utils.synchronized("service_instance_get_service_network", external=True)
    def _get_service_network_id(self):
        """Finds existing or creates new service network."""
        service_network_name = self.get_config_option("service_network_name")
        networks = []
        for network in self.neutron_api.get_all_admin_project_networks():
            if network['name'] == service_network_name:
                networks.append(network)
        if len(networks) > 1:
            raise exception.ServiceInstanceException(
                _('Ambiguous service networks.'))
        elif not networks:
            return self.neutron_api.network_create(
                self.admin_project_id, service_network_name)['id']
        else:
            return networks[0]['id']
    @utils.synchronized(
        "service_instance_setup_and_teardown_network_for_instance",
        external=True)
    def teardown_network(self, server_details):
        """Deletes the server's ports and detaches its service subnet.

        The subnet is removed from the router only when no other compute
        port still uses it; it is then renamed to '' for later reuse.
        """
        subnet_id = server_details.get("subnet_id")
        router_id = server_details.get("router_id")
        service_port_id = server_details.get("service_port_id")
        public_port_id = server_details.get("public_port_id")
        for port_id in (service_port_id, public_port_id):
            if port_id:
                try:
                    self.neutron_api.delete_port(port_id)
                except exception.NetworkException as e:
                    # 404 means the port is already gone; anything else is
                    # a real failure and is re-raised.
                    if e.kwargs.get('code') != 404:
                        raise
                    LOG.debug("Failed to delete port %(port_id)s with error: "
                              "\n %(exc)s", {"port_id": port_id, "exc": e})
        if router_id and subnet_id:
            ports = self.neutron_api.list_ports(
                fields=['fixed_ips', 'device_id', 'device_owner'])
            # NOTE(vponomaryov): iterate ports to get to know whether current
            # subnet is used or not. We will not remove it from router if it
            # is used.
            for port in ports:
                # NOTE(vponomaryov): if device_id is present, then we know that
                # this port is used. Also, if device owner is 'compute:*', then
                # we know that it is VM. We continue only if both are 'True'.
                if (port['device_id'] and
                        port['device_owner'].startswith('compute:')):
                    for fixed_ip in port['fixed_ips']:
                        if fixed_ip['subnet_id'] == subnet_id:
                            # NOTE(vponomaryov): There are other share servers
                            # exist that use this subnet. So, do not remove it
                            # from router.
                            return
            try:
                # NOTE(vponomaryov): there is no other share servers or
                # some VMs that use this subnet. So, remove it from router.
                self.neutron_api.router_remove_interface(
                    router_id, subnet_id)
            except exception.NetworkException as e:
                if e.kwargs['code'] != 404:
                    raise
                LOG.debug('Subnet %(subnet_id)s is not attached to the '
                          'router %(router_id)s.',
                          {'subnet_id': subnet_id, 'router_id': router_id})
            # Clearing the name marks the subnet as unused so
            # _get_service_subnet can recycle it later.
            self.neutron_api.update_subnet(subnet_id, '')
    @utils.synchronized(
        "service_instance_setup_and_teardown_network_for_instance",
        external=True)
    def setup_network(self, network_info):
        """Creates subnet/ports (and router plumbing) for a service VM.

        :param network_info: dict with 'neutron_net_id'/'neutron_subnet_id'.
        :returns: dict with subnet, ports, nics, service/public IPs.
        """
        neutron_net_id = network_info['neutron_net_id']
        neutron_subnet_id = network_info['neutron_subnet_id']
        network_data = dict()
        subnet_name = ('service_subnet_for_handling_of_share_server_for_'
                       'tenant_subnet_%s' % neutron_subnet_id)
        network_data['service_subnet'] = self._get_service_subnet(subnet_name)
        if not network_data['service_subnet']:
            network_data['service_subnet'] = self.neutron_api.subnet_create(
                self.admin_project_id, self.service_network_id, subnet_name,
                self._get_cidr_for_subnet())
        if not self.connect_share_server_to_tenant_network:
            # Route between the tenant subnet and the service subnet via the
            # tenant's existing router.
            network_data['router'] = self._get_private_router(
                neutron_net_id, neutron_subnet_id)
            try:
                self.neutron_api.router_add_interface(
                    network_data['router']['id'],
                    network_data['service_subnet']['id'])
            except exception.NetworkException as e:
                if e.kwargs['code'] != 400:
                    raise
                LOG.debug('Subnet %(subnet_id)s is already attached to the '
                          'router %(router_id)s.',
                          {'subnet_id': network_data['service_subnet']['id'],
                           'router_id': network_data['router']['id']})
        network_data['service_port'] = self.neutron_api.create_port(
            self.admin_project_id, self.service_network_id,
            subnet_id=network_data['service_subnet']['id'],
            device_owner='manila')
        network_data['ports'] = [network_data['service_port']]
        if self.connect_share_server_to_tenant_network:
            # Second NIC directly on the tenant network for share export.
            network_data['public_port'] = self.neutron_api.create_port(
                self.admin_project_id, neutron_net_id,
                subnet_id=neutron_subnet_id, device_owner='manila')
            network_data['ports'].append(network_data['public_port'])
        try:
            port = self.setup_connectivity_with_service_instances()
            service_ip = self._get_service_ip(
                port, network_data['service_subnet']['id'])
        except Exception as e:
            # Roll back the ports created above before propagating.
            for port in network_data['ports']:
                self.neutron_api.delete_port(port['id'])
            raise
        network_data['nics'] = [
            {'port-id': port['id']} for port in network_data['ports']]
        public_ip = network_data.get(
            'public_port', network_data['service_port'])
        network_data['ip_address'] = public_ip['fixed_ips'][0]['ip_address']
        network_data['service_ip'] = service_ip
        return network_data
    def _get_service_ip(self, port, subnet_id):
        """Returns the port's IP on the given subnet."""
        for fixed_ips in port['fixed_ips']:
            if subnet_id == fixed_ips['subnet_id']:
                return fixed_ips['ip_address']
        msg = _("Service IP not found for Share Server.")
        raise exception.ServiceIPNotFound(reason=msg)
    def _get_cidr_for_subnet(self):
        """Returns not used cidr for service subnet creating."""
        subnets = self._get_all_service_subnets()
        used_cidrs = set(subnet['cidr'] for subnet in subnets)
        serv_cidr = netaddr.IPNetwork(
            self.get_config_option("service_network_cidr"))
        division_mask = self.get_config_option("service_network_division_mask")
        # Walk candidate sub-CIDRs of the service network and return the
        # first one not already allocated.
        for subnet in serv_cidr.subnet(division_mask):
            cidr = six.text_type(subnet.cidr)
            if cidr not in used_cidrs:
                return cidr
        else:
            raise exception.ServiceInstanceException(_('No available cidrs.'))
    def setup_connectivity_with_service_instances(self):
        """Sets up connectivity with service instances.
        Creates a port in the service network and sets up the required
        network devices on the Manila host.
        """
        port = self._get_service_port()
        port = self._add_fixed_ips_to_service_port(port)
        interface_name = self.vif_driver.get_device_name(port)
        self.vif_driver.plug(interface_name, port['id'], port['mac_address'])
        ip_cidrs = []
        for fixed_ip in port['fixed_ips']:
            subnet = self.neutron_api.get_subnet(fixed_ip['subnet_id'])
            net = netaddr.IPNetwork(subnet['cidr'])
            ip_cidr = '%s/%s' % (fixed_ip['ip_address'], net.prefixlen)
            ip_cidrs.append(ip_cidr)
        self.vif_driver.init_l3(interface_name, ip_cidrs)
        # ensure that interface is first in the list
        device = ip_lib.IPDevice(interface_name)
        device.route.pullup_route(interface_name)
        # here we are checking for garbage devices from removed service port
        self._remove_outdated_interfaces(device)
        return port
    @utils.synchronized(
        "service_instance_remove_outdated_interfaces", external=True)
    def _remove_outdated_interfaces(self, device):
        """Finds and removes unused network device."""
        device_cidr_set = self._get_set_of_device_cidrs(device)
        for dev in ip_lib.IPWrapper().get_devices():
            # Same 3-char prefix => same vif driver naming scheme; an
            # overlapping CIDR means it is a stale device for this network.
            if dev.name != device.name and dev.name[:3] == device.name[:3]:
                cidr_set = self._get_set_of_device_cidrs(dev)
                if device_cidr_set & cidr_set:
                    self.vif_driver.unplug(dev.name)
    def _get_set_of_device_cidrs(self, device):
        """Returns the set of IPv4 network CIDRs bound to a device."""
        cidrs = set()
        for addr in device.addr.list():
            if addr['ip_version'] == 4:
                cidrs.add(six.text_type(netaddr.IPNetwork(addr['cidr']).cidr))
        return cidrs
    @utils.synchronized("service_instance_get_service_port", external=True)
    def _get_service_port(self):
        """Find or creates service neutron port.
        This port will be used for connectivity with service instances.
        """
        ports = [port for port in self.neutron_api.
                 list_ports(device_id='manila-share')]
        if len(ports) > 1:
            raise exception.ServiceInstanceException(
                _('Error. Ambiguous service ports.'))
        elif not ports:
            host = socket.gethostname()
            port = self.neutron_api.create_port(
                self.admin_project_id, self.service_network_id,
                device_id='manila-share', device_owner='manila:share',
                host_id=host)
        else:
            port = ports[0]
        return port
    @utils.synchronized(
        "service_instance_add_fixed_ips_to_service_port", external=True)
    def _add_fixed_ips_to_service_port(self, port):
        """Ensures the service port has an IP on every service subnet."""
        network = self.neutron_api.get_network(self.service_network_id)
        subnets = set(network['subnets'])
        port_fixed_ips = []
        for fixed_ip in port['fixed_ips']:
            port_fixed_ips.append({'subnet_id': fixed_ip['subnet_id'],
                                   'ip_address': fixed_ip['ip_address']})
            if fixed_ip['subnet_id'] in subnets:
                subnets.remove(fixed_ip['subnet_id'])
        # If there are subnets here that means that
        # we need to add those to the port and call update.
        if subnets:
            port_fixed_ips.extend([dict(subnet_id=s) for s in subnets])
            port = self.neutron_api.update_port_fixed_ips(
                port['id'], {'fixed_ips': port_fixed_ips})
        return port
    @utils.synchronized("service_instance_get_private_router", external=True)
    def _get_private_router(self, neutron_net_id, neutron_subnet_id):
        """Returns router attached to private subnet gateway."""
        private_subnet = self.neutron_api.get_subnet(neutron_subnet_id)
        if not private_subnet['gateway_ip']:
            raise exception.ServiceInstanceException(
                _('Subnet must have gateway.'))
        private_network_ports = [p for p in self.neutron_api.list_ports(
                                 network_id=neutron_net_id)]
        # Find the port holding the subnet's gateway IP; its device_id is
        # the router we need.
        for p in private_network_ports:
            fixed_ip = p['fixed_ips'][0]
            if (fixed_ip['subnet_id'] == private_subnet['id'] and
                    fixed_ip['ip_address'] == private_subnet['gateway_ip']):
                private_subnet_gateway_port = p
                break
        else:
            raise exception.ServiceInstanceException(
                _('Subnet gateway is not attached to the router.'))
        private_subnet_router = self.neutron_api.show_router(
            private_subnet_gateway_port['device_id'])
        return private_subnet_router
    @utils.synchronized("service_instance_get_service_subnet", external=True)
    def _get_service_subnet(self, subnet_name):
        """Returns the named service subnet, recycling an unused one.

        A subnet whose name was cleared by teardown_network ('') is renamed
        and reused before returning None (which triggers creation).
        """
        all_service_subnets = self._get_all_service_subnets()
        service_subnets = [subnet for subnet in all_service_subnets
                           if subnet['name'] == subnet_name]
        if len(service_subnets) == 1:
            return service_subnets[0]
        elif not service_subnets:
            unused_service_subnets = [subnet for subnet in all_service_subnets
                                      if subnet['name'] == '']
            if unused_service_subnets:
                service_subnet = unused_service_subnets[0]
                self.neutron_api.update_subnet(
                    service_subnet['id'], subnet_name)
                return service_subnet
            return None
        else:
            raise exception.ServiceInstanceException(
                _('Ambiguous service subnets.'))
    @utils.synchronized(
        "service_instance_get_all_service_subnets", external=True)
    def _get_all_service_subnets(self):
        """Returns all subnets of the service network as dicts."""
        service_network = self.neutron_api.get_network(self.service_network_id)
        subnets = []
        for subnet_id in service_network['subnets']:
            subnets.append(self.neutron_api.get_subnet(subnet_id))
        return subnets
class NovaNetworkHelper(BaseNetworkhelper):
    """Nova network helper for Manila service instances.
    All security-group rules are applied to all interfaces of Nova VM
    using Nova-network. In that case there is no need to create additional
    service network. Only one thing should be satisfied - Manila host
    should have access to all tenant networks.
    This network helper does not create resources.
    """

    def __init__(self, service_instance_manager):
        self.compute_api = service_instance_manager.compute_api
        self.admin_context = service_instance_manager.admin_context

    @property
    def NAME(self):
        """Identifier of this network helper implementation."""
        return NOVA_NAME

    def setup_network(self, network_info):
        """Attach the Nova network's NIC and service IP to network_info."""
        nova_net = self._get_nova_network(network_info['nova_net_id'])
        network_info['nics'] = [{'net-id': nova_net['id']}]
        network_info['service_ip'] = nova_net['gateway']
        return network_info

    def get_network_name(self, network_info):
        """Returns name of network for service instance."""
        nova_net = self._get_nova_network(network_info['nova_net_id'])
        return nova_net['label']

    def teardown_network(self, server_details):
        """Nothing to do. Placeholder."""

    def setup_connectivity_with_service_instances(self):
        """Nothing to do. Placeholder."""

    def _get_nova_network(self, nova_network_id):
        """Returns network to be used for service instance.
        :param nova_network_id: string with id of network.
        :returns: dict -- network data as dict
        :raises: exception.ManilaException
        """
        if not nova_network_id:
            raise exception.ManilaException(
                _('Nova network for service instance is not provided.'))
        return self.compute_api.network_get(self.admin_context,
                                            nova_network_id)
| {
"content_hash": "254fc342f3f2b7a34e5ec736f2913d58",
"timestamp": "",
"source": "github",
"line_count": 1042,
"max_line_length": 79,
"avg_line_length": 42.38771593090211,
"alnum_prop": 0.5819597898931353,
"repo_name": "jcsp/manila",
"id": "a39c8a5f27b86590827944361a61dea0ce98ebf8",
"size": "44836",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "manila/share/drivers/service_instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "953"
},
{
"name": "Python",
"bytes": "4993686"
},
{
"name": "Shell",
"bytes": "42913"
}
],
"symlink_target": ""
} |
import xlrd
from collections import OrderedDict
import simplejson as json

# Open the workbook and select the first worksheet.
wb = xlrd.open_workbook('cnu_major.xls')
sh = wb.sheet_by_index(0)

# Map of major index -> major name, wrapped into a single top-level dict.
major_dict = {}
major = {}

# Iterate through the data rows (skipping the header row 0) and collect
# the index/name pairs. The original code also allocated an unused
# OrderedDict per row; that dead code has been removed.
for rownum in range(1, sh.nrows):
    row_values = sh.row_values(rownum)
    try:
        # Rows whose first cell is not numeric (e.g. stray headers) are
        # skipped via the ValueError handler below.
        major_dict[int(row_values[0])] = row_values[1]
    except ValueError:
        pass

major['majors'] = major_dict

# Serialize the dict of majors to JSON, keeping non-ASCII characters intact.
j = json.dumps(major, ensure_ascii=False, encoding='utf8')

# Write to file.
with open('data.json', 'w') as f:
    f.write(j)
"content_hash": "714e6e0ec1b7b399fc9cb766f5695fa8",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 66,
"avg_line_length": 25.93548387096774,
"alnum_prop": 0.6766169154228856,
"repo_name": "haeungun/MealTime_Android",
"id": "934c5823a44473b3980e563c07b90205e81563e5",
"size": "804",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "excelToJson.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Java",
"bytes": "47957"
},
{
"name": "Python",
"bytes": "804"
}
],
"symlink_target": ""
} |
import pyxb_114
import pyxb_114.binding.generate
import pyxb_114.utils.domutils
import pyxb_114.binding.saxer
import StringIO
from xml.dom import Node
import os.path
# Locate the substitution-group schema relative to this test module.
schema_path = '%s/../schemas/substgroup.xsd' % (os.path.dirname(__file__),)
# Generate Python bindings for the schema and execute them in this module's
# namespace. Names used below (when, sgTime, ISO8601, pairTime, Namespace,
# CreateFromDOM) are presumably defined by this generated code -- they are
# not imported from anywhere else in this file.
code = pyxb_114.binding.generate.GeneratePython(schema_location=schema_path)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb_114.exceptions_ import *
import unittest
class TestSubstGroup (unittest.TestCase):
    """Tests for XML substitution-group handling in the generated bindings.

    Each document is exercised twice: once through the DOM path
    (CreateFromDOM) and once through the SAX path, and both are expected to
    produce equivalent instances that round-trip back to the input XML.
    """
    def testISO8601 (self):
        # Concrete ISO8601 element substituting for the sgTime head element.
        xml = '<when><ISO8601>2009-06-15T17:50:00Z</ISO8601></when>'
        dom = pyxb_114.utils.domutils.StringToDOM(xml)
        instance = CreateFromDOM(dom.documentElement)
        self.assertEqual(instance.sgTime._element(), ISO8601)
        # Round-trip back to XML must reproduce the input exactly.
        self.assertEqual(instance.toDOM().documentElement.toxml("utf-8"), xml)
        # Same document through the SAX parser must give the same results.
        saxer = pyxb_114.binding.saxer.make_parser(fallback_namespace=Namespace)
        handler = saxer.getContentHandler()
        saxer.parse(StringIO.StringIO(xml))
        instance = handler.rootObject()
        self.assertEqual(instance.sgTime._element(), ISO8601)
        self.assertEqual(instance.toDOM().documentElement.toxml("utf-8"), xml)
    def testPairTime (self):
        # pairTime is another member of the sgTime substitution group.
        xml = '<when><pairTime><seconds>34.0</seconds><fractionalSeconds>0.21</fractionalSeconds></pairTime></when>'
        dom = pyxb_114.utils.domutils.StringToDOM(xml)
        instance = CreateFromDOM(dom.documentElement)
        self.assertEqual(instance.sgTime._element(), pairTime)
        self.assertEqual(instance.sgTime.seconds, 34)
        self.assertEqual(instance.toDOM().documentElement.toxml("utf-8"), xml)
        # SAX path, mirroring testISO8601.
        saxer = pyxb_114.binding.saxer.make_parser(fallback_namespace=Namespace)
        handler = saxer.getContentHandler()
        saxer.parse(StringIO.StringIO(xml))
        instance = handler.rootObject()
        self.assertEqual(instance.sgTime._element(), pairTime)
        self.assertEqual(instance.sgTime.seconds, 34)
        self.assertEqual(instance.toDOM().documentElement.toxml("utf-8"), xml)
    def testSGTime (self):
        # Using the abstract head element directly must be rejected ...
        xml = '<when><sgTime>2009-06-15T17:50:00Z</sgTime></when>'
        dom = pyxb_114.utils.domutils.StringToDOM(xml)
        self.assertRaises(pyxb_114.AbstractElementError, CreateFromDOM, dom.documentElement)
        saxer = pyxb_114.binding.saxer.make_parser(fallback_namespace=Namespace)
        handler = saxer.getContentHandler()
        self.assertRaises(pyxb_114.AbstractElementError, saxer.parse, StringIO.StringIO(xml))
        # ... whether it appears nested inside <when> or stands alone.
        xml = '<sgTime>2009-06-15T17:50:00Z</sgTime>'
        dom = pyxb_114.utils.domutils.StringToDOM(xml)
        self.assertRaises(pyxb_114.AbstractElementError, CreateFromDOM, dom.documentElement)
        self.assertRaises(pyxb_114.AbstractElementError, saxer.parse, StringIO.StringIO(xml))
        # A concrete substitution-group member alone is accepted.
        xml = '<ISO8601>2009-06-15T17:50:00Z</ISO8601>'
        dom = pyxb_114.utils.domutils.StringToDOM(xml)
        instance = CreateFromDOM(dom.documentElement)
        self.assertEqual(instance._element(), ISO8601)
        saxer.parse(StringIO.StringIO(xml))
        instance = handler.rootObject()
        self.assertEqual(instance._element(), ISO8601)
    def testGenAbstract (self):
        xml = '<when><pairTime><seconds>34.0</seconds><fractionalSeconds>0.21</fractionalSeconds></pairTime></when>'
        # Build the document programmatically instead of parsing it.
        instance = when(pairTime(34.0, 0.21))
        self.assertEqual(instance.sgTime._element(), pairTime)
        self.assertEqual(instance.sgTime.seconds, 34)
        self.assertEqual(instance.toDOM().documentElement.toxml("utf-8"), xml)
        # Loss of element association kills DOM generation
        instance.sgTime._setElement(None)
        self.assertRaises(pyxb_114.DOMGenerationError, instance.toDOM)
        # Instantiating the abstract element factory itself must fail too.
        self.assertRaises(pyxb_114.AbstractElementError, sgTime)
# Allow the test module to be run directly as a script.
if __name__ == '__main__':
    unittest.main()
| {
"content_hash": "014cad14623fab69a35c04c44224f60d",
"timestamp": "",
"source": "github",
"line_count": 88,
"max_line_length": 116,
"avg_line_length": 43.54545454545455,
"alnum_prop": 0.6941544885177453,
"repo_name": "msherry/PyXB-1.1.4",
"id": "155ec687ab9a1e8a3cf2811cc88246c26c81c110",
"size": "3832",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/drivers/test-substgroup.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Perl",
"bytes": "6307"
},
{
"name": "Python",
"bytes": "1521054"
},
{
"name": "Shell",
"bytes": "23730"
}
],
"symlink_target": ""
} |
from typing import List, Optional, Tuple
from django.db import connections
from psqlextra.models import PostgresPartitionedModel
from .config import PostgresPartitioningConfig
from .constants import AUTO_PARTITIONED_COMMENT
from .error import PostgresPartitioningError
from .partition import PostgresPartition
from .plan import PostgresModelPartitioningPlan, PostgresPartitioningPlan
# Pairing of a partitioned model with the partitions associated with it.
PartitionList = List[Tuple[PostgresPartitionedModel, List[PostgresPartition]]]
class PostgresPartitioningManager:
    """Helps managing partitions by automatically creating new partitions and
    deleting old ones according to the configuration."""

    def __init__(self, configs: List[PostgresPartitioningConfig]) -> None:
        self.configs = configs
        self._validate_configs(self.configs)

    def plan(
        self,
        skip_create: bool = False,
        skip_delete: bool = False,
        using: Optional[str] = None,
    ) -> PostgresPartitioningPlan:
        """Plans which partitions should be deleted/created.

        Arguments:
            skip_create:
                When True, no partitions are marked for creation,
                regardless of the configuration.
            skip_delete:
                When True, no partitions are marked for deletion,
                regardless of the configuration.
            using:
                Name of the database connection to use.

        Returns:
            A plan describing what partitions would be created and
            deleted if the plan is applied.
        """
        per_model_plans = []
        for config in self.configs:
            config_plan = self._plan_for_config(
                config,
                skip_create=skip_create,
                skip_delete=skip_delete,
                using=using,
            )
            if config_plan:
                per_model_plans.append(config_plan)
        return PostgresPartitioningPlan(per_model_plans)

    def find_config_for_model(
        self, model: PostgresPartitionedModel
    ) -> Optional[PostgresPartitioningConfig]:
        """Finds the partitioning config for the specified model."""
        for config in self.configs:
            if config.model == model:
                return config
        return None

    def _plan_for_config(
        self,
        config: PostgresPartitioningConfig,
        skip_create: bool = False,
        skip_delete: bool = False,
        using: Optional[str] = None,
    ) -> Optional[PostgresModelPartitioningPlan]:
        """Creates a partitioning plan for one partitioning config."""
        connection = connections[using or "default"]
        table = self._get_partitioned_table(connection, config.model)
        config_plan = PostgresModelPartitioningPlan(config)
        if not skip_create:
            # Only partitions that do not exist yet need to be created.
            for candidate in config.strategy.to_create():
                if not table.partition_by_name(name=candidate.name()):
                    config_plan.creations.append(candidate)
        if not skip_delete:
            for candidate in config.strategy.to_delete():
                introspected = table.partition_by_name(name=candidate.name())
                if not introspected:
                    # Stop as soon as a partition no longer exists.
                    break
                # Only auto-created partitions are eligible for deletion.
                if introspected.comment == AUTO_PARTITIONED_COMMENT:
                    config_plan.deletions.append(candidate)
        if not config_plan.creations and not config_plan.deletions:
            return None
        return config_plan

    @staticmethod
    def _get_partitioned_table(connection, model: PostgresPartitionedModel):
        with connection.cursor() as cursor:
            partitioned_table = connection.introspection.get_partitioned_table(
                cursor, model._meta.db_table
            )
        if not partitioned_table:
            raise PostgresPartitioningError(
                f"Model {model.__name__}, with table "
                f"{model._meta.db_table} does not exists in the "
                "database. Did you run `python manage.py migrate`?"
            )
        return partitioned_table

    @staticmethod
    def _validate_configs(configs: List[PostgresPartitioningConfig]):
        """Ensures there is only one config per model."""
        model_names = {config.model.__name__ for config in configs}
        if len(model_names) != len(configs):
            raise PostgresPartitioningError(
                "Only one partitioning config per model is allowed"
            )
| {
"content_hash": "672ac475554c4e3da10d8c580ca48630",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 78,
"avg_line_length": 33.12408759124087,
"alnum_prop": 0.6121639488761569,
"repo_name": "SectorLabs/django-postgres-extra",
"id": "28aee91ee0bf19af22eb1808590be62006141b2b",
"size": "4538",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psqlextra/partitioning/manager.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "359362"
}
],
"symlink_target": ""
} |
from django import forms
from django.core import validators
from django.db.models import get_model
from django.utils.translation import ugettext_lazy as _
from oscar.apps.customer.forms import EmailUserCreationForm, CommonPasswordValidator
from oscar.core.compat import get_user_model
# Resolve the active user model and the partner models dynamically so that
# customised/swapped models are respected.
User = get_user_model()
Partner = get_model('partner', 'Partner')
PartnerAddress = get_model('partner', 'PartnerAddress')
class PartnerSearchForm(forms.Form):
    """Search form accepting an (optional) partner name."""
    name = forms.CharField(required=False, label=_("Partner name"))
class PartnerCreateForm(forms.ModelForm):
    """Model form for creating a Partner; only the name is editable."""
    class Meta:
        model = Partner
        fields = ('name',)
class NewUserForm(EmailUserCreationForm):
    """Creation form that flags the new user as staff and links them to the
    given partner on save."""

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'email', 'password1', 'password2')

    def __init__(self, partner, *args, **kwargs):
        self.partner = partner
        super(NewUserForm, self).__init__(host=None, *args, **kwargs)

    def save(self):
        # Persist the user first so it has a primary key, then attach it to
        # the partner's user set.
        new_user = super(NewUserForm, self).save(commit=False)
        new_user.is_staff = True
        new_user.save()
        self.partner.users.add(new_user)
        return new_user
class ExistingUserForm(forms.ModelForm):
    """
    Slightly different form that makes
    * makes saving password optional
    * doesn't regenerate username
    * doesn't allow changing email till #668 is resolved
    """
    password1 = forms.CharField(
        label=_('Password'),
        widget=forms.PasswordInput,
        required=False,
        validators=[validators.MinLengthValidator(6),
                    CommonPasswordValidator()])
    password2 = forms.CharField(
        required=False,
        label=_('Confirm Password'),
        widget=forms.PasswordInput)

    class Meta:
        model = User
        fields = ('first_name', 'last_name', 'password1', 'password2')

    def clean_password2(self):
        # Both fields default to '' so leaving them blank is still a match.
        first = self.cleaned_data.get('password1', '')
        second = self.cleaned_data.get('password2', '')
        if first != second:
            raise forms.ValidationError(_("The two password fields didn't match."))
        return second

    def save(self, commit=True):
        instance = super(ExistingUserForm, self).save(commit=False)
        # Only replace the password when a new one was actually supplied.
        new_password = self.cleaned_data['password1']
        if new_password:
            instance.set_password(new_password)
        if commit:
            instance.save()
        return instance
class UserEmailForm(forms.Form):
    """Form accepting a full or partial email address."""
    # We use a CharField so that a partial email address can be entered
    email = forms.CharField(
        label=_("Email address"), max_length=100)
class PartnerAddressForm(forms.ModelForm):
    """Model form for editing a partner's address fields."""
    class Meta:
        fields = ('line1', 'line2', 'line3', 'line4',
                  'state', 'postcode', 'country')
        model = PartnerAddress
| {
"content_hash": "6a30f628c5003ec982ef6d6a4ef78b47",
"timestamp": "",
"source": "github",
"line_count": 92,
"max_line_length": 84,
"avg_line_length": 30,
"alnum_prop": 0.6445652173913043,
"repo_name": "makielab/django-oscar",
"id": "313decf8d3cc53a7de34318c2b79cc1a8d5eca26",
"size": "2760",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "oscar/apps/dashboard/partners/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1020640"
},
{
"name": "JavaScript",
"bytes": "1219819"
},
{
"name": "Puppet",
"bytes": "1995"
},
{
"name": "Python",
"bytes": "3577419"
},
{
"name": "Ruby",
"bytes": "634"
},
{
"name": "Shell",
"bytes": "5723"
}
],
"symlink_target": ""
} |
import os
import pexpect
import re
import sys
from hyriseBenchmarkCore import initialize
def main():
    """Smoke-test hyriseServer end-to-end through psql.

    Starts the server with a small generated TPC-H data set, waits until it
    reports its port, then runs a few SQL statements via psql and checks the
    expected output. Exits with status 1 when not run from the repository
    root, since the test data paths are relative.
    """
    build_dir = initialize()
    if not os.path.isdir("resources/test_data/tbl"):
        # Note the trailing space before "repository?" -- the original
        # implicit string concatenation printed "Hyriserepository?".
        print(
            "Cannot find resources/test_data/tbl. Are you running the test suite from the main folder of the Hyrise "
            "repository?"
        )
        sys.exit(1)
    # Port 0 lets the server pick a free port; the actual port is parsed from
    # the startup banner below.
    server = pexpect.spawn(f"{build_dir}/hyriseServer --benchmark_data=tpc-h:0.01 -p 0", timeout=10)
    server.expect_exact("Loading/Generating tables", timeout=120)
    server.expect_exact("Encoding 'lineitem'", timeout=120)
    search_regex = r"Server started at 0.0.0.0 and port (\d+)"
    server.expect(search_regex, timeout=120)
    server_port = int(re.search(search_regex, str(server.after)).group(1))
    # Recent Postgres/psql versions changed the authentication behavior, resulting in connection errors on some setups.
    # Disabling encrypted connections solves the issue. Since hyriseServer does not implement authentication at all,
    # this is no problem.
    # See https://github.com/psycopg/psycopg2/issues/1084#issuecomment-656778107 and
    # https://www.postgresql.org/docs/13/libpq-connect.html#LIBPQ-CONNECT-GSSENCMODE
    environment_variables = os.environ.copy()
    environment_variables.update({"PGGSSENCMODE": "disable"})
    client = pexpect.spawn(f"psql -h localhost -p {server_port}", timeout=20, env=environment_variables)
    # Sanity query against the generated TPC-H data.
    client.sendline("select count(*) from region;")
    client.expect_exact("COUNT(*)")
    client.expect_exact("5")
    client.expect_exact("(1 row)")
    # Import a .tbl file into a new table and verify its row count.
    client.sendline("COPY loaded_table_from_tbl FROM 'resources/test_data/tbl/int.tbl';")
    client.expect_exact("SELECT 0")
    client.sendline("SELECT COUNT(*) AS \"row_count\" FROM loaded_table_from_tbl;")
    client.expect_exact("row_count")
    client.expect_exact("3")
    client.expect_exact("(1 row)")
    # Not using close_benchmark() here, as a server is started and a timeout of None would wait forever.
    client.close()
    server.close()


if __name__ == "__main__":
    main()
| {
"content_hash": "105df314e6d05a984cd16a2939c335af",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 119,
"avg_line_length": 38.574074074074076,
"alnum_prop": 0.6927508401344215,
"repo_name": "hyrise/hyrise",
"id": "ca86a37f9d0a15b8cab9ec7894ca26e0c39144b7",
"size": "2107",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scripts/test/hyriseServer_test.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5907952"
},
{
"name": "CMake",
"bytes": "76722"
},
{
"name": "Dockerfile",
"bytes": "1345"
},
{
"name": "PLpgSQL",
"bytes": "30508"
},
{
"name": "Python",
"bytes": "110241"
},
{
"name": "Shell",
"bytes": "30173"
}
],
"symlink_target": ""
} |
"""
Generates a list of JSON objects, one object for each dataset retrieved from
the HSCIC website.
"""
import os
import string
import json
import logging
import requests
import urllib
import html2text
from bs4 import BeautifulSoup
from urlparse import urlparse
# Log everything (DEBUG and up) to a file so long scraping runs can be
# audited afterwards.
logging.basicConfig(filename='datasets.log',
                    format='%(asctime)s %(levelname)s: %(message)s',
                    level=logging.DEBUG)
def get_query_dict(query):
    """
    Given a query string will return a dict representation thereof.

    Each '&'-separated item is split on its first '=' only, so values that
    themselves contain '=' are preserved, and a bare key without a value maps
    to an empty string (the original split raised ValueError in both cases).
    """
    result = {}
    for item in query.split('&'):
        key, _unused, value = item.partition('=')
        result[urllib.unquote(key)] = urllib.unquote(value)
    return result
def get_datasets(dom):
    """
    Given a BeautifulSoup DOM will return a list of all the dataset's ids
    found therein.
    """
    product_ids = []
    # Each product link carries its id in the 'productid' query parameter.
    for anchor in dom.find_all('a', 'HSCICProducts'):
        parsed = urlparse(anchor.attrs['href'])
        params = get_query_dict(parsed.query)
        product_ids.append(int(params['productid']))
    return product_ids
def get_parsed_url(parsed, query):
    """
    Given a parsed URL and updated query dictionary, will return a string
    representation of the updated URL.
    """
    encoded_query = urllib.urlencode(query)
    parts = (parsed.scheme, parsed.netloc, parsed.path, encoded_query)
    return '{}://{}{}?{}'.format(*parts)
def get_datasets_from_paginated_results(start_url):
    """
    Given a start URL will attempt to paginate through the results and return
    a list of dataset ids that constitute the result.

    Non-2xx/3xx responses are logged and skipped; the ids collected so far
    are still returned.
    """
    result = []
    logging.info('Getting paginated results for {}'.format(start_url))
    parsed = urlparse(start_url)
    query = get_query_dict(parsed.query)
    # Request 100 results per page to minimise the number of round trips.
    query['size'] = '100'
    query['page'] = '1'
    # Grab the first page.
    url = get_parsed_url(parsed, query)
    logging.info('Requesting {}'.format(url))
    response = requests.get(url)
    logging.info(response.status_code)
    if response.status_code < 400:
        # Work out how many further pages there are.
        first_page_soup = BeautifulSoup(response.text)
        result.extend(get_datasets(first_page_soup))
        paging = first_page_soup.find(id='paging')
        last_page_anchor = paging.find('a', 'last')
        if last_page_anchor:
            last_page = int(last_page_anchor.text)
            logging.info('Number of pages is {}'.format(last_page))
            # Iterate and parse them.
            if last_page > 1:
                for i in range(2, last_page+1):
                    query['page'] = str(i)
                    url = get_parsed_url(parsed, query)
                    logging.info('Requesting {}'.format(url))
                    response = requests.get(url)
                    logging.info(response.status_code)
                    if response.status_code < 400:
                        soup = BeautifulSoup(response.text)
                        result.extend(get_datasets(soup))
    logging.info('Number of datasets found: {}'.format(len(result)))
    return result
def get_keywords(cache):
    """
    Will attempt to retrieve a list of keywords and associated product_ids
    (the unique dataset identifier).

    Results are cached in the JSON file at ``cache``; when that file exists
    it is loaded and returned instead of re-scraping the site.
    """
    url_template = 'http://www.hscic.gov.uk/searchcatalogue?kwd={}&size=10&page=1#top'
    keywords = {}
    if os.path.isfile(cache):
        logging.info('Using cached records from {}'.format(cache))
        # Close file handles deterministically (the original leaked the
        # handle returned by open()).
        with open(cache) as cached:
            keywords = json.load(cached)
    else:
        # Seed the keyword dict by probing the search page for each letter.
        for letter in string.ascii_lowercase:
            url = url_template.format(letter)
            logging.info('Requesting {}'.format(url))
            response = requests.get(url)
            logging.info(response.status_code)
            if response.status_code < 400:
                html = response.text
                soup = BeautifulSoup(html)
                kw = soup.find("ol", "keyword")
                if kw:
                    kids = kw.find("ol", "children")
                    if kids:
                        spans = kids.find_all("span", "heading")
                        for item in spans:
                            keywords[item.text] = []
        # Resolve each keyword to the dataset ids in its search results.
        for key in keywords:
            if not keywords[key]:
                url = url_template.format(urllib.quote(key))
                keywords[key] = get_datasets_from_paginated_results(url)
        with open(cache, 'wb') as output:
            json.dump(keywords, output, indent=2)
        logging.info('Saved complete keywords to {}'.format(cache))
    return keywords
def get_topics(cache):
    """
    Will attempt to retrieve a list of topics and associated product_ids (the
    unique dataset identifiers).

    Results are cached in the JSON file at ``cache``; when that file exists
    it is loaded and returned instead of re-scraping the site.
    """
    url_template = 'http://www.hscic.gov.uk/searchcatalogue?topics=0%2f{}&size=100&page=1'
    topics = {}
    if os.path.isfile(cache):
        logging.info('Using cached records from {}'.format(cache))
        # Close file handles deterministically (the original leaked the
        # handle returned by open()).
        with open(cache) as cached:
            topics = json.load(cached)
    else:
        # Collect topic names from the catalogue's search page.
        url = "http://www.hscic.gov.uk/searchcatalogue"
        logging.info('Requesting {}'.format(url))
        response = requests.get(url)
        logging.info(response.status_code)
        if response.status_code < 400:
            html = response.text
            soup = BeautifulSoup(html)
            tops = soup.find("ol", "topic")
            if tops:
                spans = tops.find_all("span", "heading")
                for item in spans:
                    topics[item.text] = []
        # Resolve each topic to the dataset ids in its search results.
        for topic in topics:
            if not topics[topic]:
                url = url_template.format(urllib.quote(topic))
                topics[topic] = get_datasets_from_paginated_results(url)
        with open(cache, 'wb') as output:
            json.dump(topics, output, indent=2)
        logging.info('Saved complete topics to {}'.format(cache))
    return topics
def get_info_types(cache):
    """
    Will attempt to retrieve a list of information types and associated
    product_ids.

    Results are cached in the JSON file at ``cache``; when that file exists
    it is loaded and returned instead of re-scraping the site.
    """
    url_template = 'http://www.hscic.gov.uk/searchcatalogue?infotype=0%2f{}&size=100&page=1'
    info_types = {}
    if os.path.isfile(cache):
        logging.info('Using cached records from {}'.format(cache))
        # Close file handles deterministically (the original leaked the
        # handle returned by open()).
        with open(cache) as cached:
            info_types = json.load(cached)
    else:
        # Collect information-type names from the catalogue's search page.
        url = "http://www.hscic.gov.uk/searchcatalogue"
        logging.info('Requesting {}'.format(url))
        response = requests.get(url)
        logging.info(response.status_code)
        if response.status_code < 400:
            html = response.text
            soup = BeautifulSoup(html)
            ts = soup.find("ol", "informationtype")
            if ts:
                spans = ts.find_all("span", "heading")
                for item in spans:
                    info_types[item.text] = []
        # Resolve each information type to the dataset ids in its results.
        for it in info_types:
            if not info_types[it]:
                url = url_template.format(urllib.quote(it))
                info_types[it] = get_datasets_from_paginated_results(url)
        with open(cache, 'wb') as output:
            json.dump(info_types, output, indent=2)
        logging.info('Saved complete information types to {}'.format(cache))
    return info_types
def get_dataset(dataset_id, dataset, directory):
    """
    Given an id and existing dict object representing the current meta-data
    about the dataset will extract all the things from the dataset's page on
    HSCIC.

    The raw HTML is cached in ``directory`` keyed by the dataset id. Returns
    the enriched ``dataset`` dict, or None when no HTML could be obtained.
    """
    url_template = 'http://www.hscic.gov.uk/searchcatalogue?productid={}'
    cache = os.path.join(directory, '{}.html'.format(dataset_id))
    html = ''
    url = url_template.format(dataset_id)
    if os.path.isfile(cache):
        logging.info('Using cached records from {}'.format(cache))
        # Close the cache file deterministically (the original leaked the
        # handle returned by open()).
        with open(cache) as cached:
            html = cached.read()
    else:
        logging.info('Requesting {}'.format(url))
        response = requests.get(url)
        logging.info(response.status_code)
        if response.status_code < 400:
            html = response.text
            with open(cache, 'wb') as output:
                output.write(html.encode('utf-8'))
    if html:
        soup = BeautifulSoup(html)
        title = soup.find(id='headingtext').text.strip()
        logging.info(title)
        dataset['source'] = url
        dataset['title'] = title
        dataset['id'] = dataset_id
        product = soup.find(id='productview')
        pub_date = product.find('div',
                                'pubdate').text
        dataset['publication_date'] = pub_date.replace('Publication date: ',
                                                       '')
        summary = product.find('div', 'summary')
        if summary:
            summary = html2text.html2text(summary.prettify())
            dataset['summary'] = summary
        key_facts = product.find('div', 'notevalue')
        if key_facts:
            key_facts = html2text.html2text(key_facts.prettify())
            dataset['key_facts'] = key_facts
        # Collect the downloadable resources attached to the dataset.
        resources = product.find_all('div', 'resourcelink')
        files = []
        for res in resources:
            anchor = res.find('a')
            url = anchor.attrs['href']
            if url.startswith('./'):
                # Relative links are rooted at the site's base URL.
                url = 'http://www.hscic.gov.uk' + url[1:]
            filetype = url[url.rfind('.') + 1:]
            description = anchor.text.replace(' [.{}]'.format(filetype), '')
            files.append({
                'url': url,
                'description': description.strip(),
                'filetype': filetype,
            })
        dataset['sources'] = files
        date_range = product.find('div', 'daterange')
        if date_range:
            date_range = date_range.text.replace('Date Range: ', '')
            dataset['date_range'] = date_range
        coverage = product.find_all('div', 'coverage')
        geo = [x.text for x in coverage]
        if geo:
            dataset['geographical_coverage'] = geo
        return dataset
    else:
        return None
if __name__ == '__main__':
    result = []
    directory = 'datasets_raw'
    filename = 'datasets.json'
    if not os.path.exists(directory):
        logging.info('Creating directory {}'.format(directory))
        os.makedirs(directory)
    # Harvest the three index dimensions (each cached on disk).
    keywords = get_keywords(os.path.join(directory, 'keywords.json'))
    topics = get_topics(os.path.join(directory, 'topics.json'))
    information_types = get_info_types(os.path.join(directory,
                                                    'info_types.json'))
    # Invert the indexes into one dict keyed by dataset/product id. The
    # keyword pass runs first, so every pre-existing entry in the later
    # passes is guaranteed to have been created by an earlier pass.
    datasets = {}
    for k in keywords:
        for dataset in keywords[k]:
            if dataset in datasets:
                datasets[dataset]['keywords'].append(k)
            else:
                datasets[dataset] = {
                    'keywords': [k, ],
                }
    for t in topics:
        for dataset in topics[t]:
            if dataset in datasets:
                if 'topics' in datasets[dataset]:
                    datasets[dataset]['topics'].append(t)
                else:
                    datasets[dataset]['topics'] = [t, ]
            else:
                datasets[dataset] = {
                    'topics': [t, ],
                }
    for i in information_types:
        for dataset in information_types[i]:
            if dataset in datasets:
                if 'information_types' in datasets[dataset]:
                    datasets[dataset]['information_types'].append(i)
                else:
                    datasets[dataset]['information_types'] = [i, ]
            else:
                datasets[dataset] = {
                    'information_types': [i, ],
                }
    print('Processing {} datasets'.format(len(datasets)))
    # Fetch/parse the detail page for every dataset that was indexed.
    for k, v in datasets.iteritems():
        data = get_dataset(k, v, directory)
        if data:
            result.append(data)
    # Close the output file deterministically (the original leaked the
    # handle returned by open()).
    with open(filename, 'wb') as output:
        json.dump(result, output, indent=2)
    logging.info('Written results to {}'.format(filename))
| {
"content_hash": "a9289b4cbbe8bf63901032dcde535cf0",
"timestamp": "",
"source": "github",
"line_count": 311,
"max_line_length": 92,
"avg_line_length": 37.237942122186496,
"alnum_prop": 0.563768241084535,
"repo_name": "ntoll/hscic_apiomatic",
"id": "35ac4559e5bcdab94304ebb52e959adcf5df71f8",
"size": "11627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "grab_datasets.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15571"
}
],
"symlink_target": ""
} |
import github.GithubObject
class StatusMessage(github.GithubObject.NonCompletableGithubObject):
    """
    This class represents status messages as defined in https://status.github.com/api
    """

    @property
    def body(self):
        """
        :type: string
        """
        return self._body.value

    @property
    def status(self):
        """
        :type: string
        """
        return self._status.value

    @property
    def created_on(self):
        """
        :type: datetime.datetime
        """
        return self._created_on.value

    def _initAttributes(self):
        # Initialise every backing attribute to NotSet so the properties are
        # safe to read even when the API payload omitted the field. The
        # original code forgot self._body, making ``body`` raise
        # AttributeError whenever "body" was absent from the attributes.
        self._body = github.GithubObject.NotSet
        self._status = github.GithubObject.NotSet
        self._created_on = github.GithubObject.NotSet

    def _useAttributes(self, attributes):
        if "body" in attributes:  # pragma no branch
            self._body = self._makeStringAttribute(attributes["body"])
        if "status" in attributes:  # pragma no branch
            self._status = self._makeStringAttribute(attributes["status"])
        if "created_on" in attributes:  # pragma no branch
            self._created_on = self._makeDatetimeAttribute(attributes["created_on"])
| {
"content_hash": "3844f0f947259d9a41abe110105d22db",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 85,
"avg_line_length": 28.575,
"alnum_prop": 0.6097987751531059,
"repo_name": "ARMmbed/yotta_osx_installer",
"id": "c0c99903ce74f6fb59381408e43670f51cd50e8b",
"size": "2789",
"binary": false,
"copies": "73",
"ref": "refs/heads/master",
"path": "workspace/lib/python2.7/site-packages/github/StatusMessage.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "46"
},
{
"name": "Assembly",
"bytes": "29493"
},
{
"name": "Batchfile",
"bytes": "1321"
},
{
"name": "C",
"bytes": "3589917"
},
{
"name": "C++",
"bytes": "10603800"
},
{
"name": "CMake",
"bytes": "2408460"
},
{
"name": "CSS",
"bytes": "17863"
},
{
"name": "Emacs Lisp",
"bytes": "14305"
},
{
"name": "FORTRAN",
"bytes": "2105"
},
{
"name": "Groff",
"bytes": "3889491"
},
{
"name": "HTML",
"bytes": "31505361"
},
{
"name": "JavaScript",
"bytes": "90647"
},
{
"name": "Logos",
"bytes": "8877"
},
{
"name": "Makefile",
"bytes": "2798"
},
{
"name": "Objective-C",
"bytes": "254392"
},
{
"name": "Python",
"bytes": "7903768"
},
{
"name": "Shell",
"bytes": "36795"
},
{
"name": "VimL",
"bytes": "8478"
},
{
"name": "XC",
"bytes": "8384"
},
{
"name": "XS",
"bytes": "8334"
}
],
"symlink_target": ""
} |
"""
Created on Fri Aug 17 13:10:52 2012
Author: Josef Perktold
License: BSD-3
"""
import numpy as np
def clip_evals(x, value=0):
    """Clip the eigenvalues of a symmetric matrix from below.

    Parameters
    ----------
    x : ndarray, (k, k)
        symmetric matrix whose eigenvalues are clipped
    value : float
        lower bound for the eigenvalues

    Returns
    -------
    x_new : ndarray, (k, k)
        matrix rebuilt from the eigendecomposition with clipped eigenvalues
    clipped : bool
        True if any eigenvalue was below ``value`` and had to be clipped

    """
    evals, evecs = np.linalg.eigh(x)
    # Compare against ``value``, not 0, so callers passing a positive
    # threshold are also told about eigenvalues in [0, value); the original
    # ``evals < 0`` check let those through unreported.
    clipped = np.any(evals < value)
    x_new = np.dot(evecs * np.maximum(evals, value), evecs.T)
    return x_new, clipped
def corr_nearest(corr, threshold=1e-15, n_fact=100):
    '''
    Find the nearest correlation matrix that is positive semi-definite.
    The function iteratively adjust the correlation matrix by clipping the
    eigenvalues of a difference matrix. The diagonal elements are set to one.
    Parameters
    ----------
    corr : ndarray, (k, k)
        initial correlation matrix
    threshold : float
        clipping threshold for smallest eigenvalue, see Notes
    n_fact : int or float
        factor to determine the maximum number of iterations. The maximum
        number of iterations is the integer part of the number of columns in
        the correlation matrix times n_fact.
    Returns
    -------
    corr_new : ndarray, (optional)
        corrected correlation matrix
    Notes
    -----
    The smallest eigenvalue of the corrected correlation matrix is
    approximately equal to the ``threshold``.
    If the threshold=0, then the smallest eigenvalue of the correlation matrix
    might be negative, but zero within a numerical error, for example in the
    range of -1e-16.
    Assumes input correlation matrix is symmetric.
    Stops after the first step if correlation matrix is already positive
    semi-definite or positive definite, so that smallest eigenvalue is above
    threshold. In this case, the returned array is not the original, but
    is equal to it within numerical precision.
    See Also
    --------
    corr_clipped
    cov_nearest
    '''
    k_vars = corr.shape[0]
    if k_vars != corr.shape[1]:
        raise ValueError("matrix is not square")
    # ``diff`` carries the correction term from the previous eigenvalue
    # clipping step (Dykstra-style alternating projection -- the exact
    # statement order below is significant).
    diff = np.zeros(corr.shape)
    x_new = corr.copy()
    diag_idx = np.arange(k_vars)
    for ii in range(int(len(corr) * n_fact)):
        # Remove the previous correction before projecting onto the PSD cone.
        x_adj = x_new - diff
        x_psd, clipped = clip_evals(x_adj, value=threshold)
        if not clipped:
            # Already PSD at this threshold: converged.
            x_new = x_psd
            break
        # Remember how much the clipping changed the matrix, then restore
        # the unit diagonal for the next iteration.
        diff = x_psd - x_adj
        x_new = x_psd.copy()
        x_new[diag_idx, diag_idx] = 1
    else:
        # Loop exhausted without convergence.
        import warnings
        warnings.warn('maximum iteration reached')
    return x_new
def corr_clipped(corr, threshold=1e-15):
    '''
    Find a near correlation matrix that is positive semi-definite

    Clips the eigenvalues of ``corr`` at ``threshold`` in a single
    eigendecomposition and renormalizes so the diagonal is one. Faster than
    ``corr_nearest`` but with a larger distance to the original matrix.

    Parameters
    ----------
    corr : ndarray, (k, k)
        initial correlation matrix
    threshold : float
        clipping threshold for smallest eigenvalue

    Returns
    -------
    corr_new : ndarray, (optional)
        corrected correlation matrix; the original ``corr`` is returned
        unchanged when it is already positive semi-definite given the
        threshold

    See Also
    --------
    corr_nearest
    cov_nearest

    '''
    adjusted, was_clipped = clip_evals(corr, value=threshold)
    if not was_clipped:
        return corr
    # Renormalize to a correlation matrix (inline cov2corr); keep the two
    # successive divisions so floating-point results match exactly.
    diag_sd = np.sqrt(np.diag(adjusted))
    adjusted = adjusted / diag_sd / diag_sd[:, None]
    return adjusted
def cov_nearest(cov, method='clipped', threshold=1e-15, n_fact=100,
                return_all=False):
    '''
    Find the nearest covariance matrix that is positive (semi-) definite.

    This leaves the diagonal, i.e. the variance, unchanged.

    Parameters
    ----------
    cov : ndarray, (k,k)
        initial covariance matrix
    method : string
        if "clipped", then the faster but less accurate ``corr_clipped`` is
        used. If "nearest", then ``corr_nearest`` is used.
    threshold : float
        clipping threshold for smallest eigenvalue, see Notes
    n_fact : int or float
        factor to determine the maximum number of iterations in
        ``corr_nearest``. See its doc string.
    return_all : bool
        if False (default), then only the covariance matrix is returned.
        If True, then correlation matrix and standard deviation are
        additionally returned.

    Returns
    -------
    cov_ : ndarray
        corrected covariance matrix
    corr_ : ndarray, (optional)
        corrected correlation matrix
    std_ : ndarray, (optional)
        standard deviation

    Raises
    ------
    ValueError
        if ``method`` is neither "clipped" nor "nearest"

    Notes
    -----
    This converts the covariance matrix to a correlation matrix. Then, finds
    the nearest correlation matrix that is positive semidefinite and converts
    it back to a covariance matrix using the initial standard deviation.

    The smallest eigenvalue of the intermediate correlation matrix is
    approximately equal to the ``threshold``.  If the threshold=0, then the
    smallest eigenvalue of the correlation matrix might be negative, but zero
    within a numerical error, for example in the range of -1e-16.

    Assumes input covariance matrix is symmetric.

    See Also
    --------
    corr_nearest
    corr_clipped
    '''
    from statsmodels.stats.moment_helpers import cov2corr, corr2cov
    cov_, std_ = cov2corr(cov, return_std=True)
    if method == 'clipped':
        corr_ = corr_clipped(cov_, threshold=threshold)
    elif method == 'nearest':
        corr_ = corr_nearest(cov_, threshold=threshold, n_fact=n_fact)
    else:
        # previously an unknown method fell through to a NameError on corr_;
        # fail with an explicit, diagnosable error instead
        raise ValueError("method must be 'clipped' or 'nearest', got %r"
                         % (method,))
    cov_ = corr2cov(corr_, std_)
    if return_all:
        return cov_, corr_, std_
    else:
        return cov_
if __name__ == '__main__':
    # library module -- no command-line behaviour
    pass
| {
"content_hash": "9a501c64025f01df627bf08dce9977f3",
"timestamp": "",
"source": "github",
"line_count": 216,
"max_line_length": 81,
"avg_line_length": 30.61111111111111,
"alnum_prop": 0.6637931034482759,
"repo_name": "bavardage/statsmodels",
"id": "66645666a1a2f0cc8b2d52fb6394413625bf89ea",
"size": "6636",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "statsmodels/stats/correlation_tools.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "10509"
},
{
"name": "C",
"bytes": "11707"
},
{
"name": "CSS",
"bytes": "6244"
},
{
"name": "JavaScript",
"bytes": "16353"
},
{
"name": "Python",
"bytes": "6113420"
},
{
"name": "R",
"bytes": "12495"
},
{
"name": "Shell",
"bytes": "5156"
}
],
"symlink_target": ""
} |
from distutils.core import setup
from distutils.command.build_scripts import build_scripts,first_line_re
from distutils.command.bdist_wininst import bdist_wininst
from distutils.dist import Distribution
from shutil import copyfile,rmtree
from tempfile import mkdtemp
import os
import glob
class Distribution_extended(Distribution):
    """Distribution carrying flags that control script-name rewriting.

    ``add_prefix`` / ``remove_prefix`` tell ``build_scripts_add_extension``
    whether a ``.py`` suffix should be appended to or stripped from the
    installed script names.
    """

    def __init__(self, attrs=None):
        # default to no renaming; the commands flip these flags as needed
        self.add_prefix = False
        self.remove_prefix = False
        Distribution.__init__(self, attrs)
class bdist_wininst_extended(bdist_wininst):
    # Windows installer build: scripts must keep a .py extension so the
    # target machine associates them with the Python interpreter.
    def run(self):
        # signal build_scripts_add_extension (via the Distribution flag)
        # before delegating to the stock bdist_wininst behaviour
        self.distribution.add_prefix = True
        bdist_wininst.run(self)
class build_scripts_add_extension(build_scripts):
    """build_scripts variant that renames scripts while installing them.

    Depending on the Distribution flags the scripts gain or lose a ``.py``
    suffix, and scripts with a python shebang line get a ``tema.`` prefix.
    """

    def _transform_script_name(self, script_name):
        """Return the installed file name for *script_name*."""
        script_base = os.path.basename(script_name)
        # only the first line is needed to detect a python shebang;
        # 'with' guarantees the handle is closed even on read errors
        with open(script_name, 'r') as filu:
            firstline = filu.readline()
        if firstline:
            match = first_line_re.match(firstline)
        else:
            match = None
        file_name = script_base
        if match:
            if self.distribution.add_prefix and not script_base.endswith(".py"):
                file_name = "%s.py" % script_base
            if self.distribution.remove_prefix and script_base.endswith(".py"):
                file_name = script_base[:-3]
            if not file_name.startswith("tema."):
                file_name = "tema.%s" % file_name
        return file_name

    def run(self):
        # Not in posix system. Add prefix .py just to be sure
        if os.name != "posix":
            self.distribution.add_prefix = True
        # Remove .py prefix in posix.
        elif os.name == "posix" and not self.distribution.add_prefix:
            self.distribution.remove_prefix = True
        # Create the temp dir *before* entering the try block: in the old
        # code a failing mkdtemp() left `tempdir` unbound, so the finally
        # clause raised a NameError that masked the original error.
        tempdir = mkdtemp()
        try:
            new_names = []
            for script in self.scripts:
                new_name = os.path.join(tempdir, self._transform_script_name(script))
                new_names.append(new_name)
                copyfile(script, new_name)
            self.scripts = new_names
            build_scripts.run(self)
        finally:
            if os.path.isdir(tempdir):
                rmtree(tempdir)
# Read the licence text verbatim; it is passed to setup() below.  A context
# manager replaces the old try/finally, which raised a NameError (masking
# the real error) whenever open() itself failed.
with open("LICENCE", 'r') as input_h:
    LICENCE = input_h.read()

VERSION = '3.2'
def get_scripts():
    """Collect the relative paths of every script the package installs.

    Globbed entries that may legitimately be absent are skipped instead of
    raising, so the collection also works on partial checkouts.
    """
    scripts = glob.glob("Validation/simulation/*.py")
    log_tools = glob.glob("Validation/loghandling/*.py")
    if "Validation/loghandling/avgofdats.py" in log_tools:
        log_tools.remove("Validation/loghandling/avgofdats.py")
    scripts.extend(log_tools)
    scripts.append("Validation/viewer/model2dot.py")
    scripts.append("ModelUtils/runmodelpackage.py")
    scripts.append("ModelUtils/actionlist.py")
#    scripts.append("TemaLib/MockSUT/mocksut.py")
    scripts.extend(glob.glob("Validation/analysis/*.py"))
    modelutils = glob.glob("TemaLib/tema/modelutils/*.py")
    # guard the removal like the avgofdats one above: on an incomplete tree
    # the glob is empty and an unconditional list.remove raised ValueError
    if "TemaLib/tema/modelutils/__init__.py" in modelutils:
        modelutils.remove("TemaLib/tema/modelutils/__init__.py")
    scripts.extend(modelutils)
    scripts.append("TemaLib/tema/model/model2lsts.py")
    scripts.append("TemaLib/tema/eini/mdm2svg.py")
    scripts.append("TemaLib/tema/packagereader/packagereader.py")
    scripts.append("TemaLib/tema/ats4appmodel/ats4appmodel2lsts.py")
    scripts.append("TemaLib/tema/filter/filterexpand.py")
    scripts.append("TemaLib/tema/variablemodels/variablemodelcreator.py")
    scripts.append("TemaLib/tema/testengine/testengine.py")
    return scripts
def get_packages(start_path):
    """Yield dotted package names for every package below *start_path*.

    A directory counts as a package when it contains ``__init__.py``.  Names
    are relative to *start_path* (e.g. ``"tema.model"`` for
    ``TemaLib/tema/model``).  *start_path* itself is never yielded, even if
    it contains an ``__init__.py`` -- the original implementation raised an
    IndexError in that case.  ``os.sep`` is used throughout so the function
    is portable beyond posix.
    """
    prefix = start_path + os.sep
    for root, dirs, files in os.walk(start_path):
        if "__init__.py" in files and root.startswith(prefix):
            yield root[len(prefix):].replace(os.sep, ".")
def get_manpages():
    """Return the troff man pages shipped under Docs/man/man1."""
    return glob.glob("Docs/man/man1/*.1")
# Assemble the distribution inputs.  get_packages() yields names relative to
# TemaLib, which matches the package_dir mapping passed to setup() below.
packages_list = list(get_packages("TemaLib"))
if "ToolProxy" in packages_list:
    packages_list.remove("ToolProxy")
scripts_list = get_scripts()
manpages_list = get_manpages()
# The custom cmdclass/distclass rename scripts per platform (adding or
# stripping the .py suffix and enforcing the tema. prefix).
setup(name='tema-tg',
      provides=['tema',],
      license=LICENCE,
      version=VERSION,
      description='TEMA Test engine',
      author="Tampere University of Technology, Department of Software Systems",
      author_email='teams@cs.tut.fi',
      url='http://tema.cs.tut.fi',
      package_dir = {"" : "TemaLib" },
      data_files=[('share/man/man1', manpages_list),
                  ('lib/tema-tg/gui_interface',['gui_interface/tema.start_engine','gui_interface/tema.list_sessions','gui_interface/tema.kill_engine'])],
      packages=packages_list,
      package_data={"tema.modelutils" : ["pcrules/Generic*","makefiles/GNUmakefile*"]},
      scripts=scripts_list,
      cmdclass={"build_scripts" : build_scripts_add_extension, "bdist_wininst" : bdist_wininst_extended },
      distclass=Distribution_extended,
      )
| {
"content_hash": "067336da631da0e0eab7902d9a8fc872",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 153,
"avg_line_length": 35.294117647058826,
"alnum_prop": 0.6420833333333333,
"repo_name": "tema-mbt/tema-tg",
"id": "04b5040c89df07be8b9a56f4ed4770a153e91493",
"size": "5964",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "924079"
},
{
"name": "Shell",
"bytes": "2195"
},
{
"name": "TeX",
"bytes": "20563"
}
],
"symlink_target": ""
} |
import unittest
import sys, os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)))
import src.parser
class ParserTestCase(unittest.TestCase):
    """Tests for the Parser.parse() method."""

    # --------------------------------------------------
    # TESTS for parser() method
    # --------------------------------------------------

    def test_parser_method_with_variable_decleration(self):
        # Tokenised form of: int a = 11;
        tokens = [['DATATYPE', 'int'], ['IDENTIFIER', 'a'], ['OPERATOR', '='],
                  ['INTEGER', '11'], ['STATEMENT_END', ';']]
        ast = src.parser.Parser(tokens).parse()
        expected = {'VariableDecleration': [{'type': 'int'}, {'name': 'a'},
                                            {'value': '11'}]}
        self.assertEqual(ast, expected)
if __name__ == '__main__':
unittest.main() | {
"content_hash": "d4822134f4774d81be48f4682b4cceaf",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 154,
"avg_line_length": 38.22727272727273,
"alnum_prop": 0.5386444708680143,
"repo_name": "RyanMaugin/Tachyon",
"id": "dad5db571fc70a010aad63b9669366887bc13738",
"size": "949",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/parser_tests.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "85327"
}
],
"symlink_target": ""
} |
"""Statistics napp package."""
| {
"content_hash": "9b64c97cf216cbe4446235a495bcace4",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 30,
"avg_line_length": 31,
"alnum_prop": 0.6774193548387096,
"repo_name": "kytos/kyco-core-napps",
"id": "a0c699349ab169c5a3fbc77391215c43da8be996",
"size": "31",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "napps/kytos/of_stats/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "73608"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, print_function
import frappe
import hashlib
from frappe.model.db_schema import DbManager
from frappe.installer import get_root_connection
from frappe.database import Database
import os, subprocess
from bs4 import BeautifulSoup
import jinja2.exceptions
import io
def sync():
	'''Drop and rebuild the full-text help database from all apps' docs.'''
	# make table
	print('Syncing help database...')
	help_db = HelpDatabase()
	help_db.make_database()
	help_db.connect()
	help_db.make_table()
	help_db.sync_pages()
	help_db.build_index()
@frappe.whitelist()
def get_help(text):
	'''Full-text search the help pages for *text* (whitelisted RPC).'''
	return HelpDatabase().search(text)
@frappe.whitelist()
def get_installed_app_help(text):
	'''Search help pages of installed apps, excluding frappe's own docs.'''
	return HelpDatabase().app_docs_search(text)
@frappe.whitelist()
def get_help_content(path):
	'''Return title and HTML content of the help page at *path*.'''
	return HelpDatabase().get_content(path)
def get_improve_page_html(app_name, target):
	'''Build the "Improve this page" footer linking *target* to its source
	file on the app's configured source repository.'''
	docs_config = frappe.get_module(app_name + ".config.docs")
	source_link = docs_config.source_link
	# branch is optional in the app's docs config; default to develop
	branch = getattr(docs_config, "branch", "develop")
	html = '''<div class="page-container">
				<div class="page-content">
				<div class="edit-container text-center">
					<i class="fa fa-smile text-muted"></i>
					<a class="edit text-muted" href="{source_link}/blob/{branch}/{target}">
						Improve this page
					</a>
				</div>
				</div>
			</div>'''.format(source_link=source_link, app_name=app_name, target=target, branch=branch)
	return html
class HelpDatabase(object):
	'''Full-text search database built from the docs of installed apps.

	With the ``global_help_setup`` site config the pages live in a dedicated
	bench-wide database (name derived from the bench name); otherwise the
	site database is reused.
	'''
	def __init__(self):
		self.global_help_setup = frappe.conf.get('global_help_setup')
		if self.global_help_setup:
			bench_name = os.path.basename(os.path.abspath(frappe.get_app_path('frappe')).split('/apps/')[0])
			# short, stable db name derived from the bench name (fits name-length limits)
			self.help_db_name = hashlib.sha224(bench_name.encode('utf-8')).hexdigest()[:15]
	def make_database(self):
		'''make database for global help setup'''
		if not self.global_help_setup:
			return
		dbman = DbManager(get_root_connection())
		dbman.drop_database(self.help_db_name)
		# make database
		if not self.help_db_name in dbman.get_database_list():
			try:
				dbman.create_user(self.help_db_name, self.help_db_name)
			except Exception as e:
				# user already exists
				if e.args[0] != 1396: raise
			dbman.create_database(self.help_db_name)
			dbman.grant_all_privileges(self.help_db_name, self.help_db_name)
			dbman.flush_privileges()
	def connect(self):
		'''Open the connection; global setup uses a dedicated db
		(user == password == db name), otherwise the site db.'''
		if self.global_help_setup:
			self.db = Database(user=self.help_db_name, password=self.help_db_name)
		else:
			self.db = frappe.db
	def make_table(self):
		'''Create the `help` table with FULLTEXT indexes if it is missing.'''
		if not 'help' in self.db.get_tables():
			self.db.sql('''create table help(
				path varchar(255),
				content text,
				title text,
				intro text,
				full_path text,
				fulltext(title),
				fulltext(content),
				index (path))
				COLLATE=utf8mb4_unicode_ci
				ENGINE=MyISAM
				CHARACTER SET=utf8mb4''')
	def search(self, words):
		'''Search titles (LIKE) and content (FULLTEXT) for *words*.'''
		self.connect()
		return self.db.sql('''
			select title, intro, path from help where title like %s union
			select title, intro, path from help where match(content) against (%s) limit 10''', ('%'+words+'%', words))
	def app_docs_search(self, words):
		'''Like ``search`` but returns full paths and excludes frappe's docs.'''
		self.connect()
		frappe_path = '%' + 'apps/frappe' + '%'
		return self.db.sql('''
			select
				title, intro, full_path
			from
				help
			where
				title like %s
			and
				full_path not like %s
			union
			select
				title, intro, full_path
			from
				help
			where
				match(content) against (%s)
			and
				full_path not like %s
			limit
				10
			''', ('%'+words+'%', frappe_path, words, frappe_path))
	def get_content(self, path):
		'''Return {'title', 'content'} for *path*, preferring its index page.'''
		self.connect()
		query = '''select title, content from help
			where path like "{path}%" order by path desc limit 1'''
		result = None
		if not path.endswith('index'):
			result = self.db.sql(query.format(path=os.path.join(path, 'index')))
		if not result:
			result = self.db.sql(query.format(path=path))
		return {'title':result[0][0], 'content':result[0][1]} if result else {}
	def sync_pages(self):
		'''Rebuild all rows from the md/html files under each app's docs/user
		folder, plus a generated /documentation/index table of contents.'''
		self.db.sql('truncate help')
		doc_contents = '<ol>'
		apps = os.listdir('../apps') if self.global_help_setup else frappe.get_installed_apps()
		for app in apps:
			# Expect handling of cloning docs apps in bench
			docs_app = frappe.get_hooks('docs_app', app, app)[0]
			# a separate docs app keeps its docs under www/
			web_folder = 'www/' if docs_app != app else ''
			docs_folder = '../apps/{docs_app}/{docs_app}/{web_folder}docs/user'.format(
				docs_app=docs_app, web_folder=web_folder)
			self.out_base_path = '../apps/{docs_app}/{docs_app}/{web_folder}docs'.format(
				docs_app=docs_app, web_folder=web_folder)
			if os.path.exists(docs_folder):
				app_name = getattr(frappe.get_module(app), '__title__', None) or app.title()
				doc_contents += '<li><a data-path="/{app}/index">{app_name}</a></li>'.format(
					app=app, app_name=app_name)
				for basepath, folders, files in os.walk(docs_folder):
					files = self.reorder_files(files)
					for fname in files:
						if fname.rsplit('.', 1)[-1] in ('md', 'html'):
							fpath = os.path.join(basepath, fname)
							with io.open(fpath, 'r', encoding = 'utf-8') as f:
								try:
									content = frappe.render_template(f.read(),
										{'docs_base_url': '/assets/{docs_app}_docs'.format(docs_app=docs_app)})
									relpath = self.get_out_path(fpath)
									relpath = relpath.replace("user", app)
									content = frappe.utils.md_to_html(content)
									title = self.make_title(basepath, fname, content)
									intro = self.make_intro(content)
									content = self.make_content(content, fpath, relpath, app, docs_app)
									self.db.sql('''insert into help(path, content, title, intro, full_path)
										values (%s, %s, %s, %s, %s)''', (relpath, content, title, intro, fpath))
								except jinja2.exceptions.TemplateSyntaxError:
									print("Invalid Jinja Template for {0}. Skipping".format(fpath))
		doc_contents += "</ol>"
		self.db.sql('''insert into help(path, content, title, intro, full_path) values (%s, %s, %s, %s, %s)''',
			('/documentation/index', doc_contents, 'Documentation', '', ''))
	def make_title(self, basepath, filename, html):
		'''Derive a page title from the first <h1>, else from the file/dir name.'''
		if '<h1>' in html:
			title = html.split("<h1>", 1)[1].split("</h1>", 1)[0]
		elif 'index' in filename:
			title = basepath.rsplit('/', 1)[-1].title().replace("-", " ")
		else:
			title = filename.rsplit('.', 1)[0].title().replace("-", " ")
		return title
	def make_intro(self, html):
		'''Use the first paragraph as intro; flag pages containing videos.'''
		intro = ""
		if '<p>' in html:
			intro = html.split('<p>', 1)[1].split('</p>', 1)[0]
		if 'Duration' in html:
			intro = "Help Video: " + intro
		return intro
	def make_content(self, html, path, relpath, app_name, doc_app):
		'''Strip the leading <h1>, rewrite links/images and prepend a
		parent-page link; returns prettified HTML.'''
		if '<h1>' in html:
			html = html.split('</h1>', 1)[1]
		if '{next}' in html:
			html = html.replace('{next}', '')
		soup = BeautifulSoup(html, 'html.parser')
		self.fix_links(soup, app_name)
		self.fix_images(soup, doc_app)
		parent = self.get_parent(relpath)
		if parent:
			parent_tag = soup.new_tag('a')
			parent_tag.string = parent['title']
			parent_tag['class'] = 'parent-link'
			parent_tag['data-path'] = parent['path']
			soup.find().insert_before(parent_tag)
		return soup.prettify()
	def fix_links(self, soup, app_name):
		'''Turn /user/... hrefs into extension-less data-path attributes.'''
		for link in soup.find_all('a'):
			if link.has_attr('href'):
				url = link['href']
				if '/user' in url:
					data_path = url[url.index('/user'):]
					if '.' in data_path:
						data_path = data_path[: data_path.rindex('.')]
					if data_path:
						link['data-path'] = data_path.replace("user", app_name)
	def fix_images(self, soup, app_name):
		'''Point image srcs at the docs app's published assets.
		NOTE(review): callers pass the *docs* app here despite the
		parameter name -- confirm before renaming.'''
		for img in soup.find_all('img'):
			if img.has_attr('src'):
				url = img['src']
				if '/docs/' in url:
					img['src'] = url.replace('/docs/', '/assets/{0}_docs/'.format(app_name))
	def build_index(self):
		'''Expand the {index} placeholder in every stored page.'''
		for data in self.db.sql('select path, full_path, content from help'):
			self.make_index(data[0], data[1], data[2])
	def make_index(self, original_path, full_path, content):
		'''Make index from index.txt'''
		if '{index}' in content:
			path = os.path.dirname(full_path)
			files = []
			# get files from index.txt
			index_path = os.path.join(path, "index.txt")
			if os.path.exists(index_path):
				with open(index_path, 'r') as f:
					files = f.read().splitlines()
			# files not in index.txt
			for f in os.listdir(path):
				if not os.path.isdir(os.path.join(path, f)) and len(f.rsplit('.', 1)) == 2:
					name, extn = f.rsplit('.', 1)
					if name not in files \
						and name != 'index' and extn in ('md', 'html'):
						files.append(name)
			links_html = "<ol class='index-links'>"
			for line in files:
				fpath = os.path.join(os.path.dirname(original_path), line)
				title = self.db.sql('select title from help where path like %s',
					os.path.join(fpath, 'index') + '%')
				if not title:
					title = self.db.sql('select title from help where path like %s',
						fpath + '%')
				if title:
					title = title[0][0]
					links_html += "<li><a data-path='{fpath}'> {title} </a></li>".format(
						fpath=fpath, title=title)
				# else:
				# 	bad entries in .txt files
				# 	print fpath
			links_html += "</ol>"
			html = content.replace('{index}', links_html)
			self.db.sql('update help set content=%s where path=%s', (html, original_path))
	def get_out_path(self, path):
		'''Path of *path* relative to the current app's docs folder.'''
		return '/' + os.path.relpath(path, self.out_base_path)
	def get_parent(self, child_path):
		'''Return {'title', 'path'} of the parent page, or None.'''
		if 'index' in child_path:
			child_path = child_path[: child_path.rindex('index')]
		if child_path[-1] == '/':
			child_path = child_path[:-1]
		child_path = child_path[: child_path.rindex('/')]
		out = None
		if child_path:
			parent_path = child_path + "/index"
			out = self.get_content(parent_path)
		#if parent is documentation root
		else:
			parent_path = "/documentation/index"
			out = {}
			out['title'] = "Documentation"
		if not out:
			return None
		out['path'] = parent_path
		return out
	def reorder_files(self, files):
		'''Move index.md / index.html to the front so it is processed first.'''
		pos = 0
		if 'index.md' in files:
			pos = files.index('index.md')
		elif 'index.html' in files:
			pos = files.index('index.html')
		if pos:
			files[0], files[pos] = files[pos], files[0]
		return files
def setup_apps_for_docs(app):
	'''Ensure the docs app an installed app depends on is available
	(fetched via `bench get-app` when missing).'''
	docs_app = frappe.get_hooks('docs_app', app, app)[0]
	# NOTE(review): the existence check uses `app`, not `docs_app`.  Since
	# docs_app defaults to app, the get-app branch looks intended to run
	# when the *docs* app is missing -- confirm whether this should be
	# frappe.get_app_path(docs_app).
	if docs_app and not os.path.exists(frappe.get_app_path(app)):
		print("Getting {docs_app} required by {app}".format(docs_app=docs_app, app=app))
		subprocess.check_output(['bench', 'get-app', docs_app], cwd = '..')
	else:
		if docs_app:
			print("{docs_app} required by {app} already present".format(docs_app=docs_app, app=app))
| {
"content_hash": "978a1c94c00a244ea7da1baac328dfcc",
"timestamp": "",
"source": "github",
"line_count": 343,
"max_line_length": 109,
"avg_line_length": 30.21865889212828,
"alnum_prop": 0.6359864930053063,
"repo_name": "ESS-LLP/frappe",
"id": "3a5bc5b308029b5e310508d34b14012158e5f105",
"size": "10493",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "frappe/utils/help.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "443869"
},
{
"name": "HTML",
"bytes": "198694"
},
{
"name": "JavaScript",
"bytes": "1966520"
},
{
"name": "Makefile",
"bytes": "99"
},
{
"name": "Python",
"bytes": "2281931"
},
{
"name": "Shell",
"bytes": "517"
}
],
"symlink_target": ""
} |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent603xA import *
class agilent6032A(agilent603xA):
    "Agilent 6032A IVI DC power supply driver"
    def __init__(self, *args, **kwargs):
        # set before the chained __init__ runs; setdefault preserves a more
        # specific ID already set by a subclass
        self.__dict__.setdefault('_instrument_id', '6032A')
        super(agilent6032A, self).__init__(*args, **kwargs)
        # single-output supply
        self._output_count = 1
        self._output_spec = [
            {
                'range': {
                    # (voltage_max, current_max) for the P60V range
                    # presumably ~2.4% above the nominal 60 V rating -- TODO
                    # confirm against the 6032A datasheet
                    'P60V': (61.425, 51.1875)
                },
                'ovp_max': 64.0,
                'voltage_max': 61.425,
                'current_max': 51.1875
            }
        ]
| {
"content_hash": "8a2a40a08237d5bb95bfc9755442b890",
"timestamp": "",
"source": "github",
"line_count": 50,
"max_line_length": 77,
"avg_line_length": 34.78,
"alnum_prop": 0.6716503737780334,
"repo_name": "getzze/python-ivi",
"id": "8ebc12dccaf137a670b4396b39ff7143bae2e595",
"size": "1739",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "ivi/agilent/agilent6032A.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1739388"
}
],
"symlink_target": ""
} |
import platform
import os
class Constants:
    """Shared constants: host-OS detection plus the relevant file locations.

    The path attributes are empty until ``init()`` is called once at startup.
    """

    # operating systems as reported by platform.system()
    WINDOWS = "Windows"
    LINUX = "Linux"
    MACOS = "TODO"
    OS = platform.system()

    # file-system locations resolved by init()
    EXPORTER_DIR = ""
    EXPORTER_CONFIG_FILE = ""
    QTCREATOR_CONFIG_DIR = ""
    QTCREATOR_CONFIG_FILE = ""

    # sync configuration file
    SYNC_CONFIG_FILE = ""

    # categories found in the config ini file
    CONFIG_INI_FILE_CATEGORIES = []

    @staticmethod
    def init():
        """Resolve the path constants for the current machine."""
        Constants.SYNC_CONFIG_FILE = os.path.dirname(__file__) + '/../config.json'
        Constants.EXPORTER_DIR = os.path.dirname(os.path.realpath(__file__)) + "/../exported"
        Constants.EXPORTER_CONFIG_FILE = Constants.EXPORTER_DIR + "/QtCreator.ini"
        # os specific storage paths
        if Constants.OS == Constants.WINDOWS:
            Constants.QTCREATOR_CONFIG_DIR = os.environ["APPDATA"] + "\\QtProject\\"
            Constants.QTCREATOR_CONFIG_FILE = Constants.QTCREATOR_CONFIG_DIR + "QtCreator.ini"
        elif Constants.OS == Constants.MACOS:
            print("TODO")
        elif Constants.OS == Constants.LINUX:
            print("TODO")
| {
"content_hash": "d661792640e26210f43f8dabcbd787d4",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 94,
"avg_line_length": 27.425,
"alnum_prop": 0.6071103008204193,
"repo_name": "assemblical/qtcreator_settings_sync",
"id": "c4becd82910f351cd947c7f9f6c60adcae8b585f",
"size": "1097",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lib/Contants.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3166"
}
],
"symlink_target": ""
} |
from msrest.serialization import Model
class ManagementEventAggregationCondition(Model):
"""How the data that is collected should be combined over time.
:param operator: the condition operator. Possible values include:
'GreaterThan', 'GreaterThanOrEqual', 'LessThan', 'LessThanOrEqual'
:type operator: str or :class:`ConditionOperator
<azure.mgmt.monitor.models.ConditionOperator>`
:param threshold: The threshold value that activates the alert.
:type threshold: float
:param window_size: the period of time (in ISO 8601 duration format) that
is used to monitor alert activity based on the threshold. If specified
then it must be between 5 minutes and 1 day.
:type window_size: timedelta
"""
_attribute_map = {
'operator': {'key': 'operator', 'type': 'ConditionOperator'},
'threshold': {'key': 'threshold', 'type': 'float'},
'window_size': {'key': 'windowSize', 'type': 'duration'},
}
def __init__(self, operator=None, threshold=None, window_size=None):
self.operator = operator
self.threshold = threshold
self.window_size = window_size
| {
"content_hash": "ca35801e07c4661f9cf1014344286c50",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 77,
"avg_line_length": 41.214285714285715,
"alnum_prop": 0.6819757365684576,
"repo_name": "SUSE/azure-sdk-for-python",
"id": "1273fcd1483a200298f580dc7026cfeb91ea5b51",
"size": "1628",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "azure-mgmt-monitor/azure/mgmt/monitor/models/management_event_aggregation_condition.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "9090161"
}
],
"symlink_target": ""
} |
import os
import boto3
import pytest
import simplejson as json
from _pytest.fixtures import fixture
from botocore.exceptions import ClientError
from moto import mock_s3
from lambda_tools import Configuration
from reactor.utils import formatters
from test.data.sample_event_data import KNOWN_VALID_EVENTS, build_event_json_array
@pytest.fixture(scope="session", autouse=True)
def set_env_vars():
    """Pin AWS and reactor environment variables for the whole test session."""
    os.environ.update({
        # when using localstack default/dummy values for these must be set so they can be ignored
        'AWS_ACCESS_KEY_ID': 'NONE',
        'AWS_SECRET_ACCESS_KEY': 'NONE',
        'AWS_DEFAULT_REGION': 'us-east-1',
        # all operations happen in the test namespace with the test config
        'NAMESPACE': 'test',
        'CONFIG': 'cz-test-config',  # used as reactor bucket name
    })
@pytest.fixture()
def event_data():
    """Known-valid sample events, decoded the same way production code does."""
    raw = build_event_json_array(KNOWN_VALID_EVENTS)
    return json.loads(raw, parse_float=str, object_hook=formatters.dict_clean)
@pytest.fixture()
def provisioned_throughput_exceeded_exception():
    """ClientError mimicking a DynamoDB throughput-exceeded failure."""
    error_response = {'Error': {'Code': 102, 'Message': 'ProvisionedThroughputExceededException'}}
    return ClientError(error_response, operation_name="DoSomething")
@pytest.fixture()
def aws_client_exception():
    """Generic, unclassified AWS ClientError."""
    error_response = {'Error': {'Code': 102, 'Message': 'JustSomeRandomException'}}
    return ClientError(error_response, operation_name="DoSomething")
@pytest.fixture()
def aws_client_permissions_exception():
    """ClientError representing an access-denied failure."""
    error_response = {'Error': {'Code': 102, 'Message': 'AccessDenied: Let Erik try it'}}
    return ClientError(error_response, operation_name="DoSomething")
@fixture(scope="function")
def lambda_environment():
    """Provide CONFIG/LOG_LEVEL env vars for a test, then remove them."""
    os.environ['CONFIG'] = "TEST-ENV-NO-ACCOUNT-1234567890"
    os.environ['LOG_LEVEL'] = "DEBUG"
    yield os.environ
    for key in ('CONFIG', 'LOG_LEVEL'):
        del os.environ[key]
@fixture(scope="function")
def configuration_bucket_no_file(lambda_environment):
    """Mocked S3 bucket named after CONFIG, containing no objects."""
    with mock_s3():
        resource = boto3.resource('s3')
        yield resource.create_bucket(Bucket=lambda_environment['CONFIG'])
@fixture(scope="function")
def configuration_bucket_and_file(lambda_environment):
    """Mocked S3 bucket seeded with the sample configuration file."""
    with mock_s3():
        s3 = boto3.resource('s3')
        bucket = s3.create_bucket(Bucket=lambda_environment['CONFIG'])
        test_configuration_file_path = f"{os.path.dirname(__file__)}/data/sample_{Configuration.CONFIG_FILE}"
        # close the sample file deterministically; the original left the
        # handle dangling until garbage collection
        with open(test_configuration_file_path, 'rb') as sample_file:
            s3.Object(lambda_environment['CONFIG'],
                      Configuration.CONFIG_FILE).put(Body=sample_file)
        yield bucket
| {
"content_hash": "edf0ed98e335311dcadc80b7d64bf80d",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 109,
"avg_line_length": 32.55,
"alnum_prop": 0.6816436251920123,
"repo_name": "Cloudzero/cloudzero-reactor-aws",
"id": "a0fde433fb938bdf467d9e43b6b4d055c6a221ea",
"size": "2802",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "test/conftest.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "5349"
},
{
"name": "Python",
"bytes": "803664"
}
],
"symlink_target": ""
} |
from itertools import count
import json
import pytest
from devp2p.peermanager import PeerManager
from ethereum import tester
from ethereum.ethpow import mine
import ethereum.keys
import ethereum.config
from ethereum.slogging import get_logger
from pyethapp.accounts import Account, AccountsService, mk_random_privkey
from pyethapp.app import EthApp
from pyethapp.config import update_config_with_defaults, get_default_config
from pyethapp.db_service import DBService
from pyethapp.eth_service import ChainService
from pyethapp.jsonrpc import JSONRPCServer, quantity_encoder, address_encoder, data_decoder, \
data_encoder
from pyethapp.pow_service import PoWService
# reduce key derivation iterations (speeds up account creation in tests)
ethereum.keys.PBKDF2_CONSTANTS['c'] = 100
log = get_logger('test.jsonrpc')
# EVM code corresponding to the following solidity code:
#
# contract LogTest {
#     event Log();
#
#     function () {
#         Log();
#     }
# }
#
# (compiled with online Solidity compiler at https://chriseth.github.io/browser-solidity/ version
# 0.1.1-34172c3b/RelWithDebInfo-Emscripten/clang/int)
LOG_EVM = ('606060405260448060116000396000f30060606040523615600d57600d565b60425b7f5e7df75d54'
           'e493185612379c616118a4c9ac802de621b010c96f74d22df4b30a60405180905060405180910390'
           'a15b565b00').decode('hex')
from pyethapp.jsonrpc import Compilers
# minimal contract used by the compiler tests below
solidity_code = "contract test { function multiply(uint a) returns(uint d) { return a * 7; } }"
def test_compileSolidity():
    """Compare Compilers().compileSolidity against direct solc output."""
    from pyethapp.jsonrpc import Compilers, data_encoder
    import ethereum._solidity
    s = ethereum._solidity.get_solidity()
    if s is None:  # was `s == None`; identity comparison is the correct idiom
        pytest.xfail("solidity not installed, not tested")
    else:
        c = Compilers()
        bc = s.compile(solidity_code)
        abi = s.mk_full_signature(solidity_code)
        # expected JSON-RPC response structure for the compiled contract
        A = dict(test=dict(code=data_encoder(bc),
                           info=dict(source=solidity_code,
                                     language='Solidity',
                                     languageVersion='0',
                                     compilerVersion='0',
                                     abiDefinition=abi,
                                     userDoc=dict(methods=dict()),
                                     developerDoc=dict(methods=dict()),
                                     )
                           )
                 )
        B = c.compileSolidity(solidity_code)
        assert A.keys() == B.keys()
        At = A['test']
        Bt = B['test']
        assert At['code'] == Bt['code']
        for k, Av in At['info'].items():
            # compiler version differs between installs; skip it
            if k == 'compilerVersion':
                continue
            assert Av == Bt['info'][k]
@pytest.mark.skipif('solidity' not in Compilers().compilers, reason="solidity compiler not available")
def test_compileSolidity_2():
    """The compiler output must expose exactly the documented structure."""
    compiled = Compilers().compileSolidity(solidity_code)
    assert set(compiled) == {'test'}
    contract = compiled['test']
    assert set(contract) == {'info', 'code'}
    expected_info_keys = {
        'language', 'languageVersion', 'abiDefinition', 'source',
        'compilerVersion', 'developerDoc', 'userDoc'
    }
    assert set(contract['info']) == expected_info_keys
@pytest.fixture
def test_app(request, tmpdir):
    """Fully wired EthApp for JSON-RPC tests: PoW disabled, no peers,
    ephemeral DB, three pre-funded/locked test accounts, plus helpers for
    mining and simulating RPC requests."""
    class TestApp(EthApp):
        def start(self):
            super(TestApp, self).start()
            log.debug('adding test accounts')
            # high balance account
            self.services.accounts.add_account(Account.new('', tester.keys[0]), store=False)
            # low balance account
            self.services.accounts.add_account(Account.new('', tester.keys[1]), store=False)
            # locked account
            locked_account = Account.new('', tester.keys[2])
            locked_account.lock()
            self.services.accounts.add_account(locked_account, store=False)
            assert set(acct.address for acct in self.services.accounts) == set(tester.accounts[:3])
        def mine_next_block(self):
            """Mine until a valid nonce is found.
            :returns: the new head
            """
            log.debug('mining next block')
            block = self.services.chain.chain.head_candidate
            delta_nonce = 10**6
            # scan the nonce space in chunks until mine() reports success
            for start_nonce in count(0, delta_nonce):
                bin_nonce, mixhash = mine(block.number, block.difficulty, block.mining_hash,
                                          start_nonce=start_nonce, rounds=delta_nonce)
                if bin_nonce:
                    break
            self.services.pow.recv_found_nonce(bin_nonce, mixhash, block.mining_hash)
            log.debug('block mined')
            return self.services.chain.chain.head
        def rpc_request(self, method, *args):
            """Simulate an incoming JSON RPC request and return the result.
            Example::
                >>> assert test_app.rpc_request('eth_getBalance', '0x' + 'ff' * 20) == '0x0'
            """
            log.debug('simulating rpc request', method=method)
            method = self.services.jsonrpc.dispatcher.get_method(method)
            res = method(*args)
            log.debug('got response', response=res)
            return res
    # genesis block with reduced difficulty, increased gas limit, and allocations to test accounts
    genesis_block = {
        "nonce": "0x0000000000000042",
        "difficulty": "0x1",
        "alloc": {
            tester.accounts[0].encode('hex'): {'balance': 10**24},
            tester.accounts[1].encode('hex'): {'balance': 1},
            tester.accounts[2].encode('hex'): {'balance': 10**24},
        },
        "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "coinbase": "0x0000000000000000000000000000000000000000",
        "timestamp": "0x00",
        "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
        "extraData": "0x",
        "gasLimit": "0x2fefd8"
    }
    genesis_block_file = tmpdir.join('test_genesis_block.json')
    genesis_block_file.write(json.dumps(genesis_block))
    # minimal app config: ephemeral db, PoW off, networking effectively disabled
    config = {
        'data_dir': str(tmpdir),
        'db': {'implementation': 'EphemDB'},
        'pow': {'activated': False},
        'p2p': {
            'min_peers': 0,
            'max_peers': 0,
            'listen_port': 29873
        },
        'node': {'privkey_hex': mk_random_privkey().encode('hex')},
        'discovery': {
            'boostrap_nodes': [],
            'listen_port': 29873
        },
        'eth': {'genesis': str(genesis_block_file), 'block': ethereum.config.default_config},
        'jsonrpc': {'listen_port': 29873}
    }
    services = [DBService, AccountsService, PeerManager, ChainService, PoWService, JSONRPCServer]
    update_config_with_defaults(config, get_default_config([TestApp] + services))
    app = TestApp(config)
    for service in services:
        service.register_with_app(app)
    def fin():
        # ensure the app is torn down even if the test fails
        log.debug('stopping test app')
        app.stop()
    request.addfinalizer(fin)
    log.debug('starting test app')
    app.start()
    return app
@pytest.mark.xfail  # sender has no funds
def test_send_transaction(test_app):
    """Send value via eth_sendTransaction; check candidate and mined balances."""
    chain = test_app.services.chain.chain
    assert chain.head_candidate.get_balance('\xff' * 20) == 0
    sender = test_app.services.accounts.unlocked_accounts()[0].address
    assert chain.head_candidate.get_balance(sender) > 0
    tx = {
        'from': address_encoder(sender),
        'to': address_encoder('\xff' * 20),
        'value': quantity_encoder(1)
    }
    tx_hash = data_decoder(test_app.rpc_request('eth_sendTransaction', tx))
    # the tx must show up in the candidate block before mining...
    assert tx_hash == chain.head_candidate.get_transaction(0).hash
    assert chain.head_candidate.get_balance('\xff' * 20) == 1
    test_app.mine_next_block()
    # ...and in the head afterwards
    assert tx_hash == chain.head.get_transaction(0).hash
    assert chain.head.get_balance('\xff' * 20) == 1
    # send transactions from account which can't pay gas
    tx['from'] = address_encoder(test_app.services.accounts.unlocked_accounts()[1].address)
    tx_hash = data_decoder(test_app.rpc_request('eth_sendTransaction', tx))
    assert chain.head_candidate.get_transactions() == []
@pytest.mark.skipif(True, reason='must timeout if it fails')
def test_pending_transaction_filter(test_app):
    """Check eth_newPendingTransactionFilter against several tx/block sequences.

    Each sequence string encodes a scenario: 't' sends a transaction, 'b'
    mines a block. After replaying a sequence, one poll of the filter must
    return exactly the hashes sent during that sequence and a second poll
    must return nothing.
    """
    filter_id = test_app.rpc_request('eth_newPendingTransactionFilter')
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == []
    tx = {
        'from': address_encoder(test_app.services.accounts.unlocked_accounts()[0].address),
        'to': address_encoder('\xff' * 20)
    }

    def test_sequence(s):
        tx_hashes = []
        for c in s:
            if c == 't':
                tx_hashes.append(test_app.rpc_request('eth_sendTransaction', tx))
            elif c == 'b':
                test_app.mine_next_block()
            else:
                assert False
        assert test_app.rpc_request('eth_getFilterChanges', filter_id) == tx_hashes
        assert test_app.rpc_request('eth_getFilterChanges', filter_id) == []

    sequences = [
        't',
        'b',
        'ttt',
        'tbt',
        'ttbttt',
        'bttbtttbt',
        'bttbtttbttbb',
    ]
    # BUG FIX: this was `map(test_sequence, sequences)`, which is a lazy
    # iterator under Python 3 and would silently skip every scenario;
    # iterate explicitly so the assertions always execute.
    for sequence in sequences:
        test_sequence(sequence)
@pytest.mark.skipif(True, reason='must timeout if it fails')
def test_new_block_filter(test_app):
    """Check that eth_newBlockFilter reports each newly mined block hash once."""
    filter_id = test_app.rpc_request('eth_newBlockFilter')
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == []
    h = test_app.mine_next_block().hash
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == [data_encoder(h)]
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == []
    # multiple blocks mined between polls are reported together, then drained
    hashes = [data_encoder(test_app.mine_next_block().hash) for i in range(3)]
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == hashes
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == []
    assert test_app.rpc_request('eth_getFilterChanges', filter_id) == []
@pytest.mark.skipif(True, reason='must timeout if it fails')
def test_get_logs(test_app):
    """Exercise eth_getLogs across pending and mined blocks.

    Deploys the LOG_EVM contract, then triggers its log event and checks
    the log entries returned for 'pending', 'latest' and explicit block
    ranges, both before and after mining.
    """
    test_app.mine_next_block()  # start with a fresh block
    n0 = test_app.services.chain.chain.head.number
    sender = address_encoder(test_app.services.accounts.unlocked_accounts()[0].address)
    contract_creation = {
        'from': sender,
        'data': data_encoder(LOG_EVM)
    }
    tx_hash = test_app.rpc_request('eth_sendTransaction', contract_creation)
    test_app.mine_next_block()
    receipt = test_app.rpc_request('eth_getTransactionReceipt', tx_hash)
    contract_address = receipt['contractAddress']
    # calling the contract with no data triggers its log event
    tx = {
        'from': sender,
        'to': contract_address
    }
    # single log in pending block
    test_app.rpc_request('eth_sendTransaction', tx)
    logs1 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': 'pending',
        'toBlock': 'pending'
    })
    assert len(logs1) == 1
    assert logs1[0]['type'] == 'pending'
    # pending logs carry no position/block information yet
    assert logs1[0]['logIndex'] == None
    assert logs1[0]['transactionIndex'] == None
    assert logs1[0]['transactionHash'] == None
    assert logs1[0]['blockHash'] == None
    assert logs1[0]['blockNumber'] == None
    assert logs1[0]['address'] == contract_address
    logs2 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': 'pending',
        'toBlock': 'pending'
    })
    # querying again must be idempotent (getLogs does not drain)
    assert logs2 == logs1
    # same log, but now mined in head
    test_app.mine_next_block()
    logs3 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': 'latest',
        'toBlock': 'latest'
    })
    assert len(logs3) == 1
    assert logs3[0]['type'] == 'mined'
    assert logs3[0]['logIndex'] == '0x0'
    assert logs3[0]['transactionIndex'] == '0x0'
    assert logs3[0]['blockHash'] == data_encoder(test_app.services.chain.chain.head.hash)
    assert logs3[0]['blockNumber'] == quantity_encoder(test_app.services.chain.chain.head.number)
    assert logs3[0]['address'] == contract_address
    # another log in pending block
    test_app.rpc_request('eth_sendTransaction', tx)
    logs4 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': 'latest',
        'toBlock': 'pending'
    })
    # order of pending vs mined entries is not guaranteed
    assert logs4 == [logs1[0], logs3[0]] or logs4 == [logs3[0], logs1[0]]
    # two logs in pending block
    test_app.rpc_request('eth_sendTransaction', tx)
    logs5 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': 'pending',
        'toBlock': 'pending'
    })
    assert len(logs5) == 2
    assert logs5[0] == logs5[1] == logs1[0]
    # two logs in head
    test_app.mine_next_block()
    logs6 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': 'latest',
        'toBlock': 'pending'
    })
    for log in logs6:
        assert log['type'] == 'mined'
        assert log['logIndex'] == '0x0'
        assert log['blockHash'] == data_encoder(test_app.services.chain.chain.head.hash)
        assert log['blockNumber'] == quantity_encoder(test_app.services.chain.chain.head.number)
        assert log['address'] == contract_address
    assert sorted([log['transactionIndex'] for log in logs6]) == ['0x0', '0x1']
    # everything together with another log in pending block
    test_app.rpc_request('eth_sendTransaction', tx)
    logs7 = test_app.rpc_request('eth_getLogs', {
        'fromBlock': quantity_encoder(n0),
        'toBlock': 'pending'
    })
    assert sorted(logs7) == sorted(logs3 + logs6 + logs1)
@pytest.mark.skipif(True, reason='must timeout if it fails')
def test_get_filter_changes(test_app):
    """Exercise eth_newFilter / eth_getFilterChanges over pending, latest
    and numeric block ranges.

    Unlike eth_getLogs, filter polls drain: each log is reported once per
    filter and subsequent polls return [].
    """
    test_app.mine_next_block()  # start with a fresh block
    n0 = test_app.services.chain.chain.head.number
    sender = address_encoder(test_app.services.accounts.unlocked_accounts()[0].address)
    contract_creation = {
        'from': sender,
        'data': data_encoder(LOG_EVM)
    }
    tx_hash = test_app.rpc_request('eth_sendTransaction', contract_creation)
    test_app.mine_next_block()
    receipt = test_app.rpc_request('eth_getTransactionReceipt', tx_hash)
    contract_address = receipt['contractAddress']
    # calling the contract triggers its log event
    tx = {
        'from': sender,
        'to': contract_address
    }
    pending_filter_id = test_app.rpc_request('eth_newFilter', {
        'fromBlock': 'pending',
        'toBlock': 'pending'
    })
    latest_filter_id = test_app.rpc_request('eth_newFilter', {
        'fromBlock': 'latest',
        'toBlock': 'latest'
    })
    tx_hashes = []
    logs = []
    # tx in pending block
    tx_hashes.append(test_app.rpc_request('eth_sendTransaction', tx))
    logs.append(test_app.rpc_request('eth_getFilterChanges', pending_filter_id))
    assert len(logs[-1]) == 1
    assert logs[-1][0]['type'] == 'pending'
    # pending logs carry no position/block information yet
    assert logs[-1][0]['logIndex'] == None
    assert logs[-1][0]['transactionIndex'] == None
    assert logs[-1][0]['transactionHash'] == None
    assert logs[-1][0]['blockHash'] == None
    assert logs[-1][0]['blockNumber'] == None
    assert logs[-1][0]['address'] == contract_address
    pending_log = logs[-1][0]
    logs.append(test_app.rpc_request('eth_getFilterChanges', pending_filter_id))
    assert logs[-1] == []
    logs.append(test_app.rpc_request('eth_getFilterChanges', latest_filter_id))
    assert logs[-1] == []
    test_app.mine_next_block()
    logs.append(test_app.rpc_request('eth_getFilterChanges', latest_filter_id))
    assert len(logs[-1]) == 1  # log from before, but now mined
    assert logs[-1][0]['type'] == 'mined'
    assert logs[-1][0]['logIndex'] == '0x0'
    assert logs[-1][0]['transactionIndex'] == '0x0'
    assert logs[-1][0]['transactionHash'] == tx_hashes[-1]
    assert logs[-1][0]['blockHash'] == data_encoder(test_app.services.chain.chain.head.hash)
    assert logs[-1][0]['blockNumber'] == quantity_encoder(test_app.services.chain.chain.head.number)
    assert logs[-1][0]['address'] == contract_address
    logs_in_range = [logs[-1][0]]
    # send tx and mine block
    tx_hashes.append(test_app.rpc_request('eth_sendTransaction', tx))
    test_app.mine_next_block()
    logs.append(test_app.rpc_request('eth_getFilterChanges', pending_filter_id))
    assert len(logs[-1]) == 1
    assert logs[-1][0]['type'] == 'mined'
    assert logs[-1][0]['logIndex'] == '0x0'
    assert logs[-1][0]['transactionIndex'] == '0x0'
    assert logs[-1][0]['transactionHash'] == tx_hashes[-1]
    assert logs[-1][0]['blockHash'] == data_encoder(test_app.services.chain.chain.head.hash)
    assert logs[-1][0]['blockNumber'] == quantity_encoder(test_app.services.chain.chain.head.number)
    assert logs[-1][0]['address'] == contract_address
    logs_in_range.append(logs[-1][0])
    logs.append(test_app.rpc_request('eth_getFilterChanges', latest_filter_id))
    assert logs[-1] == logs[-2]  # latest and pending filter see same (mined) log
    logs.append(test_app.rpc_request('eth_getFilterChanges', latest_filter_id))
    assert logs[-1] == []
    test_app.mine_next_block()
    logs.append(test_app.rpc_request('eth_getFilterChanges', pending_filter_id))
    assert logs[-1] == []
    # numeric range filter covering the last few blocks plus pending
    range_filter_id = test_app.rpc_request('eth_newFilter', {
        'fromBlock': quantity_encoder(test_app.services.chain.chain.head.number - 3),
        'toBlock': 'pending'
    })
    tx_hashes.append(test_app.rpc_request('eth_sendTransaction', tx))
    logs.append(test_app.rpc_request('eth_getFilterChanges', range_filter_id))
    assert sorted(logs[-1]) == sorted(logs_in_range + [pending_log])
| {
"content_hash": "67b30745f32ae7e105492ba9a4ab9364",
"timestamp": "",
"source": "github",
"line_count": 446,
"max_line_length": 102,
"avg_line_length": 38.699551569506724,
"alnum_prop": 0.6189455388180765,
"repo_name": "vaporry/pyethapp",
"id": "2c1b1ae5cf73a2a15a2840a6c87a7dd0e18ffd7c",
"size": "17260",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pyethapp/tests/test_jsonrpc.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1700"
},
{
"name": "Python",
"bytes": "232955"
}
],
"symlink_target": ""
} |
from accelerator.tests.factories import (
CriterionFactory,
CriterionOptionSpecFactory,
)
from accelerator.tests.contexts.judge_feedback_context import (
JudgeFeedbackContext,
)
from accelerator.models import JUDGING_FEEDBACK_STATUS_COMPLETE
import swapper
JudgeApplicationFeedback = swapper.load_model(
'accelerator', 'JudgeApplicationFeedback')
class AnalyzeJudgingContext(JudgeFeedbackContext):
    """Judge feedback context extended with a criterion and option specs.

    Builds on JudgeFeedbackContext by marking the context's feedback as
    complete, optionally adding an application, and creating one criterion
    plus one CriterionOptionSpec per option (each requiring `read_count`
    reads).
    """

    def __init__(self,
                 type="reads",
                 name="reads",
                 read_count=1,
                 options=None,
                 is_active=True,
                 judge_capacity=10,
                 add_application=True):
        super().__init__(is_active=is_active,
                         judge_capacity=judge_capacity)
        # BUG FIX: the default was the mutable literal [""], which is
        # created once and shared across every call; default to None and
        # build a fresh list so instances cannot leak state to each other.
        options = [""] if options is None else options
        self.read_count = read_count
        self.options = options
        self.feedback.feedback_status = JUDGING_FEEDBACK_STATUS_COMPLETE
        self.feedback.save()
        if add_application:
            self.add_application()
        self.criterion = CriterionFactory(type=type,
                                          name=name,
                                          judging_round=self.judging_round)
        self.option_specs = [CriterionOptionSpecFactory(
            criterion=self.criterion,
            count=read_count,
            option=option) for option in options]

    def needed_reads(self):
        """Return how many completed reads are still required overall."""
        return self.total_reads_required() - self.feedback_count()

    def total_reads_required(self):
        """Return read_count reads for every application in the context."""
        return self.read_count * len(self.applications)

    def feedback_count(self):
        """Count completed feedback per application, capped at read_count each."""
        counts = [JudgeApplicationFeedback.objects.filter(
            application=app,
            feedback_status=JUDGING_FEEDBACK_STATUS_COMPLETE).count()
            for app in self.applications]
        return sum([min(self.read_count, count)
                    for count in counts])
| {
"content_hash": "941369d3616994f430cf1db1fe1b858f",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 75,
"avg_line_length": 34.75925925925926,
"alnum_prop": 0.6014917421417155,
"repo_name": "masschallenge/django-accelerator",
"id": "fe34c6ab0e48c0f327ad940bc6ab7fd965c5e09e",
"size": "1877",
"binary": false,
"copies": "1",
"ref": "refs/heads/development",
"path": "accelerator/tests/contexts/analyze_judging_context.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "1848"
},
{
"name": "Makefile",
"bytes": "6817"
},
{
"name": "Python",
"bytes": "996767"
},
{
"name": "Shell",
"bytes": "2453"
}
],
"symlink_target": ""
} |
import requests
from collections import OrderedDict
from cattle import from_env
# CoreOS release channels, ordered most to least stable; iteration order
# determines the order in which images are registered.
URLS = OrderedDict()
URLS['Stable'] = 'http://stable.release.core-os.net/amd64-usr/'
URLS['Beta'] = 'http://beta.release.core-os.net/amd64-usr/'
URLS['Alpha'] = 'http://alpha.release.core-os.net/amd64-usr/'
# file names published alongside each release
DIGEST = 'coreos_production_openstack_image.img.bz2.DIGESTS'
# NOTE(review): IMG appears unused in this module's visible code — confirm
IMG = 'coreos_production_openstack_image.img.bz2'
def get_hash(base, version):
    """Fetch a release's DIGESTS file and return (digest, file_name).

    Scans the file for the first "<40-char digest> <filename>" pair and
    returns it; (None, None) when no such line exists.
    """
    digest_url = '{0}{1}/{2}'.format(base, version, DIGEST)
    body = requests.get(digest_url).text
    for row in body.split('\n'):
        fields = row.split(' ')
        if len(fields) == 2 and len(fields[0]) == 40:
            return fields[0], fields[1]
    return None, None
def get_version(base):
    """Resolve a channel's current release.

    Reads current/version.txt as KEY=VALUE lines, then looks up the digest
    and image file name for that version.

    Returns (version, digest, image_url).
    """
    version_url = base + 'current/version.txt'
    info = {}
    for row in requests.get(version_url).text.split('\n'):
        pair = row.split('=', 1)
        if len(pair) == 2:
            info[pair[0]] = pair[1]
    version = info.get('COREOS_VERSION_ID')
    digest, file_name = get_hash(base, version)
    return version, digest, '{0}{1}/{2}'.format(base, version, file_name)
def save_image(client, name, version, hash, url):
    """Create or update the image record for one CoreOS channel/version.

    The deterministic uuid ('coreos-<channel>-<version>') is used to find
    an existing record, so re-running updates in place instead of
    creating duplicates.
    """
    data = {
        'uuid': 'coreos-{0}-{1}'.format(name.lower(), version),
        'url': url,
        'isPublic': True,
        'checksum': hash,
        'name': 'CoreOS {0} {1}'.format(name, version)
    }
    print 'Registering CoreOS', name, 'Image', version
    images = client.list_image(uuid=data['uuid'])
    if len(images) == 1:
        client.update(images[0], **data)
    else:
        client.create_image(**data)
def create_images():
    """Register the newest image of every CoreOS channel with the API."""
    api_client = from_env()
    for channel, base_url in URLS.items():
        version, digest, image_url = get_version(base_url)
        if image_url is not None:
            save_image(api_client, channel, version, digest, image_url)
if __name__ == '__main__':
    # script entry point: sync all CoreOS channel images
    create_images()
| {
"content_hash": "a5c7a066e9e73d8beda6183bb2a4902b",
"timestamp": "",
"source": "github",
"line_count": 66,
"max_line_length": 66,
"avg_line_length": 27.46969696969697,
"alnum_prop": 0.595146166574738,
"repo_name": "ibuildthecloud/stampede",
"id": "f173d3dcdf224cf7abdf0c2c3c2045e57f205334",
"size": "1836",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "stampede/coreos_images.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "5422"
},
{
"name": "Shell",
"bytes": "17985"
}
],
"symlink_target": ""
} |
from typing import Callable
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
import base64
import copy
from requests import Response
# RFC 1123 timestamp format used by Azure Storage XML responses
DATE_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
# NOTE(review): module-level placeholders, unused in the code visible here —
# presumably populated from integration parameters elsewhere; confirm.
account_sas_token = ""
storage_account_name = ""
class Client:
    """
    API Client.

    Thin wrapper around MicrosoftStorageClient for the Azure Queue
    Storage REST API. Request methods return either the raw XML body
    (str) or the Response object; parsing is left to the callers.
    """

    def __init__(self, server_url, verify, proxy, account_sas_token, storage_account_name, api_version):
        self.ms_client = MicrosoftStorageClient(server_url, verify, proxy, account_sas_token, storage_account_name,
                                                api_version)

    def list_queues_request(self, limit: str = None, prefix: str = None, marker: str = None) -> str:
        """
        List queues in Azure storage account.
        Args:
            limit (str): Number of queues to retrieve.
            prefix (str): Filters the results to return only queues with names that begin with the specified prefix.
            marker (str): Identifies the portion of the list to be returned.
        Returns:
            str: API xml response from Azure.
        """
        params = assign_params(comp='list', maxresults=limit, prefix=prefix, marker=marker)
        response = self.ms_client.http_request(method="GET", url_suffix='', params=params, resp_type="text")
        return response

    def create_queue_request(self, queue_name: str) -> Response:
        """
        Create queue in storage account.
        Args:
            queue_name (str): New queue name.
        Returns:
            Response: API response from Azure.
        """
        response = self.ms_client.http_request(method="PUT", url_suffix=f'/{queue_name}', return_empty_response=True)
        return response

    def delete_queue_request(self, queue_name: str) -> Response:
        """
        Delete queue from storage account.
        Args:
            queue_name (str): New queue name.
        Returns:
            Response: API response from Azure.
        """
        response = self.ms_client.http_request(method="DELETE", url_suffix=f'/{queue_name}', return_empty_response=True)
        return response

    def create_message_request(self, queue_name: str, xml_data: str,
                               visibility_time_out: int = None, expiration: int = None) -> str:
        """
        Add a new message to the back of the message queue.
        Args:
            queue_name (str): Queue name.
            xml_data (str): Request XML data.
            visibility_time_out (int): Specifies the new visibility timeout value.
            expiration (int): Specifies the time-to-live interval for the message, in seconds.
        Returns:
            str: API response from Azure.
        """
        params = assign_params(messagettl=expiration, visibilitytimeout=visibility_time_out)
        response = self.ms_client.http_request(method="POST", url_suffix=f'/{queue_name}/messages', params=params,
                                               resp_type="text", data=xml_data)
        return response

    def get_messages_request(self, queue_name: str, limit: str = "1", visibility_time_out: int = 30) -> str:
        """
        Retrieves messages from the front of the queue.
        Retrieved messages will move to the end of the queue,and will be visible after 'visibility_time_out' argument.
        Args:
            limit (str): Number of messages to retrieve.
            queue_name (str): Queue name.
            visibility_time_out (int): Specifies the new visibility timeout value.
        Returns:
            str: API response from Azure.
        """
        params = assign_params(numofmessages=limit, visibilitytimeout=visibility_time_out)
        response = self.ms_client.http_request(method="GET", url_suffix=f'/{queue_name}/messages',
                                               resp_type="text", params=params)
        return response

    def peek_messages_request(self, limit: str, queue_name: str) -> str:
        """
        Retrieves messages from the front of the queue.
        Does not change message visibility (peekonly).
        Args:
            limit (str): Number of messages to retrieve
            queue_name (str): Queue name.
        Returns:
            str: API response from Azure.
        """
        params = assign_params(numofmessages=limit, peekonly="true")
        response = self.ms_client.http_request(method="GET", url_suffix=f'/{queue_name}/messages',
                                               resp_type="text", params=params)
        return response

    def delete_message_request(self, queue_name: str, message_id: str, pop_receipt: str) -> Response:
        """
        Delete message from the queue.
        Args:
            queue_name (str): Queue name.
            message_id (str): Message ID.
            pop_receipt (str): Message ID pop-receipt.
        Returns:
            Response: API response from Azure.
        """
        # pre-escape '+' so it is not decoded as a space in the query string
        params = assign_params(popreceipt=pop_receipt.replace("+", "%2b"))
        url_suffix = f'/{queue_name}/messages/{message_id}'
        response = self.ms_client.http_request(method="DELETE", url_suffix=url_suffix,
                                               params=params, return_empty_response=True)
        return response

    def update_message_request(self, queue_name: str, xml_data: str, message_id: str, pop_receipt: str,
                               visibility_time_out: str) -> Response:
        """
        Update message in the queue.
        Args:
            queue_name (str): Queue name.
            xml_data (str): Request XML data.
            message_id (str): Updated message ID.
            pop_receipt (str): Updated message ID pop-receipt.
            visibility_time_out (str): Specifies the new visibility timeout value.
        Returns:
            Response: API response text from Azure.
        """
        # pre-escape '+' so it is not decoded as a space in the query string
        params = assign_params(popreceipt=pop_receipt.replace("+", "%2b"),
                               visibilitytimeout=visibility_time_out)
        url_suffix = f'/{queue_name}/messages/{message_id}'
        response = self.ms_client.http_request(method="PUT", url_suffix=url_suffix, params=params, data=xml_data,
                                               return_empty_response=True)
        return response

    def clear_messages_request(self, queue_name: str) -> Response:
        """
        Delete all messages from the queue.
        Args:
            queue_name (str): Queue name.
        Returns:
            Response: API response text from Azure.
        """
        url_suffix = f'/{queue_name}/messages'
        response = self.ms_client.http_request(method="DELETE", url_suffix=url_suffix, return_empty_response=True)
        return response
def parse_xml_response(xml_string_response: str, tag_path: str = "", find_tag: bool = False) -> list:
    """
    Parse an Azure XML response into Python structures.

    With find_tag=True, return the raw Element objects matching tag_path
    (findall semantics). Otherwise return one dict per element named
    tag_path, mapping each child tag to its text content.

    Args:
        xml_string_response (str): XML response body.
        tag_path (str): Target tag (or path, for find_tag).
        find_tag (bool): Return raw elements instead of dicts.

    Returns:
        list: Elements or dicts extracted from the XML.
    """
    root = ET.ElementTree(ET.fromstring(xml_string_response)).getroot()
    if find_tag:
        return root.findall(tag_path)
    return [
        {child.tag: child.text for child in element}
        for element in root.iter(tag_path)
    ]
def is_base_64(string: str) -> bool:
    """
    Check whether a value round-trips through base64 decode/encode.

    Args:
        string (str): String or bytes to validate.

    Returns:
        bool: True when the value is valid base 64, else False.
    """
    try:
        if isinstance(string, bytes):
            raw = string
        elif isinstance(string, str):
            # non-ASCII text raises here and is treated as not-base64
            raw = bytes(string, 'ascii')
        else:
            raise ValueError("Argument must be string or bytes")
        return base64.b64encode(base64.b64decode(raw)) == raw
    except Exception:
        return False
def decode_message(string: str) -> str:
    """
    Return the base64-decoded text of *string*, or *string* unchanged.

    Strings that are not valid base64, or whose decoded bytes are not
    valid UTF-8, are returned as-is.

    Args:
        string (str): String to decode.

    Returns:
        str: Decoded / original string.
    """
    if not is_base_64(string):
        return string
    try:
        return base64.b64decode(string).decode("utf-8")
    except Exception:
        return string
def encode_message(string: str) -> str:
    """
    Encode a string in base64 (over its UTF-8 bytes).

    Args:
        string (str): String to encode.

    Returns:
        str: Base64-encoded string.
    """
    return base64.b64encode(string.encode('utf-8')).decode("utf-8")
def get_pagination_next_marker_element(limit: str, page: int, client_request: Callable, params: dict) -> str:
    """
    Return the continuation marker at which the requested page starts.

    Azure list operations include a NextMarker element when the listing is
    incomplete. Requesting the first (page - 1) * limit items yields the
    marker where the desired page begins; None means no further pages.

    Args:
        limit (str): Number of elements per page.
        page (int): Page number.
        client_request (Callable): Client request function.
        params (dict): Extra request params.

    Returns:
        str: Next marker, or None when the listing was complete.
    """
    skip_count = int(limit) * (page - 1)
    xml_body = client_request(limit=str(skip_count), **params)
    root = ET.ElementTree(ET.fromstring(xml_body)).getroot()
    return root.findtext('NextMarker')  # type: ignore
def list_queues_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    List queues in Azure storage account.
    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    limit = args.get('limit') or '50'
    prefix = args.get('prefix')
    page = arg_to_number(args.get('page') or '1')
    marker = ''
    readable_message = f'Queues List:\n Current page size: {limit}\n Showing page {page} out others that may exist'
    # pages beyond the first need a continuation marker from Azure; an empty
    # marker means the requested page does not exist — return empty results
    if page > 1:  # type: ignore
        marker = get_pagination_next_marker_element(limit=limit, page=page,  # type: ignore
                                                    client_request=client.list_queues_request,
                                                    params={"prefix": prefix})
        if not marker:
            return CommandResults(
                readable_output=readable_message,
                outputs_prefix='AzureStorageQueue.Queue',
                outputs=[],
                raw_response=[]
            )
    response = client.list_queues_request(limit, prefix, marker)
    # only the queue names are exposed: extract the Name elements directly
    xml_response = parse_xml_response(xml_string_response=response, tag_path="./Queues/Queue/Name", find_tag=True)
    raw_response = [{"name": element.text} for element in xml_response]
    readable_output = tableToMarkdown(
        readable_message,
        raw_response,
        headers='name',
        headerTransform=pascalToSpace
    )
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureStorageQueue.Queue',
        outputs_key_field='name',
        outputs=raw_response,
        raw_response=raw_response
    )
    return command_results
def create_queue_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Create queue in storage account.
    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: readable output for XSOAR.
    """
    queue_name = args["queue_name"]
    # Rules for naming queues can be found here:
    # https://docs.microsoft.com/en-us/rest/api/storageservices/naming-queues-and-metadata
    valid_name_pattern = "^[a-z0-9](?!.*--)[a-z0-9-]{1,61}[a-z0-9]$"
    if not re.search(valid_name_pattern, queue_name):
        raise Exception('The specified queue name is invalid.')
    response = client.create_queue_request(queue_name)
    # 201 Created means a new queue; anything else means it already existed
    if response.status_code == 201:
        readable_output = f'Queue {queue_name} successfully created.'
    else:
        readable_output = f'Queue {queue_name} already exists.'
    return CommandResults(readable_output=readable_output)
def delete_queue_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete queue from storage account.
    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: readable output for XSOAR.
    """
    queue_name = args["queue_name"]
    client.delete_queue_request(queue_name)
    return CommandResults(
        readable_output=f'Queue {queue_name} successfully deleted.'
    )
def date_values_to_iso(data: dict, keys: list):
    """
    Convert RFC 1123 date values in *data* to ISO 8601, in place.

    For example, {'InsertionTime': 'Wed, 13 Oct 2021 09:11:32 GMT'} with
    keys=['InsertionTime'] becomes {'InsertionTime': '2021-10-13T09:11:32'}.
    Keys that are missing or falsy are left untouched.

    Args:
        data (dict): Data to convert in place.
        keys (list): Keys whose values should be converted.
    """
    for field_name in keys:
        raw_value = data.get(field_name)
        if raw_value:
            parsed = datetime.strptime(raw_value, DATE_FORMAT)
            data[field_name] = FormatIso8601(parsed)
def create_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Add a new message to the back of the message queue.
    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    message_content = args["message_content"]
    queue_name = args["queue_name"]
    visibility_time_out = arg_to_number(args.get("visibility_time_out"))
    expiration = arg_to_number(args.get("expiration"))
    encode = argToBoolean(args.get("base64_encoding", False))
    message_content = encode_message(message_content) if encode else message_content
    # the API expects the content wrapped in <QueueMessage><MessageText> XML
    top = ET.Element('QueueMessage')
    child = ET.SubElement(top, 'MessageText')
    child.text = message_content
    xml_data = ET.tostring(top, encoding='unicode')
    response = client.create_message_request(queue_name, xml_data, visibility_time_out, expiration)
    raw_response = parse_xml_response(xml_string_response=response, tag_path="QueueMessage")
    # deep-copy so the in-place ISO date conversion does not mutate raw_response
    message_outputs = copy.deepcopy(raw_response)[0]
    date_values_to_iso(message_outputs, ['ExpirationTime', 'InsertionTime', 'TimeNextVisible'])
    outputs = {'name': queue_name, 'Message': message_outputs}
    readable_output = tableToMarkdown(f'{queue_name} Queue message:',
                                      message_outputs,
                                      headers=['MessageId', 'ExpirationTime',
                                               'InsertionTime', 'TimeNextVisible', 'PopReceipt'],
                                      headerTransform=pascalToSpace)
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureStorageQueue.Queue',
        outputs_key_field='name',
        outputs=outputs,
        raw_response=raw_response
    )
    return command_results
def get_messages_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieves messages from the front of the queue.
    Retrieved messages will move to the end of the queue,
    and will be visible after the amount of time specified in the 'TimeNextVisible' param.
    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    limit = args.get('limit') or '1'
    queue_name = args["queue_name"]
    visibility_time_out = arg_to_number(args.get("visibility_time_out"))
    # Azure caps a single retrieval at 32 messages
    if int(limit) < 1 or int(limit) > 32:
        raise Exception('Invalid limit value. Minimum value is 1, maximum value is 32')
    response = client.get_messages_request(queue_name, limit, visibility_time_out)  # type: ignore
    raw_response = parse_xml_response(xml_string_response=response, tag_path="QueueMessage")
    # deep-copy so decoding and ISO date conversion do not mutate raw_response
    message_outputs = copy.deepcopy(raw_response)
    for message in message_outputs:
        message['MessageText'] = decode_message(message['MessageText'])
        date_values_to_iso(message, ['ExpirationTime', 'InsertionTime', 'TimeNextVisible'])
    outputs = {'name': queue_name, 'Message': message_outputs}
    readable_output = tableToMarkdown(f'{queue_name} Queue messages:',
                                      message_outputs,
                                      headers=['MessageText', 'MessageId', 'PopReceipt', 'DequeueCount',
                                               'ExpirationTime', 'InsertionTime', 'TimeNextVisible'],
                                      headerTransform=pascalToSpace)
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureStorageQueue.Queue',
        outputs_key_field='name',
        outputs=outputs,
        raw_response=raw_response
    )
    return command_results
def peek_messages_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Retrieves messages from the front of the queue.
    Unlike get_messages_command, peeking does not change message visibility.
    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR.
    Returns:
        CommandResults: outputs, readable outputs and raw response for XSOAR.
    """
    limit = args.get('limit') or '1'
    queue_name = args["queue_name"]
    # Azure caps a single retrieval at 32 messages
    if int(limit) < 1 or int(limit) > 32:
        raise Exception('Invalid limit value. Minimum value is 1, maximum value is 32')
    response = client.peek_messages_request(limit, queue_name)
    raw_response = parse_xml_response(xml_string_response=response, tag_path="QueueMessage")
    # deep-copy so decoding and ISO date conversion do not mutate raw_response
    message_outputs = copy.deepcopy(raw_response)
    for message in message_outputs:
        message['MessageText'] = decode_message(message['MessageText'])
        date_values_to_iso(message, ['ExpirationTime', 'InsertionTime'])
    outputs = {'name': queue_name, 'Message': message_outputs}
    readable_output = tableToMarkdown(f'{queue_name} Queue messages:',
                                      message_outputs,
                                      headers=['MessageText', 'MessageId', 'DequeueCount',
                                               'ExpirationTime', 'InsertionTime'],
                                      headerTransform=pascalToSpace)
    command_results = CommandResults(
        readable_output=readable_output,
        outputs_prefix='AzureStorageQueue.Queue',
        outputs_key_field='name',
        outputs=outputs,
        raw_response=raw_response
    )
    return command_results
def dequeue_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Remove the message at the front of the queue.

    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR ('queue_name' required).

    Returns:
        CommandResults: Readable output for XSOAR.
    """
    queue_name = args["queue_name"]
    xml_response = client.get_messages_request(queue_name=queue_name)
    messages = parse_xml_response(xml_string_response=xml_response, tag_path="QueueMessage")
    if not messages:
        # Nothing to dequeue - report instead of failing.
        return CommandResults(readable_output=f'There are no messages in {queue_name} queue.')
    head = messages[0]
    # Deleting a message requires both its id and the pop receipt returned
    # by the get-messages call.
    client.delete_message_request(queue_name=queue_name,
                                  message_id=head["MessageId"],
                                  pop_receipt=head["PopReceipt"])
    return CommandResults(readable_output=f'Message in {queue_name} successfully deleted.')
def delete_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete a specific message from the queue.

    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR ('queue_name',
            'message_id' and 'pop_receipt' required).

    Returns:
        CommandResults: Readable output for XSOAR.
    """
    queue_name = args["queue_name"]
    client.delete_message_request(queue_name=queue_name,
                                  message_id=args["message_id"],
                                  pop_receipt=args["pop_receipt"])
    return CommandResults(readable_output=f'Message in {queue_name} successfully deleted.')
def update_message_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Update the content of an existing message in the queue.

    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR ('message_content',
            'queue_name', 'message_id', 'pop_receipt' and
            'visibility_time_out' required; 'base64_encoding' optional).

    Returns:
        CommandResults: readable output for XSOAR.
    """
    message_content = args["message_content"]
    queue_name = args["queue_name"]
    message_id = args["message_id"]
    pop_receipt = args["pop_receipt"]
    should_encode = argToBoolean(args.get("base64_encoding", False))
    visibility_time_out = args["visibility_time_out"]
    if should_encode:
        message_content = encode_message(message_content)
    # Request body is an XML document:
    # <QueueMessage><MessageText>...</MessageText></QueueMessage>
    root = ET.Element('QueueMessage')
    ET.SubElement(root, 'MessageText').text = message_content
    body = ET.tostring(root, encoding='unicode')
    client.update_message_request(queue_name, body, message_id, pop_receipt, visibility_time_out)
    return CommandResults(readable_output=f'The message in {queue_name} successfully updated.')
def clear_messages_command(client: Client, args: Dict[str, Any]) -> CommandResults:
    """
    Delete every message from the given queue.

    Args:
        client (Client): Azure Queue Storage API client.
        args (dict): Command arguments from XSOAR ('queue_name' required).

    Returns:
        CommandResults: Readable output for XSOAR.
    """
    queue_name = args["queue_name"]
    client.clear_messages_request(queue_name=queue_name)
    return CommandResults(readable_output=f'{queue_name} was cleared of messages successfully.')
def parse_incident(message: dict) -> dict:
    """
    Convert a queue message into an XSOAR incident dict.

    Args:
        message (dict): Message item. Mutated in place: MessageText is
            decoded and time fields are normalized.

    Returns:
        dict: XSOAR Incident with 'name' and 'rawJSON' keys.
    """
    message['MessageText'] = decode_message(message['MessageText'])
    # Normalize every time field to ISO-8601 with an explicit UTC marker.
    for key in ('ExpirationTime', 'InsertionTime', 'TimeNextVisible'):
        parsed = datetime.strptime(message.get(key), DATE_FORMAT)  # type: ignore
        message[key] = FormatIso8601(parsed) + 'Z'
    return {
        'name': "Azure Storage - Queue MessageId: " + message["MessageId"],
        'rawJSON': json.dumps(message),
    }
def fetch_incidents(client: Client, queue_name: str, max_fetch: str) -> None:
    """
    Fetch messages from the queue and ingest them as XSOAR incidents.

    Args:
        client (Client): Azure Queue Storage API client.
        queue_name (str): Queue name.
        max_fetch (str): Maximum incidents for one fetch.
    """
    xml_response = client.get_messages_request(queue_name=queue_name, limit=max_fetch)
    messages = parse_xml_response(xml_string_response=xml_response, tag_path="QueueMessage")
    incidents = []
    for msg in messages:
        msg['queue_name'] = queue_name
        incidents.append(parse_incident(msg))
    demisto.incidents(incidents)
    # Delete the fetched messages so they are not re-ingested next fetch.
    for msg in messages:
        client.delete_message_request(queue_name=queue_name,
                                      message_id=msg["MessageId"],
                                      pop_receipt=msg["PopReceipt"])
def test_module(client: Client, max_fetch: str) -> None:
    """
    Tests API connectivity, authentication and the max_fetch parameter.

    Args:
        client (Client): Azure Queue Storage API client.
        max_fetch (str): Maximum incidents for one fetch.

    Returns:
        None: the result is reported via ``return_results`` — 'ok' on
        success, a descriptive error message otherwise.
    """
    try:
        client.list_queues_request()
        max_fetch_int = int(max_fetch)
    except Exception as exception:
        if 'Error in API call' in str(exception):
            return return_results('Authorization Error: make sure API Credentials are correctly set')
        if 'Error Type' in str(exception):
            return return_results(
                'Verify that the storage account name is correct and that you have access to the server from your host.')
        # int(max_fetch) failed - the parameter is not a number.
        # FIX: use isinstance instead of the fragile string comparison
        # type(exception).__name__ == 'ValueError' (also misses subclasses).
        if isinstance(exception, ValueError):
            return return_results('Invalid Maximum fetch value.')
        raise exception
    if max_fetch_int <= 0 or max_fetch_int > 32:
        return return_results('Invalid Maximum fetch value. Minimum value is 1, maximum value is 32')
    return_results('ok')
def main() -> None:
    """
    Main entry point: read params, build the client and dispatch the command.
    """
    params: Dict[str, Any] = demisto.params()
    args: Dict[str, Any] = demisto.args()
    verify_certificate: bool = not params.get('insecure', False)
    proxy = params.get('proxy', False)
    # Module-level credentials used by the Client / request helpers.
    global account_sas_token
    global storage_account_name
    account_sas_token = params['credentials']['password']
    storage_account_name = params['credentials']['identifier']
    api_version = "2020-10-02"
    base_url = f'https://{storage_account_name}.queue.core.windows.net'
    command = demisto.command()
    demisto.debug(f'Command being called is {command}')
    try:
        requests.packages.urllib3.disable_warnings()
        client: Client = Client(base_url, verify_certificate, proxy, account_sas_token, storage_account_name,
                                api_version)
        # Command name -> handler; all handlers share the (client, args) signature.
        commands = {
            'azure-storage-queue-list': list_queues_command,
            'azure-storage-queue-create': create_queue_command,
            'azure-storage-queue-delete': delete_queue_command,
            'azure-storage-queue-message-create': create_message_command,
            'azure-storage-queue-message-get': get_messages_command,
            'azure-storage-queue-message-peek': peek_messages_command,
            'azure-storage-queue-message-dequeue': dequeue_message_command,
            'azure-storage-queue-message-update': update_message_command,
            'azure-storage-queue-message-delete': delete_message_command,
            'azure-storage-queue-message-clear': clear_messages_command
        }
        if command == 'test-module':
            test_module(client, params.get('max_fetch'))  # type: ignore
        # BUG FIX: this branch must be `elif`, not a separate `if`.
        # With a separate `if`, a successful 'test-module' run fell through
        # to the final `else` and raised NotImplementedError, which the
        # outer handler reported as an error.
        elif command == 'fetch-incidents':
            fetch_incidents(client, params.get('queue_name'), params.get('max_fetch'))  # type: ignore
        elif command in commands:
            return_results(commands[command](client, args))
        else:
            raise NotImplementedError(f'{command} command is not implemented.')
    except Exception as e:
        return_error(str(e))
from MicrosoftAzureStorageApiModule import * # noqa: E402
if __name__ in ['__main__', 'builtin', 'builtins']:
main()
| {
"content_hash": "d5f9dd69d216eedbe04e9a93a5b0694e",
"timestamp": "",
"source": "github",
"line_count": 875,
"max_line_length": 121,
"avg_line_length": 33.16,
"alnum_prop": 0.6195416164053076,
"repo_name": "VirusTotal/content",
"id": "d7ded6939c9025b5dfe08974699a6a3b310552b6",
"size": "29015",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "Packs/AzureStorageQueue/Integrations/AzureStorageQueue/AzureStorageQueue.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "2146"
},
{
"name": "HTML",
"bytes": "205901"
},
{
"name": "JavaScript",
"bytes": "1584075"
},
{
"name": "PowerShell",
"bytes": "442288"
},
{
"name": "Python",
"bytes": "47594464"
},
{
"name": "Rich Text Format",
"bytes": "480911"
},
{
"name": "Shell",
"bytes": "108066"
},
{
"name": "YARA",
"bytes": "1185"
}
],
"symlink_target": ""
} |
"""Ce fichier contient la classe Attitudes détaillée plus bas."""
from abstraits.obase import BaseObj
from .attitude import Attitude
class Attitudes(BaseObj):

    """Container of the social attitudes.

    This class lists every Attitude item usable in the universe at a
    given moment.

    See: ./attitude.py

    """

    enregistrer = True  # persisted by the BaseObj machinery

    def __init__(self):
        """Build an empty container."""
        BaseObj.__init__(self)
        # NOTE: the attribute name `_attitudes` is part of the persisted
        # state and must not be renamed.
        self._attitudes = {}
        self._construire()

    def __getnewargs__(self):
        return ()

    def __bool__(self):
        """True as soon as at least one attitude is stored."""
        return len(self._attitudes) > 0

    def __contains__(self, cle):
        """Return whether an attitude exists under the given key."""
        return cle in self._attitudes

    def __len__(self):
        """Return the number of stored attitudes."""
        return len(self._attitudes)

    def __getitem__(self, cle):
        """Return the attitude stored under the given key."""
        return self._attitudes[cle]

    def __setitem__(self, cle, valeur):
        """Store an attitude under the given key."""
        self._attitudes[cle] = valeur

    def __delitem__(self, cle):
        """Remove the attitude stored under the given key."""
        del self._attitudes[cle]

    def keys(self):
        """Return the attitude keys as a list."""
        return [cle for cle in self._attitudes]

    def values(self):
        """Return the Attitude objects as a list."""
        return [attitude for attitude in self._attitudes.values()]

    def ajouter_ou_modifier(self, cle):
        """Return the attitude for the given key, creating it when missing."""
        try:
            return self._attitudes[cle]
        except KeyError:
            attitude = Attitude(cle, self)
            self._attitudes[cle] = attitude
            return attitude
| {
"content_hash": "9b80c835f985c710e97dfdf3ea66d9f0",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 73,
"avg_line_length": 27.38095238095238,
"alnum_prop": 0.5982608695652174,
"repo_name": "stormi/tsunami",
"id": "378cdbee5cf85241de02be87598a67f9b90b2deb",
"size": "3298",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/primaires/communication/attitudes.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "7188300"
},
{
"name": "Ruby",
"bytes": "373"
}
],
"symlink_target": ""
} |
"""SSD MobilenetV1 FPN Feature Extractor."""
import copy
import functools
import tensorflow as tf
from object_detection.meta_architectures import ssd_meta_arch
from object_detection.models import feature_map_generators
from object_detection.utils import context_manager
from object_detection.utils import ops
from object_detection.utils import shape_utils
from nets import mobilenet_v1
slim = tf.contrib.slim
# A modified config of mobilenet v1 that makes it more detection friendly,
def _create_modified_mobilenet_config():
  """Returns MobileNet v1 conv defs with the last two layers re-specified."""
  defs = copy.deepcopy(mobilenet_v1.MOBILENETV1_CONV_DEFS)
  # Replace the two deepest depthwise-separable layers: a stride-2/512
  # layer followed by a stride-1/256 layer instead of the
  # classification-oriented defaults.
  defs[-2] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=2, depth=512)
  defs[-1] = mobilenet_v1.DepthSepConv(kernel=[3, 3], stride=1, depth=256)
  return defs
class SSDMobileNetV1FpnFeatureExtractor(ssd_meta_arch.SSDFeatureExtractor):
  """SSD Feature Extractor using MobilenetV1 FPN features."""
  def __init__(self,
               is_training,
               depth_multiplier,
               min_depth,
               pad_to_multiple,
               conv_hyperparams_fn,
               fpn_min_level=3,
               fpn_max_level=7,
               additional_layer_depth=256,
               reuse_weights=None,
               use_explicit_padding=False,
               use_depthwise=False,
               override_base_feature_extractor_hyperparams=False):
    """SSD FPN feature extractor based on Mobilenet v1 architecture.
    Args:
      is_training: whether the network is in training mode.
      depth_multiplier: float depth multiplier for feature extractor.
      min_depth: minimum feature extractor depth.
      pad_to_multiple: the nearest multiple to zero pad the input height and
        width dimensions to.
      conv_hyperparams_fn: A function to construct tf slim arg_scope for conv2d
        and separable_conv2d ops in the layers that are added on top of the base
        feature extractor.
      fpn_min_level: the highest resolution feature map to use in FPN. The valid
        values are {2, 3, 4, 5} which map to MobileNet v1 layers
        {Conv2d_3_pointwise, Conv2d_5_pointwise, Conv2d_11_pointwise,
        Conv2d_13_pointwise}, respectively.
      fpn_max_level: the smallest resolution feature map to construct or use in
        FPN. FPN constructions uses features maps starting from fpn_min_level
        upto the fpn_max_level. In the case that there are not enough feature
        maps in the backbone network, additional feature maps are created by
        applying stride 2 convolutions until we get the desired number of fpn
        levels.
      additional_layer_depth: additional feature map layer channel depth.
      reuse_weights: whether to reuse variables. Default is None.
      use_explicit_padding: Whether to use explicit padding when extracting
        features. Default is False.
      use_depthwise: Whether to use depthwise convolutions. Default is False.
      override_base_feature_extractor_hyperparams: Whether to override
        hyperparameters of the base feature extractor with the one from
        `conv_hyperparams_fn`.
    """
    super(SSDMobileNetV1FpnFeatureExtractor, self).__init__(
        is_training=is_training,
        depth_multiplier=depth_multiplier,
        min_depth=min_depth,
        pad_to_multiple=pad_to_multiple,
        conv_hyperparams_fn=conv_hyperparams_fn,
        reuse_weights=reuse_weights,
        use_explicit_padding=use_explicit_padding,
        use_depthwise=use_depthwise,
        override_base_feature_extractor_hyperparams=
        override_base_feature_extractor_hyperparams)
    self._fpn_min_level = fpn_min_level
    self._fpn_max_level = fpn_max_level
    self._additional_layer_depth = additional_layer_depth
    # In depthwise mode the backbone uses a detection-friendly variant of
    # the MobileNet v1 conv defs (see _create_modified_mobilenet_config).
    self._conv_defs = None
    if self._use_depthwise:
      self._conv_defs = _create_modified_mobilenet_config()
  def preprocess(self, resized_inputs):
    """SSD preprocessing.
    Maps pixel values to the range [-1, 1].
    Args:
      resized_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    Returns:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    """
    return (2.0 / 255.0) * resized_inputs - 1.0
  def extract_features(self, preprocessed_inputs):
    """Extract features from preprocessed inputs.
    Args:
      preprocessed_inputs: a [batch, height, width, channels] float tensor
        representing a batch of images.
    Returns:
      feature_maps: a list of tensors where the ith tensor has shape
        [batch, height_i, width_i, depth_i]
    """
    preprocessed_inputs = shape_utils.check_min_image_dim(
        33, preprocessed_inputs)
    # Run the MobileNet v1 backbone and collect its endpoint activations.
    with tf.variable_scope('MobilenetV1',
                           reuse=self._reuse_weights) as scope:
      with slim.arg_scope(
          mobilenet_v1.mobilenet_v1_arg_scope(
              is_training=None, regularize_depthwise=True)):
        with (slim.arg_scope(self._conv_hyperparams_fn())
              if self._override_base_feature_extractor_hyperparams
              else context_manager.IdentityContextManager()):
          _, image_features = mobilenet_v1.mobilenet_v1_base(
              ops.pad_to_multiple(preprocessed_inputs, self._pad_to_multiple),
              final_endpoint='Conv2d_13_pointwise',
              min_depth=self._min_depth,
              depth_multiplier=self._depth_multiplier,
              conv_defs=self._conv_defs,
              use_explicit_padding=self._use_explicit_padding,
              scope=scope)
    # Layer depths scale with depth_multiplier but never drop below
    # min_depth.
    depth_fn = lambda d: max(int(d * self._depth_multiplier), self._min_depth)
    with slim.arg_scope(self._conv_hyperparams_fn()):
      with tf.variable_scope('fpn', reuse=self._reuse_weights):
        # Backbone endpoints usable by the FPN; index i maps to FPN
        # level i + 2 (see the fpn_min_level doc above).
        feature_blocks = [
            'Conv2d_3_pointwise', 'Conv2d_5_pointwise', 'Conv2d_11_pointwise',
            'Conv2d_13_pointwise'
        ]
        # The backbone only provides maps up to level 5; coarser levels
        # are synthesized below with stride-2 convolutions.
        base_fpn_max_level = min(self._fpn_max_level, 5)
        feature_block_list = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_block_list.append(feature_blocks[level - 2])
        fpn_features = feature_map_generators.fpn_top_down_feature_maps(
            [(key, image_features[key]) for key in feature_block_list],
            depth=depth_fn(self._additional_layer_depth),
            use_depthwise=self._use_depthwise,
            use_explicit_padding=self._use_explicit_padding)
        feature_maps = []
        for level in range(self._fpn_min_level, base_fpn_max_level + 1):
          feature_maps.append(fpn_features['top_down_{}'.format(
              feature_blocks[level - 2])])
        last_feature_map = fpn_features['top_down_{}'.format(
            feature_blocks[base_fpn_max_level - 2])]
        # Construct coarse features
        padding = 'VALID' if self._use_explicit_padding else 'SAME'
        kernel_size = 3
        for i in range(base_fpn_max_level + 1, self._fpn_max_level + 1):
          if self._use_depthwise:
            conv_op = functools.partial(
                slim.separable_conv2d, depth_multiplier=1)
          else:
            conv_op = slim.conv2d
          if self._use_explicit_padding:
            last_feature_map = ops.fixed_padding(
                last_feature_map, kernel_size)
          last_feature_map = conv_op(
              last_feature_map,
              num_outputs=depth_fn(self._additional_layer_depth),
              kernel_size=[kernel_size, kernel_size],
              stride=2,
              padding=padding,
              scope='bottom_up_Conv2d_{}'.format(i - base_fpn_max_level + 13))
          feature_maps.append(last_feature_map)
    return feature_maps
| {
"content_hash": "42c303d722c1b11007d951d5dd532a8d",
"timestamp": "",
"source": "github",
"line_count": 177,
"max_line_length": 80,
"avg_line_length": 43.96610169491525,
"alnum_prop": 0.643407864302236,
"repo_name": "derekjchow/models",
"id": "b0c149aead313ba97c4997da1f91c5759f6994bb",
"size": "8472",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "research/object_detection/models/ssd_mobilenet_v1_fpn_feature_extractor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "1523636"
},
{
"name": "Dockerfile",
"bytes": "9821"
},
{
"name": "GLSL",
"bytes": "976"
},
{
"name": "HTML",
"bytes": "147010"
},
{
"name": "JavaScript",
"bytes": "33316"
},
{
"name": "Jupyter Notebook",
"bytes": "2831692"
},
{
"name": "Makefile",
"bytes": "4933"
},
{
"name": "Python",
"bytes": "14201542"
},
{
"name": "Shell",
"bytes": "158255"
}
],
"symlink_target": ""
} |
"""
Tests for the zipline.assets package
"""
import sys
from unittest import TestCase
from datetime import datetime, timedelta
import pickle
import uuid
import warnings
import pandas as pd
from pandas.tseries.tools import normalize_date
from pandas.util.testing import assert_frame_equal
from nose_parameterized import parameterized
from numpy import full
from zipline.assets import Asset, Equity, Future, AssetFinder
from zipline.assets.futures import FutureChain
from zipline.errors import (
SymbolNotFound,
MultipleSymbolsFound,
SidAssignmentError,
RootSymbolNotFound,
)
from zipline.finance.trading import TradingEnvironment
from zipline.utils.test_utils import (
all_subindices,
make_rotating_asset_info,
)
def build_lookup_generic_cases():
    """
    Generate test cases for AssetFinder test_lookup_generic.

    Each case is a tuple of (finder, query, as_of_date, expected_result).
    """
    unique_start = pd.Timestamp('2013-01-01', tz='UTC')
    unique_end = pd.Timestamp('2014-01-01', tz='UTC')
    # Two assets share a symbol over non-overlapping one-day windows, so a
    # symbol lookup must be disambiguated by the as-of date.
    dupe_0_start = pd.Timestamp('2013-01-01', tz='UTC')
    dupe_0_end = dupe_0_start + timedelta(days=1)
    dupe_1_start = pd.Timestamp('2013-01-03', tz='UTC')
    dupe_1_end = dupe_1_start + timedelta(days=1)
    frame = pd.DataFrame.from_records(
        [
            {
                'sid': 0,
                'symbol': 'duplicated_0',
                'start_date': dupe_0_start.value,
                'end_date': dupe_0_end.value,
                'exchange': '',
            },
            {
                'sid': 1,
                'symbol': 'duplicated_1',
                'start_date': dupe_1_start.value,
                'end_date': dupe_1_end.value,
                'exchange': '',
            },
            {
                'sid': 2,
                'symbol': 'unique',
                'start_date': unique_start.value,
                'end_date': unique_end.value,
                'exchange': '',
            },
        ],
        index='sid')
    env = TradingEnvironment()
    env.write_data(equities_df=frame)
    finder = env.asset_finder
    dupe_0, dupe_1, unique = assets = [
        finder.retrieve_asset(i)
        for i in range(3)
    ]
    dupe_0_start = dupe_0.start_date
    dupe_1_start = dupe_1.start_date
    cases = [
        ##
        # Scalars
        # Asset object
        (finder, assets[0], None, assets[0]),
        (finder, assets[1], None, assets[1]),
        (finder, assets[2], None, assets[2]),
        # int
        (finder, 0, None, assets[0]),
        (finder, 1, None, assets[1]),
        (finder, 2, None, assets[2]),
        # Duplicated symbol with resolution date
        (finder, 'duplicated_0', dupe_0_start, dupe_0),
        (finder, 'duplicated_1', dupe_1_start, dupe_1),
        # Unique symbol, with or without resolution date.
        (finder, 'unique', unique_start, unique),
        (finder, 'unique', None, unique),
        ##
        # Iterables
        # Iterables of Asset objects.
        (finder, assets, None, assets),
        (finder, iter(assets), None, assets),
        # Iterables of ints
        (finder, (0, 1), None, assets[:-1]),
        (finder, iter((0, 1)), None, assets[:-1]),
        # Iterables of symbols.
        (finder, ('duplicated_0', 'unique'), dupe_0_start, [dupe_0, unique]),
        (finder, ('duplicated_1', 'unique'), dupe_1_start, [dupe_1, unique]),
        # Mixed types
        (finder,
         ('duplicated_0', 2, 'unique', 1, dupe_1),
         dupe_0_start,
         [dupe_0, assets[2], unique, assets[1], dupe_1]),
    ]
    return cases
class AssetTestCase(TestCase):
    """Unit tests for ``Asset``: sid interchangeability, pickling, ordering."""
    def test_asset_object(self):
        """An Asset hashes, compares and converts like its integer sid."""
        # FIX: assertEquals is a deprecated alias of assertEqual; use the
        # canonical spelling throughout.
        self.assertEqual({5061: 'foo'}[Asset(5061)], 'foo')
        self.assertEqual(Asset(5061), 5061)
        self.assertEqual(5061, Asset(5061))
        self.assertEqual(Asset(5061), Asset(5061))
        self.assertEqual(int(Asset(5061)), 5061)
        self.assertEqual(str(Asset(5061)), 'Asset(5061)')
    def test_asset_is_pickleable(self):
        """Pickling round-trips every public Asset attribute."""
        # Very wow
        s = Asset(
            1337,
            symbol="DOGE",
            asset_name="DOGECOIN",
            start_date=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
            end_date=pd.Timestamp('2014-06-25 11:21AM', tz='UTC'),
            first_traded=pd.Timestamp('2013-12-08 9:31AM', tz='UTC'),
            exchange='THE MOON',
        )
        s_unpickled = pickle.loads(pickle.dumps(s))
        attrs_to_check = ['end_date',
                          'exchange',
                          'first_traded',
                          'end_date',
                          'asset_name',
                          'start_date',
                          'sid',
                          'start_date',
                          'symbol']
        for attr in attrs_to_check:
            self.assertEqual(getattr(s, attr), getattr(s_unpickled, attr))
    def test_asset_comparisons(self):
        """Equality and ordering against ints, floats, strings and None."""
        s_23 = Asset(23)
        s_24 = Asset(24)
        self.assertEqual(s_23, s_23)
        self.assertEqual(s_23, 23)
        self.assertEqual(23, s_23)
        self.assertNotEqual(s_23, s_24)
        self.assertNotEqual(s_23, 24)
        self.assertNotEqual(s_23, "23")
        self.assertNotEqual(s_23, 23.5)
        self.assertNotEqual(s_23, [])
        self.assertNotEqual(s_23, None)
        self.assertLess(s_23, s_24)
        self.assertLess(s_23, 24)
        self.assertGreater(24, s_23)
        self.assertGreater(s_24, s_23)
    def test_lt(self):
        self.assertTrue(Asset(3) < Asset(4))
        self.assertFalse(Asset(4) < Asset(4))
        self.assertFalse(Asset(5) < Asset(4))
    def test_le(self):
        self.assertTrue(Asset(3) <= Asset(4))
        self.assertTrue(Asset(4) <= Asset(4))
        self.assertFalse(Asset(5) <= Asset(4))
    def test_eq(self):
        self.assertFalse(Asset(3) == Asset(4))
        self.assertTrue(Asset(4) == Asset(4))
        self.assertFalse(Asset(5) == Asset(4))
    def test_ge(self):
        self.assertFalse(Asset(3) >= Asset(4))
        self.assertTrue(Asset(4) >= Asset(4))
        self.assertTrue(Asset(5) >= Asset(4))
    def test_gt(self):
        self.assertFalse(Asset(3) > Asset(4))
        self.assertFalse(Asset(4) > Asset(4))
        self.assertTrue(Asset(5) > Asset(4))
    def test_type_mismatch(self):
        """Ordering an Asset against a str raises TypeError on Python 3."""
        if sys.version_info.major < 3:
            self.assertIsNotNone(Asset(3) < 'a')
            self.assertIsNotNone('a' < Asset(3))
        else:
            with self.assertRaises(TypeError):
                Asset(3) < 'a'
            with self.assertRaises(TypeError):
                'a' < Asset(3)
class TestFuture(TestCase):
    """Unit tests for the ``Future`` asset subclass."""
    # Shared fixture: one fully-populated Future contract.
    future = Future(
        2468,
        symbol='OMH15',
        root_symbol='OM',
        notice_date=pd.Timestamp('2014-01-20', tz='UTC'),
        expiration_date=pd.Timestamp('2014-02-20', tz='UTC'),
        auto_close_date=pd.Timestamp('2014-01-18', tz='UTC'),
        contract_multiplier=500
    )
    def test_str(self):
        """str() shows the sid and symbol."""
        # FIX: call the builtin instead of invoking __str__ directly.
        strd = str(self.future)
        self.assertEqual("Future(2468 [OMH15])", strd)
    def test_repr(self):
        """repr() includes every Future-specific field."""
        # FIX: call the builtin instead of invoking __repr__ directly.
        reprd = repr(self.future)
        self.assertTrue("Future" in reprd)
        self.assertTrue("2468" in reprd)
        self.assertTrue("OMH15" in reprd)
        self.assertTrue("root_symbol='OM'" in reprd)
        self.assertTrue(("notice_date=Timestamp('2014-01-20 00:00:00+0000', "
                         "tz='UTC')") in reprd)
        self.assertTrue("expiration_date=Timestamp('2014-02-20 00:00:00+0000'"
                        in reprd)
        self.assertTrue("auto_close_date=Timestamp('2014-01-18 00:00:00+0000'"
                        in reprd)
        self.assertTrue("contract_multiplier=500" in reprd)
    def test_reduce(self):
        """__reduce__ (tested directly on purpose) names the Future class."""
        reduced = self.future.__reduce__()
        self.assertEqual(Future, reduced[0])
    def test_to_and_from_dict(self):
        """to_dict/from_dict round-trips all Future-specific fields."""
        dictd = self.future.to_dict()
        self.assertTrue('root_symbol' in dictd)
        self.assertTrue('notice_date' in dictd)
        self.assertTrue('expiration_date' in dictd)
        self.assertTrue('auto_close_date' in dictd)
        self.assertTrue('contract_multiplier' in dictd)
        from_dict = Future.from_dict(dictd)
        self.assertTrue(isinstance(from_dict, Future))
        self.assertEqual(self.future, from_dict)
    def test_root_symbol(self):
        """root_symbol is exposed as a plain attribute."""
        self.assertEqual('OM', self.future.root_symbol)
class AssetFinderTestCase(TestCase):
    def setUp(self):
        # Fresh TradingEnvironment (and thus a fresh asset db) per test.
        self.env = TradingEnvironment()
    def test_lookup_symbol_fuzzy(self):
        """Fuzzy lookup strips the configured fuzzy_char ('@') from symbols."""
        as_of = pd.Timestamp('2013-01-01', tz='UTC')
        frame = pd.DataFrame.from_records(
            [
                {
                    'sid': i,
                    'symbol': 'TEST@%d' % i,
                    'company_name': "company%d" % i,
                    'start_date': as_of.value,
                    'end_date': as_of.value,
                    'exchange': uuid.uuid4().hex,
                    'fuzzy': 'TEST%d' % i
                }
                for i in range(3)
            ]
        )
        self.env.write_data(equities_df=frame)
        finder = AssetFinder(self.env.engine, fuzzy_char='@')
        asset_0, asset_1, asset_2 = (
            finder.retrieve_asset(i) for i in range(3)
        )
        for i in range(2):  # we do it twice to test for caching bugs
            self.assertIsNone(finder.lookup_symbol('test', as_of))
            self.assertEqual(
                asset_1,
                finder.lookup_symbol('test@1', as_of)
            )
            # Adding an unnecessary fuzzy shouldn't matter.
            self.assertEqual(
                asset_1,
                finder.lookup_symbol('test@1', as_of, fuzzy=True)
            )
            # Shouldn't find this with no fuzzy_str passed.
            self.assertIsNone(finder.lookup_symbol('test1', as_of))
            # Should find exact match.
            self.assertEqual(
                asset_1,
                finder.lookup_symbol('test1', as_of, fuzzy=True),
            )
    def test_lookup_symbol_resolve_multiple(self):
        """A duplicated symbol resolves by as-of date; no date raises."""
        # Incrementing by two so that start and end dates for each
        # generated Asset don't overlap (each Asset's end_date is the
        # day after its start date.)
        dates = pd.date_range('2013-01-01', freq='2D', periods=5, tz='UTC')
        df = pd.DataFrame.from_records(
            [
                {
                    'sid': i,
                    'symbol': 'existing',
                    'start_date': date.value,
                    'end_date': (date + timedelta(days=1)).value,
                    'exchange': 'NYSE',
                }
                for i, date in enumerate(dates)
            ]
        )
        self.env.write_data(equities_df=df)
        finder = AssetFinder(self.env.engine)
        for _ in range(2):  # Run checks twice to test for caching bugs.
            with self.assertRaises(SymbolNotFound):
                finder.lookup_symbol_resolve_multiple('non_existing', dates[0])
            with self.assertRaises(MultipleSymbolsFound):
                finder.lookup_symbol_resolve_multiple('existing', None)
            for i, date in enumerate(dates):
                # Verify that we correctly resolve multiple symbols using
                # the supplied date
                result = finder.lookup_symbol_resolve_multiple(
                    'existing',
                    date,
                )
                self.assertEqual(result.symbol, 'existing')
                self.assertEqual(result.sid, i)
    # Cases (finder, query, as_of, expected) come from
    # build_lookup_generic_cases at the top of this file.
    @parameterized.expand(
        build_lookup_generic_cases()
    )
    def test_lookup_generic(self, finder, symbols, reference_date, expected):
        """
        Ensure that lookup_generic works with various permutations of inputs.
        """
        results, missing = finder.lookup_generic(symbols, reference_date)
        self.assertEqual(results, expected)
        self.assertEqual(missing, [])
    def test_lookup_generic_handle_missing(self):
        """Unknown and not-yet-started symbols land in `missing`, not errors."""
        data = pd.DataFrame.from_records(
            [
                {
                    'sid': 0,
                    'symbol': 'real',
                    'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
                    'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
                    'exchange': '',
                },
                {
                    'sid': 1,
                    'symbol': 'also_real',
                    'start_date': pd.Timestamp('2013-1-1', tz='UTC'),
                    'end_date': pd.Timestamp('2014-1-1', tz='UTC'),
                    'exchange': '',
                },
                # Sid whose end date is before our query date. We should
                # still correctly find it.
                {
                    'sid': 2,
                    'symbol': 'real_but_old',
                    'start_date': pd.Timestamp('2002-1-1', tz='UTC'),
                    'end_date': pd.Timestamp('2003-1-1', tz='UTC'),
                    'exchange': '',
                },
                # Sid whose start_date is **after** our query date. We should
                # **not** find it.
                {
                    'sid': 3,
                    'symbol': 'real_but_in_the_future',
                    'start_date': pd.Timestamp('2014-1-1', tz='UTC'),
                    'end_date': pd.Timestamp('2020-1-1', tz='UTC'),
                    'exchange': 'THE FUTURE',
                },
            ]
        )
        self.env.write_data(equities_df=data)
        finder = AssetFinder(self.env.engine)
        results, missing = finder.lookup_generic(
            ['real', 1, 'fake', 'real_but_old', 'real_but_in_the_future'],
            pd.Timestamp('2013-02-01', tz='UTC'),
        )
        self.assertEqual(len(results), 3)
        self.assertEqual(results[0].symbol, 'real')
        self.assertEqual(results[0].sid, 0)
        self.assertEqual(results[1].symbol, 'also_real')
        self.assertEqual(results[1].sid, 1)
        self.assertEqual(results[2].symbol, 'real_but_old')
        self.assertEqual(results[2].sid, 2)
        self.assertEqual(len(missing), 2)
        self.assertEqual(missing[0], 'fake')
        self.assertEqual(missing[1], 'real_but_in_the_future')
    def test_insert_metadata(self):
        """Writing equity metadata builds a retrievable Equity asset."""
        data = {0: {'asset_type': 'equity',
                    'start_date': '2014-01-01',
                    'end_date': '2015-01-01',
                    'symbol': "PLAY",
                    'foo_data': "FOO"}}
        self.env.write_data(equities_data=data)
        finder = AssetFinder(self.env.engine)
        # Test proper insertion
        equity = finder.retrieve_asset(0)
        self.assertIsInstance(equity, Equity)
        self.assertEqual('PLAY', equity.symbol)
        self.assertEqual(pd.Timestamp('2015-01-01', tz='UTC'),
                         equity.end_date)
        # Test invalid field
        # Unknown metadata keys are dropped, not attached to the Asset.
        with self.assertRaises(AttributeError):
            equity.foo_data
def test_consume_metadata(self):
# Test dict consumption
dict_to_consume = {0: {'symbol': 'PLAY'},
1: {'symbol': 'MSFT'}}
self.env.write_data(equities_data=dict_to_consume)
finder = AssetFinder(self.env.engine)
equity = finder.retrieve_asset(0)
self.assertIsInstance(equity, Equity)
self.assertEqual('PLAY', equity.symbol)
# Test dataframe consumption
df = pd.DataFrame(columns=['asset_name', 'exchange'], index=[0, 1])
df['asset_name'][0] = "Dave'N'Busters"
df['exchange'][0] = "NASDAQ"
df['asset_name'][1] = "Microsoft"
df['exchange'][1] = "NYSE"
self.env = TradingEnvironment()
self.env.write_data(equities_df=df)
finder = AssetFinder(self.env.engine)
self.assertEqual('NASDAQ', finder.retrieve_asset(0).exchange)
self.assertEqual('Microsoft', finder.retrieve_asset(1).asset_name)
    def test_consume_asset_as_identifier(self):
        """Asset objects themselves are accepted as identifiers."""
        # Build some end dates
        eq_end = pd.Timestamp('2012-01-01', tz='UTC')
        fut_end = pd.Timestamp('2008-01-01', tz='UTC')
        # Build some simple Assets
        equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
        future_asset = Future(200, symbol="TESTFUT", end_date=fut_end)
        # Consume the Assets
        self.env.write_data(equities_identifiers=[equity_asset],
                            futures_identifiers=[future_asset])
        finder = AssetFinder(self.env.engine)
        # Test equality with newly built Assets
        self.assertEqual(equity_asset, finder.retrieve_asset(1))
        self.assertEqual(future_asset, finder.retrieve_asset(200))
        self.assertEqual(eq_end, finder.retrieve_asset(1).end_date)
        self.assertEqual(fut_end, finder.retrieve_asset(200).end_date)
    def test_sid_assignment(self):
        """Sid-less identifiers get distinct sids auto-assigned."""
        # This metadata does not contain SIDs
        metadata = ['PLAY', 'MSFT']
        today = normalize_date(pd.Timestamp('2015-07-09', tz='UTC'))
        # Write data with sid assignment
        self.env.write_data(equities_identifiers=metadata,
                            allow_sid_assignment=True)
        # Verify that Assets were built and different sids were assigned
        finder = AssetFinder(self.env.engine)
        play = finder.lookup_symbol('PLAY', today)
        msft = finder.lookup_symbol('MSFT', today)
        self.assertEqual('PLAY', play.symbol)
        self.assertIsNotNone(play.sid)
        self.assertNotEqual(play.sid, msft.sid)
    def test_sid_assignment_failure(self):
        """Sid-less identifiers raise when assignment is disallowed."""
        # This metadata does not contain SIDs
        metadata = ['PLAY', 'MSFT']
        # Write data without sid assignment, asserting failure
        with self.assertRaises(SidAssignmentError):
            self.env.write_data(equities_identifiers=metadata,
                                allow_sid_assignment=False)
    def test_security_dates_warning(self):
        """Legacy security_* accessors emit DeprecationWarning."""
        # Build an asset with an end_date
        eq_end = pd.Timestamp('2012-01-01', tz='UTC')
        equity_asset = Equity(1, symbol="TESTEQ", end_date=eq_end)
        # Catch all warnings
        with warnings.catch_warnings(record=True) as w:
            # Cause all warnings to always be triggered
            warnings.simplefilter("always")
            equity_asset.security_start_date
            equity_asset.security_end_date
            equity_asset.security_name
            # Verify the warning
            self.assertEqual(3, len(w))
            for warning in w:
                self.assertTrue(issubclass(warning.category,
                                           DeprecationWarning))
def test_lookup_future_chain(self):
    """lookup_future_chain filters contracts by start/notice dates and
    returns them ordered (the asserts below show sid 1 before sid 0,
    matching ascending notice_date)."""
    metadata = {
        # Notice day is today, so not valid
        2: {
            'symbol': 'ADN15',
            'root_symbol': 'AD',
            'asset_type': 'future',
            'notice_date': pd.Timestamp('2015-05-14', tz='UTC'),
            'start_date': pd.Timestamp('2015-01-01', tz='UTC')
        },
        1: {
            'symbol': 'ADV15',
            'root_symbol': 'AD',
            'asset_type': 'future',
            'notice_date': pd.Timestamp('2015-08-14', tz='UTC'),
            'start_date': pd.Timestamp('2015-01-01', tz='UTC')
        },
        # Starts trading today, so should be valid.
        0: {
            'symbol': 'ADF16',
            'root_symbol': 'AD',
            'asset_type': 'future',
            'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
            'start_date': pd.Timestamp('2015-05-14', tz='UTC')
        },
        # Copy of the above future, but starts trading in August,
        # so it isn't valid.
        3: {
            'symbol': 'ADF16',
            'root_symbol': 'AD',
            'asset_type': 'future',
            'notice_date': pd.Timestamp('2015-11-16', tz='UTC'),
            'start_date': pd.Timestamp('2015-08-01', tz='UTC')
        },
    }
    self.env.write_data(futures_data=metadata)
    finder = AssetFinder(self.env.engine)
    dt = pd.Timestamp('2015-05-14', tz='UTC')
    last_year = pd.Timestamp('2014-01-01', tz='UTC')
    first_day = pd.Timestamp('2015-01-01', tz='UTC')

    # Check that we get the expected number of contracts, in the
    # right order
    ad_contracts = finder.lookup_future_chain('AD', dt, dt)
    self.assertEqual(len(ad_contracts), 2)
    self.assertEqual(ad_contracts[0].sid, 1)
    self.assertEqual(ad_contracts[1].sid, 0)

    # Check that pd.NaT for knowledge_date uses the value of as_of_date
    ad_contracts = finder.lookup_future_chain('AD', dt, pd.NaT)
    self.assertEqual(len(ad_contracts), 2)

    # Check that we get nothing if our knowledge date is last year
    ad_contracts = finder.lookup_future_chain('AD', dt, last_year)
    self.assertEqual(len(ad_contracts), 0)

    # Check that we get things that start on the knowledge date
    ad_contracts = finder.lookup_future_chain('AD', dt, first_day)
    self.assertEqual(len(ad_contracts), 1)

    # Check that pd.NaT for as_of_date gives the whole chain
    ad_contracts = finder.lookup_future_chain('AD', pd.NaT, first_day)
    self.assertEqual(len(ad_contracts), 4)
def test_map_identifier_index_to_sids(self):
    """Asset objects map to their integer sids, preserving input order."""
    as_of = pd.Timestamp('2014-01-01', tz='UTC')
    finder = AssetFinder(self.env.engine)

    # A mix of equities and futures; the mapping should not care.
    aapl = Equity(1, symbol="AAPL")
    goog = Equity(2, symbol="GOOG")
    clk = Future(200, symbol="CLK15")
    clm = Future(201, symbol="CLM15")

    mapped = finder.map_identifier_index_to_sids(
        [aapl, goog, clk, clm], as_of)
    self.assertListEqual([1, 2, 200, 201], mapped)
    # Every mapped value is a plain int.
    for sid in mapped:
        self.assertIsInstance(sid, int)

    # Order of the input determines order of the output.
    mapped = finder.map_identifier_index_to_sids(
        [clm, goog, clk, aapl], as_of)
    self.assertListEqual([201, 2, 200, 1], mapped)
def test_compute_lifetimes(self):
    """AssetFinder.lifetimes returns a (dates x assets) boolean frame
    marking, for each date, which assets are alive -- with or without
    counting the start date itself."""
    num_assets = 4
    env = TradingEnvironment()
    trading_day = env.trading_day
    first_start = pd.Timestamp('2015-04-01', tz='UTC')
    # Assets whose start dates are staggered 3 trading days apart,
    # each alive for 5 trading days.
    frame = make_rotating_asset_info(
        num_assets=num_assets,
        first_start=first_start,
        frequency=env.trading_day,
        periods_between_starts=3,
        asset_lifetime=5
    )
    env.write_data(equities_df=frame)
    finder = env.asset_finder
    all_dates = pd.date_range(
        start=first_start,
        end=frame.end_date.max(),
        freq=trading_day,
    )
    # Exercise lifetimes() over every contiguous subrange of dates.
    for dates in all_subindices(all_dates):
        # Expected answers, built cell-by-cell below.
        expected_with_start_raw = full(
            shape=(len(dates), num_assets),
            fill_value=False,
            dtype=bool,
        )
        expected_no_start_raw = full(
            shape=(len(dates), num_assets),
            fill_value=False,
            dtype=bool,
        )
        for i, date in enumerate(dates):
            it = frame[['start_date', 'end_date']].itertuples()
            for j, start, end in it:
                # This way of doing the checks is redundant, but very
                # clear.
                if start <= date <= end:
                    expected_with_start_raw[i, j] = True
                if start < date:
                    expected_no_start_raw[i, j] = True
        expected_with_start = pd.DataFrame(
            data=expected_with_start_raw,
            index=dates,
            columns=frame.index.values,
        )
        result = finder.lifetimes(dates, include_start_date=True)
        assert_frame_equal(result, expected_with_start)
        expected_no_start = pd.DataFrame(
            data=expected_no_start_raw,
            index=dates,
            columns=frame.index.values,
        )
        result = finder.lifetimes(dates, include_start_date=False)
        assert_frame_equal(result, expected_no_start)
def test_sids(self):
    """AssetFinder.sids reports every sid written to the environment."""
    env = TradingEnvironment()
    env.write_data(equities_identifiers=[1, 2, 3])
    sids = env.asset_finder.sids
    # Exactly the three written sids, no more, no fewer.
    self.assertEqual(3, len(sids))
    for expected_sid in (1, 2, 3):
        self.assertTrue(expected_sid in sids)
class TestFutureChain(TestCase):
    """Tests for FutureChain built over a shared AssetFinder of four CL
    contracts with staggered start/notice/expiration dates."""

    @classmethod
    def setUpClass(cls):
        # Sids 0-2 start trading 2005-12-01; sid 3 starts 2006-02-01.
        # Notice dates are successive, so the chain shrinks as each
        # contract's notice date passes.
        metadata = {
            0: {
                'symbol': 'CLG06',
                'root_symbol': 'CL',
                'asset_type': 'future',
                'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
                'notice_date': pd.Timestamp('2005-12-20', tz='UTC'),
                'expiration_date': pd.Timestamp('2006-01-20', tz='UTC')},
            1: {
                'root_symbol': 'CL',
                'symbol': 'CLK06',
                'asset_type': 'future',
                'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
                'notice_date': pd.Timestamp('2006-03-20', tz='UTC'),
                'expiration_date': pd.Timestamp('2006-04-20', tz='UTC')},
            2: {
                'symbol': 'CLQ06',
                'root_symbol': 'CL',
                'asset_type': 'future',
                'start_date': pd.Timestamp('2005-12-01', tz='UTC'),
                'notice_date': pd.Timestamp('2006-06-20', tz='UTC'),
                'expiration_date': pd.Timestamp('2006-07-20', tz='UTC')},
            3: {
                'symbol': 'CLX06',
                'root_symbol': 'CL',
                'asset_type': 'future',
                'start_date': pd.Timestamp('2006-02-01', tz='UTC'),
                'notice_date': pd.Timestamp('2006-09-20', tz='UTC'),
                'expiration_date': pd.Timestamp('2006-10-20', tz='UTC')}
        }
        env = TradingEnvironment()
        env.write_data(futures_data=metadata)
        cls.asset_finder = env.asset_finder

    @classmethod
    def tearDownClass(cls):
        del cls.asset_finder

    def test_len(self):
        """ Test the __len__ method of FutureChain.
        """
        # None of the contracts have started yet.
        cl = FutureChain(self.asset_finder, lambda: '2005-11-30', 'CL')
        self.assertEqual(len(cl), 0)

        # Sids 0, 1, & 2 have started, 3 has not yet started.
        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
        self.assertEqual(len(cl), 3)

        # Sid 0 is still valid the day before its notice date.
        cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
        self.assertEqual(len(cl), 3)

        # Sid 0 is now invalid, leaving only Sids 1 & 2 valid.
        cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
        self.assertEqual(len(cl), 2)

        # Sid 3 has started, so 1, 2, & 3 are now valid.
        cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
        self.assertEqual(len(cl), 3)

        # All contracts are no longer valid.
        cl = FutureChain(self.asset_finder, lambda: '2006-09-20', 'CL')
        self.assertEqual(len(cl), 0)

    def test_getitem(self):
        """ Test the __getitem__ method of FutureChain.
        """
        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
        self.assertEqual(cl[0], 0)
        self.assertEqual(cl[1], 1)
        self.assertEqual(cl[2], 2)
        # Sid 3 hasn't started yet, so the chain has only 3 entries.
        with self.assertRaises(IndexError):
            cl[3]

        cl = FutureChain(self.asset_finder, lambda: '2005-12-19', 'CL')
        self.assertEqual(cl[0], 0)

        # After sid 0's notice date the front contract becomes sid 1.
        cl = FutureChain(self.asset_finder, lambda: '2005-12-20', 'CL')
        self.assertEqual(cl[0], 1)

        cl = FutureChain(self.asset_finder, lambda: '2006-02-01', 'CL')
        self.assertEqual(cl[-1], 3)

    def test_root_symbols(self):
        """ Test that different variations on root symbols are handled
        as expected.
        """
        # Make sure this successfully gets the chain for CL.
        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
        self.assertEqual(cl.root_symbol, 'CL')

        # These root symbols don't exist, so RootSymbolNotFound should
        # be raised immediately.
        with self.assertRaises(RootSymbolNotFound):
            FutureChain(self.asset_finder, lambda: '2005-12-01', 'CLZ')

        with self.assertRaises(RootSymbolNotFound):
            FutureChain(self.asset_finder, lambda: '2005-12-01', '')

    def test_repr(self):
        """ Test the __repr__ method of FutureChain.
        """
        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')
        cl_feb = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL',
                             as_of_date=pd.Timestamp('2006-02-01', tz='UTC'))

        # The default chain should not include the as of date.
        self.assertEqual(repr(cl), "FutureChain(root_symbol='CL')")

        # An explicit as of date should show up in the repr.
        self.assertEqual(
            repr(cl_feb),
            ("FutureChain(root_symbol='CL', "
             "as_of_date='2006-02-01 00:00:00+00:00')")
        )

    def test_as_of(self):
        """ Test the as_of method of FutureChain.
        """
        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')

        # Test that the as_of_date is set correctly to the future
        feb = pd.Timestamp('2006-02-01', tz='UTC')
        cl_feb = cl.as_of(feb)
        self.assertEqual(
            cl_feb.as_of_date,
            pd.Timestamp(feb, tz='UTC')
        )

        # Test that the as_of_date is set correctly to the past, with
        # args of str, datetime.datetime, and pd.Timestamp.
        # NOTE(review): all three calls below actually pass a pd.Timestamp
        # (the first and third are identical); presumably the first was
        # meant to be a str and the second a datetime -- confirm against
        # FutureChain.as_of and fix the fixtures if so.
        feb_prev = pd.Timestamp('2005-02-01', tz='UTC')
        cl_feb_prev = cl.as_of(feb_prev)
        self.assertEqual(
            cl_feb_prev.as_of_date,
            pd.Timestamp(feb_prev, tz='UTC')
        )

        feb_prev = pd.Timestamp(datetime(year=2005, month=2, day=1), tz='UTC')
        cl_feb_prev = cl.as_of(feb_prev)
        self.assertEqual(
            cl_feb_prev.as_of_date,
            pd.Timestamp(feb_prev, tz='UTC')
        )

        feb_prev = pd.Timestamp('2005-02-01', tz='UTC')
        cl_feb_prev = cl.as_of(feb_prev)
        self.assertEqual(
            cl_feb_prev.as_of_date,
            pd.Timestamp(feb_prev, tz='UTC')
        )

        # The chain as of the current dt should always be the same as
        # the default chain.
        self.assertEqual(cl[0], cl.as_of(pd.Timestamp('2005-12-01'))[0])

    def test_offset(self):
        """ Test the offset method of FutureChain.
        """
        cl = FutureChain(self.asset_finder, lambda: '2005-12-01', 'CL')

        # Test that an offset forward sets as_of_date as expected
        self.assertEqual(
            cl.offset('3 days').as_of_date,
            cl.as_of_date + pd.Timedelta(days=3)
        )

        # Test that an offset backward sets as_of_date as expected, with
        # time delta given as str, datetime.timedelta, and pd.Timedelta.
        self.assertEqual(
            cl.offset('-1000 days').as_of_date,
            cl.as_of_date + pd.Timedelta(days=-1000)
        )
        self.assertEqual(
            cl.offset(timedelta(days=-1000)).as_of_date,
            cl.as_of_date + pd.Timedelta(days=-1000)
        )
        self.assertEqual(
            cl.offset(pd.Timedelta('-1000 days')).as_of_date,
            cl.as_of_date + pd.Timedelta(days=-1000)
        )

        # An offset of zero should give the original chain.
        self.assertEqual(cl[0], cl.offset(0)[0])
        self.assertEqual(cl[0], cl.offset("0 days")[0])

        # A string that doesn't represent a time delta should raise a
        # ValueError.
        with self.assertRaises(ValueError):
            cl.offset("blah")
| {
"content_hash": "067091452f3e1bc65dcb4843f7ce65f9",
"timestamp": "",
"source": "github",
"line_count": 884,
"max_line_length": 79,
"avg_line_length": 36.328054298642535,
"alnum_prop": 0.5402628137261007,
"repo_name": "chrjxj/zipline",
"id": "ebe11e373eda41e934b973f4455d37ac5b1807e4",
"size": "32697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_assets.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "564"
},
{
"name": "Emacs Lisp",
"bytes": "138"
},
{
"name": "Python",
"bytes": "1394403"
},
{
"name": "Shell",
"bytes": "4065"
}
],
"symlink_target": ""
} |
from autobahn.twisted.component import Component, run
from autobahn.twisted.util import sleep
from twisted.internet.defer import inlineCallbacks
import os
# Connection parameters, overridable via environment variables.
url = os.environ.get('CBURL', 'ws://localhost:8080/ws')  # router transport URL
realmvalue = os.environ.get('CBREALM', 'realm1')  # WAMP realm to join
topic = os.environ.get('CBTOPIC', 'com.myapp.hello')  # topic to subscribe to
# WAMP component that connects to ``url`` and joins ``realmvalue``.
component = Component(transports=url, realm=realmvalue)
@component.on_join
@inlineCallbacks
def joined(session, details):
    """Runs when the component joins its realm; subscribes to ``topic``.

    :param session: the WAMP session that just joined the realm.
    :param details: join details (unused).
    """
    print("session ready")

    def oncounter(count):
        # Handler for events published on ``topic``.
        # BUG FIX: the original passed the format string and the count as
        # two separate print() arguments, printing the literal "{0}";
        # interpolate with str.format instead.
        print("event received: {0}".format(count))

    try:
        yield session.subscribe(oncounter, topic)
        print("subscribed to topic")
    except Exception as e:
        print("could not subscribe to topic: {0}".format(e))
if __name__ == "__main__":
    # Start the reactor and run the component until it disconnects.
    run([component])
| {
"content_hash": "4ed61d041bbedd9461d1f754153df307",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 60,
"avg_line_length": 25.258064516129032,
"alnum_prop": 0.6845466155810983,
"repo_name": "crossbario/crossbar-examples",
"id": "db5da3967fe91e88d43dd24608766df12e44812f",
"size": "2077",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "getting-started/1.hello-world/client_component_subscribe.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5120"
},
{
"name": "C#",
"bytes": "22931"
},
{
"name": "C++",
"bytes": "77209"
},
{
"name": "CSS",
"bytes": "216506"
},
{
"name": "Dockerfile",
"bytes": "1423"
},
{
"name": "Erlang",
"bytes": "16493"
},
{
"name": "HTML",
"bytes": "4701160"
},
{
"name": "Hack",
"bytes": "4082"
},
{
"name": "Java",
"bytes": "20795"
},
{
"name": "JavaScript",
"bytes": "2989112"
},
{
"name": "Jupyter Notebook",
"bytes": "335655"
},
{
"name": "Lua",
"bytes": "1233"
},
{
"name": "Makefile",
"bytes": "68685"
},
{
"name": "PHP",
"bytes": "45600"
},
{
"name": "PLSQL",
"bytes": "157154"
},
{
"name": "PLpgSQL",
"bytes": "5053"
},
{
"name": "Python",
"bytes": "856797"
},
{
"name": "SCSS",
"bytes": "58669"
},
{
"name": "Shell",
"bytes": "46285"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration creating the ``Comment`` model.

    The model has an auto ``id`` primary key and a single 50-character
    ``comment`` field.
    """

    dependencies = [
        ('tools', '0007_baseuser'),
    ]

    operations = [
        migrations.CreateModel(
            name='Comment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('comment', models.CharField(max_length=50)),
            ],
        ),
    ]
| {
"content_hash": "34df0b432a1a509c71ed27a72284d534",
"timestamp": "",
"source": "github",
"line_count": 20,
"max_line_length": 114,
"avg_line_length": 24.85,
"alnum_prop": 0.5653923541247485,
"repo_name": "svaswani/STEALTH",
"id": "97343e354a37a5248ec42a361259e9aa72a2aaca",
"size": "569",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "django/tools/migrations/0008_comment.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "4348"
},
{
"name": "HTML",
"bytes": "43918"
},
{
"name": "Python",
"bytes": "18919"
}
],
"symlink_target": ""
} |
from mock import Mock
from mock import patch
import json
import yaml
from nailgun.api.v1.validators.cluster import AttributesValidator
from nailgun.errors import errors
from nailgun.test.base import BaseTestCase
class TestAttributesValidator(BaseTestCase):
    """Unit tests for cluster AttributesValidator.

    Each test feeds an attributes document (a JSON string, or YAML parsed
    into a dict) to the validator and asserts acceptance or rejection.
    """

    def test_generated_attributes_validation(self):
        # 'generated' attributes may not be supplied by the user.
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate,
                          '{"generated": {"name": "test"}}')

    def test_editable_attributes_validation(self):
        # 'editable' must be a mapping, not a bare string.
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate,
                          '{"editable": "name"}')

    def test_missing_type(self):
        # An editable attribute without a 'type' field is rejected.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              value: 'x'
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    def test_missing_value(self):
        # An editable attribute without a 'value' field is rejected.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: checkbox
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    def test_invalid_regexp(self):
        # Value '212a' does not match the attribute's own regex ^\d+$.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: text
              value: '212a'
              regex:
                error: Invalid
                source: ^\d+$
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    def test_checkbox_value(self):
        # checkbox value must be a boolean: true passes ...
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: checkbox
              value: true
              weight: 80
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))
        # ... while a string is rejected.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: checkbox
              value: 'x'
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    def test_custom_repo_configuration_value(self):
        # custom_repo_configuration accepts a list of repo mappings.
        attrs = '''
        editable:
          storage:
            repos:
              description: desc
              type: custom_repo_configuration
              value:
              - name: ubuntu
                priority: null
                section: main universe multiverse
                suite: trusty
                type: deb
                uri: http://archive.ubuntu.com/ubuntu/
              - name: ubuntu-updates
                priority: null
                section: main universe multiverse
                suite: trusty-updates
                type: deb
                uri: http://archive.ubuntu.com/ubuntu/
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))

    def test_password_value(self):
        # password value must be a string: '2' passes ...
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: password
              value: '2'
              weight: 80
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))
        # ... while the integer 2 is rejected.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: password
              value: 2
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    def test_radio_value(self):
        # radio value must appear among 'values'; a missing per-option
        # description is tolerated.
        attrs = '''
        editable:
          storage:
            syslog_transport:
              label: Syslog transport protocol
              type: radio
              value: tcp
              values:
              - data: udp
                description: ''
                label: UDP
              - data: tcp
                description: ''
                label: TCP
              - data: missing-description
                label: Missing Description
              weight: 3
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))

    def test_select_value(self):
        # select value must appear among the declared 'values'.
        attrs = '''
        editable:
          common:
            libvirt_type:
              label: Hypervisor type
              type: select
              value: qemu
              values:
              - data: kvm
                label: KVM
                description: KVM description
              - data: qemu
                label: QEMU
                description: QEMU description
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))

    def test_text_value(self):
        # text value must be a string: '2' passes ...
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: text
              value: '2'
              weight: 80
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))
        # ... while the integer 2 is rejected.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: text
              value: 2
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    def test_textarea_value(self):
        # textarea value must be a string: '2' passes ...
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: textarea
              value: '2'
              weight: 80
        '''
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate_editable_attributes,
                             yaml.load(attrs))
        # ... while the integer 2 is rejected.
        attrs = '''
        editable:
          storage:
            osd_pool_size:
              description: desc
              label: OSD Pool Size
              type: textarea
              value: 2
              weight: 80
        '''
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate_editable_attributes,
                          yaml.load(attrs))

    @patch('nailgun.objects.Cluster.get_updated_editable_attributes')
    def test_invalid_provisioning_method(self, mock_cluster_attrs):
        # 'not_image' is not a valid provisioning method for release 7.0.
        attrs = {'editable': {'provision': {'method':
                 {'value': 'not_image', 'type': 'text'}}}}
        mock_cluster_attrs.return_value = attrs
        cluster_mock = Mock(release=Mock(environment_version='7.0'))
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate,
                          json.dumps(attrs), cluster_mock)

    @patch('nailgun.objects.Cluster.get_updated_editable_attributes')
    def test_provision_method_missing(self, mock_cluster_attrs):
        # 'method' outside a 'provision' section is rejected for 7.0.
        attrs = {'editable': {'method':
                 {'value': 'not_image', 'type': 'text'}}}
        mock_cluster_attrs.return_value = attrs
        cluster_mock = Mock(release=Mock(environment_version='7.0'))
        self.assertRaises(errors.InvalidData,
                          AttributesValidator.validate,
                          json.dumps(attrs), cluster_mock)

    @patch('nailgun.objects.Cluster.get_updated_editable_attributes')
    def test_provision_method_passed(self, mock_cluster_attrs):
        # A proper provision.method of 'image' passes on 7.0.
        attrs = {'editable': {'provision': {'method':
                 {'value': 'image', 'type': 'text'}}}}
        mock_cluster_attrs.return_value = attrs
        cluster_mock = Mock(
            is_locked=False, release=Mock(environment_version='7.0')
        )
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate,
                             json.dumps(attrs), cluster_mock)

    @patch('nailgun.objects.Cluster.get_updated_editable_attributes')
    def test_provision_method_passed_old(self, mock_cluster_attrs):
        # The same attributes also pass on an older (6.0) release.
        attrs = {'editable': {'provision': {'method':
                 {'value': 'image', 'type': 'text'}}}}
        mock_cluster_attrs.return_value = attrs
        cluster_mock = Mock(
            is_locked=False, release=Mock(environment_version='6.0')
        )
        self.assertNotRaises(errors.InvalidData,
                             AttributesValidator.validate,
                             json.dumps(attrs), cluster_mock)

    def test_valid_attributes(self):
        # Documents with no schema violations pass both entry points.
        valid_attibutes = [
            '{"editable": {"name": "test"}}',
            '{"name": "test"}',
        ]
        for attributes in valid_attibutes:
            self.assertNotRaises(errors.InvalidData,
                                 AttributesValidator.validate,
                                 attributes)
            self.assertNotRaises(
                errors.InvalidData,
                AttributesValidator.validate_editable_attributes,
                yaml.load(attributes))
| {
"content_hash": "abfbd39db48a974e08a89f4b1939aa02",
"timestamp": "",
"source": "github",
"line_count": 320,
"max_line_length": 78,
"avg_line_length": 32.68125,
"alnum_prop": 0.4905335628227194,
"repo_name": "huntxu/fuel-web",
"id": "049b25664db145180e011be8ba32ef62e2728627",
"size": "11091",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "nailgun/nailgun/test/unit/test_attributes_validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "97543"
},
{
"name": "HTML",
"bytes": "2844"
},
{
"name": "JavaScript",
"bytes": "815534"
},
{
"name": "Mako",
"bytes": "1943"
},
{
"name": "Python",
"bytes": "3710735"
},
{
"name": "Ruby",
"bytes": "13649"
},
{
"name": "Shell",
"bytes": "22527"
}
],
"symlink_target": ""
} |
"""Define API Queries."""
import six
from gcloud.bigquery._helpers import _TypedProperty
from gcloud.bigquery._helpers import _rows_from_json
from gcloud.bigquery.dataset import Dataset
from gcloud.bigquery.job import QueryJob
from gcloud.bigquery.table import _parse_schema_resource
class _SyncQueryConfiguration(object):
    """User-settable configuration options for synchronous query jobs.

    Values which are ``None`` -> server defaults.
    """
    # One slot per user-settable option; ``None`` means "let the server
    # choose its default".  Exposed on QueryResults via _TypedProperty.
    _default_dataset = None
    _dry_run = None
    _max_results = None
    _timeout_ms = None
    _preserve_nulls = None
    _use_query_cache = None
class QueryResults(object):
    """Synchronous job: query tables.

    :type query: string
    :param query: SQL query string

    :type client: :class:`gcloud.bigquery.client.Client`
    :param client: A client which holds credentials and project configuration
                   for the dataset (which requires a project).
    """
    def __init__(self, query, client):
        self._client = client
        # Server-populated resource; filled by _set_properties after run().
        self._properties = {}
        self.query = query
        self._configuration = _SyncQueryConfiguration()
        # Lazily-built QueryJob; see the ``job`` property.
        self._job = None

    @property
    def project(self):
        """Project bound to the job.

        :rtype: string
        :returns: the project (derived from the client).
        """
        return self._client.project

    def _require_client(self, client):
        """Check client or verify over-ride.

        :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.

        :rtype: :class:`gcloud.bigquery.client.Client`
        :returns: The client passed in or the currently bound client.
        """
        if client is None:
            client = self._client
        return client

    @property
    def cache_hit(self):
        """Query results served from cache.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#cacheHit

        :rtype: boolean or ``NoneType``
        :returns: True if the query results were served from cache (None
                  until set by the server).
        """
        return self._properties.get('cacheHit')

    @property
    def complete(self):
        """Server completed query.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobComplete

        :rtype: boolean or ``NoneType``
        :returns: True if the query completed on the server (None
                  until set by the server).
        """
        return self._properties.get('jobComplete')

    @property
    def errors(self):
        """Errors generated by the query.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#errors

        :rtype: list of mapping, or ``NoneType``
        :returns: Mappings describing errors generated on the server (None
                  until set by the server).
        """
        return self._properties.get('errors')

    @property
    def name(self):
        """Job name, generated by the back-end.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#jobReference

        :rtype: string, or ``NoneType``
        :returns: the job ID assigned by the server (None until set by
                  the server).
        """
        return self._properties.get('jobReference', {}).get('jobId')

    @property
    def job(self):
        """Job instance used to run the query.

        :rtype: :class:`gcloud.bigquery.job.QueryJob`, or ``NoneType``
        :returns: Job instance used to run the query (None until
                  ``jobReference`` property is set by the server).
        """
        # Build the QueryJob on first access, once the server has
        # assigned a job reference.
        if self._job is None:
            job_ref = self._properties.get('jobReference')
            if job_ref is not None:
                self._job = QueryJob(job_ref['jobId'], self.query,
                                     self._client)
        return self._job

    @property
    def page_token(self):
        """Token for fetching next batch of results.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#pageToken

        :rtype: string, or ``NoneType``
        :returns: Token generated on the server (None until set by the server).
        """
        return self._properties.get('pageToken')

    @property
    def total_rows(self):
        """Total number of rows returned by the query

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalRows

        :rtype: integer, or ``NoneType``
        :returns: Count generated on the server (None until set by the server).
        """
        return self._properties.get('totalRows')

    @property
    def total_bytes_processed(self):
        """Total number of bytes processed by the query

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#totalBytesProcessed

        :rtype: integer, or ``NoneType``
        :returns: Count generated on the server (None until set by the server).
        """
        return self._properties.get('totalBytesProcessed')

    @property
    def rows(self):
        """Query results.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#rows

        :rtype: list of tuples of row values, or ``NoneType``
        :returns: fields describing the schema (None until set by the server).
        """
        return _rows_from_json(self._properties.get('rows', ()), self.schema)

    @property
    def schema(self):
        """Schema for query results.

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#schema

        :rtype: list of :class:`SchemaField`, or ``NoneType``
        :returns: fields describing the schema (None until set by the server).
        """
        return _parse_schema_resource(self._properties.get('schema', {}))

    default_dataset = _TypedProperty('default_dataset', Dataset)
    """See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#defaultDataset
    """

    dry_run = _TypedProperty('dry_run', bool)
    """See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#dryRun
    """

    max_results = _TypedProperty('max_results', six.integer_types)
    """See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#maxResults
    """

    preserve_nulls = _TypedProperty('preserve_nulls', bool)
    """See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#preserveNulls
    """

    timeout_ms = _TypedProperty('timeout_ms', six.integer_types)
    """See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#timeoutMs
    """

    use_query_cache = _TypedProperty('use_query_cache', bool)
    """See:
    https://cloud.google.com/bigquery/docs/reference/v2/jobs/query#useQueryCache
    """

    def _set_properties(self, api_response):
        """Update properties from resource in body of ``api_response``

        :type api_response: httplib2.Response
        :param api_response: response returned from an API call
        """
        self._properties.clear()
        self._properties.update(api_response)

    def _build_resource(self):
        """Generate a resource for :meth:`begin`."""
        resource = {'query': self.query}

        # Only include options the user actually set; absent keys get
        # server-side defaults.
        if self.default_dataset is not None:
            resource['defaultDataset'] = {
                'projectId': self.project,
                'datasetId': self.default_dataset.name,
            }

        if self.max_results is not None:
            resource['maxResults'] = self.max_results

        if self.preserve_nulls is not None:
            resource['preserveNulls'] = self.preserve_nulls

        if self.timeout_ms is not None:
            resource['timeoutMs'] = self.timeout_ms

        if self.use_query_cache is not None:
            resource['useQueryCache'] = self.use_query_cache

        if self.dry_run is not None:
            resource['dryRun'] = self.dry_run

        return resource

    def run(self, client=None):
        """API call:  run the query via a POST request

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/query

        :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.
        """
        client = self._require_client(client)
        path = '/projects/%s/queries' % (self.project,)
        api_response = client.connection.api_request(
            method='POST', path=path, data=self._build_resource())
        self._set_properties(api_response)

    def fetch_data(self, max_results=None, page_token=None, start_index=None,
                   timeout_ms=None, client=None):
        """API call:  fetch a page of query result data via a GET request

        See:
        https://cloud.google.com/bigquery/docs/reference/v2/jobs/getQueryResults

        :type max_results: integer or ``NoneType``
        :param max_results: maximum number of rows to return.

        :type page_token: string or ``NoneType``
        :param page_token: token representing a cursor into the table's rows.

        :type start_index: integer or ``NoneType``
        :param start_index: zero-based index of starting row

        :type timeout_ms: integer or ``NoneType``
        :param timeout_ms: timeout, in milliseconds, to wait for query to
                           complete

        :type client: :class:`gcloud.bigquery.client.Client` or ``NoneType``
        :param client: the client to use.  If not passed, falls back to the
                       ``client`` stored on the current dataset.

        :rtype: tuple
        :returns: ``(row_data, total_rows, page_token)``, where ``row_data``
                  is a list of tuples, one per result row, containing only
                  the values;  ``total_rows`` is a count of the total number
                  of rows in the table;  and ``page_token`` is an opaque
                  string which can be used to fetch the next batch of rows
                  (``None`` if no further batches can be fetched).
        :raises: ValueError if the query has not yet been executed.
        """
        if self.name is None:
            raise ValueError("Query not yet executed:  call 'run()'")

        client = self._require_client(client)
        params = {}

        if max_results is not None:
            params['maxResults'] = max_results

        if page_token is not None:
            params['pageToken'] = page_token

        if start_index is not None:
            params['startIndex'] = start_index

        if timeout_ms is not None:
            params['timeoutMs'] = timeout_ms

        path = '/projects/%s/queries/%s' % (self.project, self.name)
        response = client.connection.api_request(method='GET',
                                                 path=path,
                                                 query_params=params)
        self._set_properties(response)

        total_rows = response.get('totalRows')
        if total_rows is not None:
            # The API returns counts as strings; normalize to int.
            total_rows = int(total_rows)
        page_token = response.get('pageToken')
        rows_data = _rows_from_json(response.get('rows', ()), self.schema)

        return rows_data, total_rows, page_token
| {
"content_hash": "dfaaa91f8e2ee7609878bc59e9ed4312",
"timestamp": "",
"source": "github",
"line_count": 337,
"max_line_length": 90,
"avg_line_length": 33.961424332344215,
"alnum_prop": 0.6041065967671472,
"repo_name": "waprin/google-cloud-python",
"id": "4dd378af9c95abaf32b8f2a69dd022f4945f346c",
"size": "12042",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gcloud/bigquery/query.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Protocol Buffer",
"bytes": "158375"
},
{
"name": "Python",
"bytes": "2785380"
},
{
"name": "Shell",
"bytes": "3120"
}
],
"symlink_target": ""
} |
"""
Verifier that matches JSON content using extended JSON pointer syntax.
JSON pointer syntax is extended as follows:
1) A ~$xxx at the end will result in a test for the string "xxx" in the matching JSON object.
2) A "." as a path segment will match any JSON object member or array item.
"""
import json
from src.jsonPointer import JSONMatcher, JSONPointerMatchError
class Verifier(object):
    """
    Verifies that a JSON response body matches "exists"/"notexists" lists of
    extended JSON pointers (see the module docstring for the "~$value" and
    "." extensions).
    """

    def verify(self, manager, uri, response, respdata, args): #@UnusedVariable
        """Check the response against the pointer expressions in *args*.

        *manager* and *uri* are unused but part of the verifier interface.
        *response* supplies the HTTP status and headers, *respdata* the raw
        body. *args* may contain "status", "exists" and "notexists" lists.

        Returns a (success, details) tuple; *details* describes every
        failed check.
        """

        # Get arguments
        statusCodes = args.get("status", ["200", ])
        exists = args.get("exists", [])
        notexists = args.get("notexists", [])

        # status code must match
        if str(response.status) not in statusCodes:
            return False, "        HTTP Status Code Wrong: %d" % (response.status,)

        # look for response data
        if not respdata:
            return False, "        No response body"

        # Must be application/json
        ct = response.msg.getheaders("content-type")[0].split(";")[0]
        if ct != "application/json" and not ct.endswith("+json"):
            return False, "        Wrong Content-Type: %s" % (ct,)

        # Read in json
        try:
            j = json.loads(respdata)
        except Exception as e:
            # FIX: was the Python-2-only `except Exception, e` form.
            return False, "        Response data is not JSON data: %s" % (e,)

        # (Removed `_splitPathTests`, an inner helper that was never called.)

        result = True
        resulttxt = ""
        for jpath in exists:
            # Optional "~$value" suffix adds a value-membership test.
            if jpath.find("~$") != -1:
                path, value = jpath.split("~$")
            else:
                path, value = jpath, None
            try:
                jp = JSONMatcher(path)
            except Exception:
                result = False
                resulttxt += "        Invalid JSON pointer for %s\n" % (path,)
            else:
                try:
                    jobjs = jp.match(j)
                    if not jobjs:
                        result = False
                        resulttxt += "        Items not returned in JSON for %s\n" % (path,)
                    if value and value not in map(str, jobjs):
                        result = False
                        resulttxt += "        Item values not returned in JSON for %s\n" % (jpath,)
                except JSONPointerMatchError:
                    result = False
                    resulttxt += "        Items not returned in JSON for %s\n" % (path,)

        for jpath in notexists:
            if jpath.find("~$") != -1:
                path, value = jpath.split("~$")
            else:
                path, value = jpath, None
            try:
                jp = JSONMatcher(path)
            except Exception:
                result = False
                resulttxt += "        Invalid JSON pointer for %s\n" % (jpath,)
            else:
                try:
                    jobjs = jp.match(j)
                except JSONPointerMatchError:
                    # Pointer matched nothing: exactly what "notexists" wants.
                    pass
                else:
                    if len(jobjs):
                        resulttxt += "        Items returned in JSON for %s\n" % (jpath,)
                        result = False

        return result, resulttxt
| {
"content_hash": "4e84f277fce5fd813f56661c63ff0c65",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 99,
"avg_line_length": 36.043956043956044,
"alnum_prop": 0.48353658536585364,
"repo_name": "fpiotrow/caldav-tester-packaging",
"id": "5ce7b461936bf78657930192840e84631a560d57",
"size": "3888",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "verifiers/jsonPointerMatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "249685"
},
{
"name": "Shell",
"bytes": "6275"
}
],
"symlink_target": ""
} |
from plenum.common.types import f
from plenum.common.constants import TARGET_NYM, TXN_TYPE, RAW, DATA, NAME, \
VERSION, ORIGIN
from indy_client.test.state_proof.helper import check_valid_proof, \
sdk_submit_operation_and_get_replies
from indy_common.constants import GET_ATTR, GET_NYM, GET_SCHEMA, GET_CLAIM_DEF, CLAIM_DEF_FROM, CLAIM_DEF_SCHEMA_REF, \
CLAIM_DEF_SIGNATURE_TYPE, SCHEMA_NAME, SCHEMA_VERSION, SCHEMA_ATTR_NAMES
# fixtures, do not remove
from indy_client.test.test_nym_attrib import \
sdk_added_raw_attribute, attributeName, attributeValue, attributeData
def check_no_data_and_valid_proof(replies):
    """Assert every reply carries no DATA but does carry a valid state proof."""
    for reply in replies:
        reply_body = reply[1]
        assert reply_body[f.RESULT.nm].get(DATA) is None
        check_valid_proof(reply_body)
def test_state_proof_returned_for_missing_attr(looper,
                                               attributeName,
                                               sdk_pool_handle,
                                               sdk_wallet_trust_anchor):
    """
    Checks that a state proof accompanies the reply to a GET_ATTR request
    for an attribute that was never written.
    """
    submitter_did = sdk_wallet_trust_anchor[1]
    operation = {
        TXN_TYPE: GET_ATTR,
        TARGET_NYM: submitter_did,
        RAW: attributeName,
    }
    replies = sdk_submit_operation_and_get_replies(
        looper, sdk_pool_handle, sdk_wallet_trust_anchor, operation)
    check_no_data_and_valid_proof(replies)
def test_state_proof_returned_for_missing_nym(looper,
                                              sdk_pool_handle,
                                              sdk_wallet_trust_anchor,
                                              sdk_user_wallet_a):
    """
    Checks that a state proof accompanies the reply to a GET_NYM request
    for a DID that does not exist on the ledger.
    """
    # Mangle a known DID so it is guaranteed not to exist.
    missing_dest = sdk_user_wallet_a[1][:-3] + "fff"
    operation = {
        TARGET_NYM: missing_dest,
        TXN_TYPE: GET_NYM,
    }
    replies = sdk_submit_operation_and_get_replies(
        looper, sdk_pool_handle, sdk_wallet_trust_anchor, operation)
    check_no_data_and_valid_proof(replies)
def test_state_proof_returned_for_missing_schema(looper,
                                                 sdk_pool_handle,
                                                 sdk_wallet_trust_anchor):
    """
    Checks that a state proof accompanies the reply to a GET_SCHEMA request
    for a schema that was never written.
    """
    _, submitter_did = sdk_wallet_trust_anchor
    operation = {
        TARGET_NYM: submitter_did,
        TXN_TYPE: GET_SCHEMA,
        DATA: {
            SCHEMA_NAME: "test_schema",
            SCHEMA_VERSION: "1.0",
        }
    }
    replies = sdk_submit_operation_and_get_replies(
        looper, sdk_pool_handle, sdk_wallet_trust_anchor, operation)
    for reply in replies:
        reply_body = reply[1]
        # Missing schema: no attribute names may be present in the data.
        assert SCHEMA_ATTR_NAMES not in reply_body[f.RESULT.nm][DATA]
        check_valid_proof(reply_body)
def test_state_proof_returned_for_missing_claim_def(looper,
                                                    sdk_pool_handle,
                                                    sdk_wallet_trust_anchor):
    """
    Checks that a state proof accompanies the reply to a GET_CLAIM_DEF
    request for a claim definition that does not exist.
    """
    _, submitter_did = sdk_wallet_trust_anchor
    operation = {
        CLAIM_DEF_FROM: submitter_did,
        TXN_TYPE: GET_CLAIM_DEF,
        CLAIM_DEF_SCHEMA_REF: 12,
        CLAIM_DEF_SIGNATURE_TYPE: 'CL',
    }
    replies = sdk_submit_operation_and_get_replies(
        looper, sdk_pool_handle, sdk_wallet_trust_anchor, operation)
    check_no_data_and_valid_proof(replies)
| {
"content_hash": "794a32227975fd3e03a5886f294baf69",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 119,
"avg_line_length": 37.783018867924525,
"alnum_prop": 0.5438202247191011,
"repo_name": "spivachuk/sovrin-node",
"id": "e7d887a663b4dd28218e169fc474306a9f1e1dc8",
"size": "4005",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "indy_client/test/state_proof/test_state_proof_for_missing_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3329"
},
{
"name": "Dockerfile",
"bytes": "7269"
},
{
"name": "Groovy",
"bytes": "8984"
},
{
"name": "Makefile",
"bytes": "11151"
},
{
"name": "Python",
"bytes": "1681637"
},
{
"name": "Ruby",
"bytes": "65393"
},
{
"name": "Rust",
"bytes": "25532"
},
{
"name": "Shell",
"bytes": "132633"
}
],
"symlink_target": ""
} |
import logging
import os
import traceback
from typing import Any, Dict, Iterable, List, Mapping, Tuple, Union
from googleapiclient.discovery import Resource
from googleapiclient.errors import HttpError
from service_framework import service_builder
from service_framework import services
from classes import decorators, firestore, gmail, report_type
from auth.credentials import Credentials
from auth.secret_manager import SecretManager
from classes.exceptions import CredentialsError
from classes.report_config import ReportConfig
class Fetcher(object):
  """Thin wrapper around Google API request execution with retry support."""

  @decorators.retry(exceptions=HttpError, tries=3, backoff=2)
  def fetch(self, method, **kwargs: Mapping[str, str]) -> Dict[str, Any]:
    """Execute ``method(**kwargs)`` and return its result.

    Retries up to 3 times on HttpError (exponential backoff).
    """
    return method(**kwargs).execute()

  def error_to_trace(self, error: Exception = None) -> str:
    """Pulls a python stack trace from an error.

    Args:
        error (Exception, optional): the exception. Defaults to None.

    Returns:
        str: the stack trace (empty string when no error is given)
    """
    if not error:
      return ''
    formatted = traceback.TracebackException.from_exception(error).format()
    if not formatted:
      return ''
    return '\n\nTrace:\n\n' + ''.join(formatted)
class ReportFetcher(object):
  """Abstract base for product-specific report fetchers.

  Concrete subclasses supply the product API plumbing; the stubs below
  define the interface the rest of Report2BQ relies on.
  """
  report_type: report_type.Type
  service_definition: services.Service

  chunk_multiplier = int(os.environ.get('CHUNK_MULTIPLIER', 64))
  email = None
  project = None
  profile = None

  @property
  def service(self) -> Resource:
    """Creates the API service for the product.

    Returns:
        Resource: the service definition
    """
    credentials = Credentials(datastore=SecretManager,
                              email=self.email,
                              project=self.project)
    return service_builder.build_service(service=self.report_type.service,
                                         key=credentials.credentials)

  def read_header(self, report_details: ReportConfig) -> Tuple[List[str],
                                                               List[str]]:
    """Reads the header of the report CSV file.

    Args:
        report_details (dict): the report definition

    Returns:
        Tuple[List[str], List[str]]: the csv headers and column types
    """
    pass

  def stream_to_gcs(self, bucket: str, report_details: ReportConfig) -> None:
    """Streams the report CSV to Cloud Storage.

    Args:
        bucket (str): GCS Bucket
        report_details (ReportConfig): Report definition
    """
    pass

  def normalize_report_details(self,
                               report_object: Dict[str, Any],
                               report_id: str) -> Dict[str, Any]:
    """Normalizes the api format report into a flattened data structure.

    Args:
        report_object: Report details from api queries method
        report_id: the report id.

    Returns:
        result (Dict): the normalized data structure
    """
    pass

  def fetch_report_config(self, report_object: Dict[str, Any],
                          report_id: str) -> Dict[str, Any]:
    """Fetches a report configuration.

    Normalizes the latest product-side report configuration and merges the
    Report2BQ state fields from the stored object back into it.

    Args:
        report_object (Dict[str, Any]): the existing report object
        report_id (str): the report id

    Returns:
        Dict[str, Any]: the updated configuration
    """
    config = self.normalize_report_details(report_object=report_object,
                                           report_id=report_id)
    # Carry over the Report2BQ-managed state fields, if present.
    for state_key in ('email', 'dest_dataset', 'dest_project', 'dest_table',
                      'notifier', 'schema', 'append', 'force', 'infer_schema'):
      if state_key in report_object:
        config[state_key] = report_object[state_key]
    return config

  def get_latest_report_file(self, report_id: str) -> Dict[str, Any]:
    """Fetch the last known successful report's definition.

    Args:
        report_id: report id

    Returns:
        result (Dict): the last known report, or an empty Dict if it has
          not yet run.
    """
    pass

  def run_report(self, report_id: int,
                 asynchronous: bool = True) -> Dict[str, Any]:
    """Runs a report on the product.

    Args:
        report_id (int): the report to run.
        asynchronous (bool): fire and forget or wait for the result.

    Returns:
        Dict[str, Any]: the run result
    """
    pass

  def check_running_report(self, config: Dict[str, Any]):
    """Checks the state of a currently-running report (product-specific)."""
    pass

  def get_reports(self) -> Dict[str, Any]:
    """Fetches a list of reports for current user.

    Returns:
        result (Dict): the list of reports for the current user.
    """
    pass

  def get_report_definition(self,
                            report_id: int,
                            fields: str = None) -> Mapping[str, Any]:
    """Fetches the report definition.

    Args:
        report_id: report id
        fields: optional field mask restricting the returned definition.

    Returns:
        the report definition
    """
    pass

  def create_report(self,
                    report: Mapping[str, Any]) -> Union[str, Mapping[str, Any]]:
    """Creates a new report.

    Args:
        report (Mapping[str, Any]): the report definition

    Returns:
        Union[str, Mapping[str, Any]]: the report, or the error.
    """
    pass
class ReportRunner(object):
  """Base class for product-specific report runners."""
  report_type = None
  project = None
  email = None

  @decorators.lazy_property
  def firestore(self) -> firestore.Firestore:
    # Lazily-created Firestore client bound to this runner's project/email.
    return firestore.Firestore(project=self.project, email=self.email)

  def run(self, unattended: bool):
    """Runs the report.

    Args:
        unattended (bool): wait for the result or just run and log for the run
                           monitor.
    """
    pass

  def _email_error(self,
                   message: str,
                   email: str = None,
                   error: Exception = None) -> None:
    """Emails the error to the owner, and the administrator if defined.

    Args:
        message (str): the message
        email (str, optional): report owner email. Defaults to None.
        error (Exception, optional): any error. Defaults to None.
    """
    to = [email] if email else []
    # BUG FIX: was `self.FIRESTORE`, which is not defined anywhere on this
    # class -- the lazy property above is lowercase `firestore`.
    administrator = \
        os.environ.get('ADMINISTRATOR_EMAIL') or \
        self.firestore.get_document(report_type.Type._ADMIN,
                                    'admin').get('email')
    cc = [administrator] if administrator else []

    try:
      mailer_credentials = Credentials(
          datastore=SecretManager,
          email=email, project=self.project)
    except CredentialsError:
      # Fall back to the administrator's credentials, if one is configured.
      mailer_credentials = \
          Credentials(datastore=SecretManager,
                      email=administrator,
                      project=self.project) if administrator else None

    body = f'{message}{gmail.error_to_trace(error)}'
    if mailer_credentials and (to or cc):
      message = gmail.GMailMessage(to=to,
                                   cc=cc,
                                   subject='Error in report_loader',
                                   body=body,
                                   project=self.project)
      gmail.send_message(message=message,
                         credentials=mailer_credentials)
    else:
      logging.error('Unable to email error %s', body)
def strip_nulls(value: Iterable) -> Iterable:
  """Removes null values from iterables.

  Recursively remove all None values from dictionaries and lists, and
  return the result as a new dictionary or list; any other value is
  returned unchanged.

  Args:
      value (Any): any list or dict to have empty values removed.
  """
  if isinstance(value, dict):
    return {key: strip_nulls(item)
            for key, item in value.items() if item is not None}
  if isinstance(value, list):
    return [strip_nulls(item) for item in value if item is not None]
  return value
def error_to_trace(error: Exception = None) -> str:
  """Pulls a python stack trace from an error.

  Args:
      error (Exception, optional): the exception. Defaults to None.

  Returns:
      str: the stack trace, prefixed with a "Trace" banner; empty string
      when no error is supplied.
  """
  if not error:
    return ''
  trace_lines = traceback.TracebackException.from_exception(error).format()
  if not trace_lines:
    return ''
  return '\n\nTrace:\n\n' + ''.join(trace_lines)
| {
"content_hash": "9e763aac58f518088ea50e99bcab669d",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 80,
"avg_line_length": 28.698245614035088,
"alnum_prop": 0.6076537474018828,
"repo_name": "google/report2bq",
"id": "d4c020ab236306f4626777c2549871ef534ebe5f",
"size": "8755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "application/classes/__init__.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "689"
},
{
"name": "HTML",
"bytes": "13362"
},
{
"name": "JavaScript",
"bytes": "375"
},
{
"name": "Python",
"bytes": "435292"
},
{
"name": "Shell",
"bytes": "35343"
}
],
"symlink_target": ""
} |
"""
OdCpuDmaLatency - command ``/usr/bin/od -An -t d /dev/cpu_dma_latency``
=======================================================================
This module provides the class ``OdCpuDmaLatency`` which processes
``/usr/bin/od -An -t d /dev/cpu_dma_latency`` command output.
"""
from insights import parser, CommandParser
from insights.specs import Specs
from insights.parsers import SkipException
@parser(Specs.od_cpu_dma_latency)
class OdCpuDmaLatency(CommandParser):
    """
    Parses the output of the `/usr/bin/od -An -t d /dev/cpu_dma_latency`
    command.

    Typical output of the command is::

             2000000000

    Attributes:
        force_latency(int): A integer containing the value of force_latency.

    Examples:
        >>> type(cpu_dma_latency)
        <class 'insights.parsers.od_cpu_dma_latency.OdCpuDmaLatency'>
        >>> cpu_dma_latency.force_latency
        2000000000
    """

    def parse_content(self, content):
        # The only expected content is a single decimal number; anything
        # else (including empty output) is unparseable.
        if not content or not content[0].isdigit():
            raise SkipException('Nothing to parse.')
        self.force_latency = int(content[0])
| {
"content_hash": "fc6c899613268fe7c9a87e97a3cb3ed1",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 88,
"avg_line_length": 30.555555555555557,
"alnum_prop": 0.6245454545454545,
"repo_name": "RedHatInsights/insights-core",
"id": "b8b13e0198e4afea47100db37a8e7019fbc88844",
"size": "1100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "insights/parsers/od_cpu_dma_latency.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "220"
},
{
"name": "Python",
"bytes": "8219046"
},
{
"name": "Shell",
"bytes": "1754"
}
],
"symlink_target": ""
} |
import sys
import math
import random
import string
import time
import types
import Tkinter
_Windows = sys.platform == 'win32'  # True if on Win95/98/NT

# Module-level Tkinter state shared by all drawing helpers below.
_root_window = None      # The root window for graphics output
_canvas = None           # The canvas which holds graphics
_canvas_xs = None        # Size of canvas object
_canvas_ys = None
_canvas_x = None         # Current position on canvas
_canvas_y = None
_canvas_col = None       # Current colour (set to black below)
_canvas_tsize = 12       # Default text size
_canvas_tserifs = 0      # Default serif setting
def formatColor(r, g, b):
    """Convert r, g, b floats in [0, 1] to a Tk '#rrggbb' colour string."""
    channels = tuple(int(component * 255) for component in (r, g, b))
    return '#%02x%02x%02x' % channels
def colorToVector(color):
    """Convert a '#rrggbb' colour string back to per-channel floats.

    NOTE(review): divides by 256.0 while formatColor multiplies by 255, so
    the round trip is slightly lossy -- confirm before relying on exact
    values.
    """
    channel_hex = [color[1:3], color[3:5], color[5:7]]
    return map(lambda pair: int(pair, 16) / 256.0, channel_hex)
# Default canvas text fonts differ per platform.
if _Windows:
    _canvas_tfonts = ['times new roman', 'lucida console']
else:
    _canvas_tfonts = ['times', 'lucidasans-24']
    pass # XXX need defaults here
def sleep(secs):
    """Pause for *secs* seconds, keeping the Tk event loop alive when a
    graphics window exists."""
    global _root_window
    if _root_window is None:
        # No GUI yet: a plain blocking sleep is fine.
        time.sleep(secs)
        return
    _root_window.update_idletasks()
    _root_window.after(int(1000 * secs), _root_window.quit)
    _root_window.mainloop()
def begin_graphics(width=640, height=480, color=formatColor(0, 0, 0), title=None):
    """Create (or recreate) the root window and canvas.

    Destroys any existing window first, stores the geometry and background
    colour in module globals, then installs keyboard and mouse bindings.
    """
    global _root_window, _canvas, _canvas_x, _canvas_y, _canvas_xs, _canvas_ys, _bg_color

    # Check for duplicate call
    if _root_window is not None:
        # Lose the window.
        _root_window.destroy()

    # Save the canvas size parameters
    _canvas_xs, _canvas_ys = width - 1, height - 1
    _canvas_x, _canvas_y = 0, _canvas_ys
    _bg_color = color

    # Create the root window
    _root_window = Tkinter.Tk()
    _root_window.protocol('WM_DELETE_WINDOW', _destroy_window)
    _root_window.title(title or 'Graphics Window')
    _root_window.resizable(0, 0)

    # Create the canvas object
    try:
        _canvas = Tkinter.Canvas(_root_window, width=width, height=height)
        _canvas.pack()
        draw_background()
        _canvas.update()
    except:
        # Any failure tears down the half-built window state before
        # re-raising so a later call can start clean.
        _root_window = None
        raise

    # Bind to key-down and key-up events
    _root_window.bind( "<KeyPress>", _keypress )
    _root_window.bind( "<KeyRelease>", _keyrelease )
    _root_window.bind( "<FocusIn>", _clear_keys )
    _root_window.bind( "<FocusOut>", _clear_keys )
    _root_window.bind( "<Button-1>", _leftclick )
    _root_window.bind( "<Button-2>", _rightclick )
    _root_window.bind( "<Button-3>", _rightclick )
    _root_window.bind( "<Control-Button-1>", _ctrl_leftclick)
    _clear_keys()
# Most-recent click locations, recorded by the handlers below and consumed
# (reset to None) by wait_for_click().
_leftclick_loc = None
_rightclick_loc = None
_ctrl_leftclick_loc = None
def _leftclick(event):
    """Button-1 handler: record the click position for wait_for_click()."""
    global _leftclick_loc
    _leftclick_loc = (event.x, event.y)
def _rightclick(event):
    """Button-2/3 handler: record the click position for wait_for_click()."""
    global _rightclick_loc
    _rightclick_loc = (event.x, event.y)
def _ctrl_leftclick(event):
    """Ctrl+Button-1 handler: record the click position for wait_for_click()."""
    global _ctrl_leftclick_loc
    _ctrl_leftclick_loc = (event.x, event.y)
def wait_for_click():
    """Block until a mouse click occurs; returns ((x, y), button_name)."""
    global _leftclick_loc
    global _rightclick_loc
    global _ctrl_leftclick_loc
    while True:
        if _leftclick_loc is not None:
            clicked = _leftclick_loc
            _leftclick_loc = None
            return clicked, 'left'
        if _rightclick_loc is not None:
            clicked = _rightclick_loc
            _rightclick_loc = None
            return clicked, 'right'
        if _ctrl_leftclick_loc is not None:
            clicked = _ctrl_leftclick_loc
            _ctrl_leftclick_loc = None
            return clicked, 'ctrl_left'
        sleep(0.05)
def draw_background():
    """Fill the whole canvas with the stored background colour."""
    corners = [(0,0), (0, _canvas_ys), (_canvas_xs, _canvas_ys), (_canvas_xs, 0)]
    polygon(corners, _bg_color, fillColor=_bg_color, filled=True, smoothed=False)
def _destroy_window(event=None):
    """Window-manager close handler: exits the whole program."""
    sys.exit(0)
#    global _root_window
#    _root_window.destroy()
#    _root_window = None
    #print "DESTROY"
def end_graphics():
    """Tear down the window and reset all module graphics state."""
    global _root_window, _canvas, _mouse_enabled
    try:
        try:
            sleep(1)
            if _root_window != None:
                _root_window.destroy()
        except SystemExit, e:
            print 'Ending graphics raised an exception:', e
    finally:
        # Always clear the globals, even if destroy() failed.
        _root_window = None
        _canvas = None
        _mouse_enabled = 0
        _clear_keys()
def clear_screen(background=None):
    """Erase every canvas item and repaint the background.

    NOTE(review): the *background* argument is currently ignored.
    """
    global _canvas_x, _canvas_y
    _canvas.delete('all')
    draw_background()
    _canvas_x, _canvas_y = 0, _canvas_ys
def polygon(coords, outlineColor, fillColor=None, filled=1, smoothed=1, behind=0, width=1):
    """Draw a polygon from (x, y) pairs; returns the canvas item id."""
    # Tk wants a flat [x0, y0, x1, y1, ...] list.
    flat = []
    for point in coords:
        flat.extend([point[0], point[1]])
    if fillColor is None:
        fillColor = outlineColor
    if filled == 0:
        fillColor = ""
    item = _canvas.create_polygon(flat, outline=outlineColor, fill=fillColor,
                                  smooth=smoothed, width=width)
    if behind > 0:
        _canvas.tag_lower(item, behind)  # Higher should be more visible
    return item
def square(pos, r, color, filled=1, behind=0):
    """Draw an axis-aligned square of 'radius' r centred on pos."""
    cx, cy = pos
    corners = [(cx - r, cy - r), (cx + r, cy - r),
               (cx + r, cy + r), (cx - r, cy + r)]
    return polygon(corners, color, color, filled, 0, behind=behind)
def circle(pos, r, outlineColor, fillColor, endpoints=None, style='pieslice', width=2):
    """Draw an arc; *endpoints* is an optional [start, end] in degrees."""
    cx, cy = pos
    left, right = cx - r - 1, cx + r
    top, bottom = cy - r - 1, cy + r
    arc = [0, 359] if endpoints is None else list(endpoints)
    # Normalise so the extent is non-negative.
    while arc[0] > arc[1]:
        arc[1] = arc[1] + 360
    return _canvas.create_arc(left, top, right, bottom, outline=outlineColor,
                              fill=fillColor, extent=arc[1] - arc[0],
                              start=arc[0], style=style, width=width)
def image(pos, file="../../blueghost.gif"):
    """Place a GIF image with its top-left (NW) corner at *pos*."""
    x, y = pos
    # img = PhotoImage(file=file)
    return _canvas.create_image(x, y, image = Tkinter.PhotoImage(file=file), anchor = Tkinter.NW)
def refresh():
    """Flush pending canvas redraws without entering the event loop."""
    _canvas.update_idletasks()
def moveCircle(id, pos, r, endpoints=None):
    """Reposition an existing arc item so it is centred on *pos*.

    NOTE(review): the start/extent edit is commented out below to avoid an
    Xorg crash, so only the position (not the sweep) is updated.
    """
    global _canvas_x, _canvas_y
    x, y = pos
#    x0, x1 = x - r, x + r + 1
#    y0, y1 = y - r, y + r + 1
    x0, x1 = x - r - 1, x + r
    y0, y1 = y - r - 1, y + r
    if endpoints == None:
        e = [0, 359]
    else:
        e = list(endpoints)
    while e[0] > e[1]: e[1] = e[1] + 360

    # Skip Xorg Crash
    # edit(id, ('start', e[0]), ('extent', e[1] - e[0]))
    move_to(id, x0, y0)
def edit(id, *args):
    """Apply (option, value) pairs to a canvas item via itemconfigure."""
    _canvas.itemconfigure(id, **dict(args))
def text(pos, color, contents, font='Helvetica', size=12, style='normal', anchor="nw"):
    """Draw *contents* at *pos*; returns the canvas text item id."""
    global _canvas_x, _canvas_y
    x, y = pos
    font = (font, str(size), style)  # Tk font spec: (family, size, style)
    return _canvas.create_text(x, y, fill=color, text=contents, font=font, anchor=anchor)
def changeText(id, newText, font=None, size=12, style='normal'):
    """Replace the text of an existing text item; optionally change its font."""
    _canvas.itemconfigure(id, text=newText)
    if font != None:
        _canvas.itemconfigure(id, font=(font, '-%d' % size, style))
def changeColor(id, newColor):
    """Change the fill colour of an existing canvas item."""
    _canvas.itemconfigure(id, fill=newColor)
def line(here, there, color=formatColor(0, 0, 0), width=2):
    """Draw a line segment from *here* to *there*; returns the item id."""
    start_x, start_y = here[0], here[1]
    end_x, end_y = there[0], there[1]
    return _canvas.create_line(start_x, start_y, end_x, end_y,
                               fill=color, width=width)
##############################################################################
### Keypress handling ########################################################
##############################################################################
# We bind to key-down and key-up events.
_keysdown = {}      # keysym -> 1 while the key is physically held
_keyswaiting = {}   # keysym -> 1 until consumed by keys_waiting()
# This holds an unprocessed key release. We delay key releases by up to
# one call to keys_pressed() to get round a problem with auto repeat.
_got_release = None
def _keypress(event):
    """KeyPress handler: mark the key as held down and as waiting."""
    global _got_release
    #remap_arrows(event)
    _keysdown[event.keysym] = 1
    _keyswaiting[event.keysym] = 1
    # print event.char, event.keycode
    # A new press cancels any pending delayed release (auto-repeat fix).
    _got_release = None
def _keyrelease(event):
    """KeyRelease handler: forget the key and note a pending release."""
    global _got_release
    #remap_arrows(event)
    try:
        del _keysdown[event.keysym]
    except KeyError:
        # Release without a matching press (e.g. after a focus change) --
        # ignore.  (Was a bare `except:`, which also hid unrelated errors.)
        pass
    _got_release = 1
def remap_arrows(event):
    """Translate arrow-key presses into the 'a'/'w'/'d'/'s' movement keys.

    # TURN ARROW PRESSES INTO LETTERS (SHOULD BE IN KEYBOARD AGENT)
    """
    if event.char in ['a', 's', 'd', 'w']:
        return
    keycode_to_char = {
        37: 'a', 101: 'a',   # LEFT ARROW (win / x)
        38: 'w', 99: 'w',    # UP ARROW
        39: 'd', 102: 'd',   # RIGHT ARROW
        40: 's', 104: 's',   # DOWN ARROW
    }
    if event.keycode in keycode_to_char:
        event.char = keycode_to_char[event.keycode]
def _clear_keys(event=None):
    """Forget all pressed/waiting keys (also bound to focus changes)."""
    global _keysdown, _got_release, _keyswaiting
    _keysdown = {}
    _keyswaiting = {}
    _got_release = None
def keys_pressed(d_o_e=Tkinter.tkinter.dooneevent,
                 d_w=Tkinter.tkinter.DONT_WAIT):
    """Return the keysyms currently held down, pumping Tk events once.

    Pumps a second time when a release is pending so auto-repeat does not
    make a held key flicker off.
    """
    d_o_e(d_w)
    if _got_release:
        d_o_e(d_w)
    return _keysdown.keys()
def keys_waiting():
    """Return keys pressed since the last call, then forget them."""
    global _keyswaiting
    keys = _keyswaiting.keys()
    _keyswaiting = {}
    return keys
# Block for a list of keys...
def wait_for_keys():
    """Busy-wait (with short sleeps) until at least one key is pressed."""
    keys = []
    while keys == []:
        keys = keys_pressed()
        sleep(0.05)
    return keys
def remove_from_screen(x,
                       d_o_e=Tkinter.tkinter.dooneevent,
                       d_w=Tkinter.tkinter.DONT_WAIT):
    """Delete a canvas item and pump the Tk event queue once."""
    _canvas.delete(x)
    d_o_e(d_w)
def _adjust_coords(coord_list, x, y):
for i in range(0, len(coord_list), 2):
coord_list[i] = coord_list[i] + x
coord_list[i + 1] = coord_list[i + 1] + y
return coord_list
def move_to(object, x, y=None,
            d_o_e=Tkinter.tkinter.dooneevent,
            d_w=Tkinter.tkinter.DONT_WAIT):
    """Move a canvas item so its first coordinate pair sits at (x, y).

    *x* may be a single (x, y) pair when *y* is omitted. All of the item's
    coordinates are shifted by the same offset.
    """
    if y is None:
        try:
            x, y = x
        except (TypeError, ValueError):
            # BUG FIX: was `raise 'incomprehensible coordinates'` -- raising
            # a string is itself a TypeError on any modern interpreter.
            raise Exception('incomprehensible coordinates')

    horiz = True
    newCoords = []
    current_x, current_y = _canvas.coords(object)[0:2] # first point
    for coord in _canvas.coords(object):
        # Alternate x/y entries in the flat coordinate list.
        if horiz:
            inc = x - current_x
        else:
            inc = y - current_y
        horiz = not horiz
        newCoords.append(coord + inc)

    _canvas.coords(object, *newCoords)
    d_o_e(d_w)
def move_by(object, x, y=None,
            d_o_e=Tkinter.tkinter.dooneevent,
            d_w=Tkinter.tkinter.DONT_WAIT, lift=False):
    """Translate a canvas item by (x, y); optionally raise it to the top.

    *x* may be a single (x, y) pair when *y* is omitted.
    """
    if y is None:
        try:
            x, y = x
        except:
            raise Exception('incomprehensible coordinates')

    shifted = []
    use_x = True
    for coord in _canvas.coords(object):
        # Alternate x/y entries in the flat coordinate list.
        shifted.append(coord + (x if use_x else y))
        use_x = not use_x

    _canvas.coords(object, *shifted)
    d_o_e(d_w)
    if lift:
        _canvas.tag_raise(object)
def writePostscript(filename):
    """Writes the current canvas to a postscript file."""
    # BUG FIX: used the long-removed `file()` builtin and never closed the
    # handle on error; open() + try/finally guarantees release.
    psfile = open(filename, 'w')
    try:
        psfile.write(_canvas.postscript(pageanchor='sw',
                                        y='0.c',
                                        x='0.c'))
    finally:
        psfile.close()
# Unit-scale ghost outline (x, y pairs) centred on the origin; callers
# scale and translate it before drawing (see the __main__ demo below).
ghost_shape = [
    (0, - 0.5),
    (0.25, - 0.75),
    (0.5, - 0.5),
    (0.75, - 0.75),
    (0.75, 0.5),
    (0.5, 0.75),
    (- 0.5, 0.75),
    (- 0.75, 0.5),
    (- 0.75, - 0.75),
    (- 0.5, - 0.5),
    (- 0.25, - 0.75)
]
if __name__ == '__main__':
    # Smoke test: open a window, draw a ghost and an arc, pause briefly.
    begin_graphics()
    clear_screen()
    ghost_shape = [(x * 10 + 20, y * 10 + 20) for x, y in ghost_shape]
    g = polygon(ghost_shape, formatColor(1, 1, 1))
    move_to(g, (50, 50))
    circle((150, 150), 20, formatColor(0.7, 0.3, 0.0), endpoints=[15, - 15])
    sleep(2)
| {
"content_hash": "269252279e4d41f2a1f42632a52b5902",
"timestamp": "",
"source": "github",
"line_count": 385,
"max_line_length": 104,
"avg_line_length": 28.948051948051948,
"alnum_prop": 0.570569762225213,
"repo_name": "chenhw2/AI_Pacman",
"id": "f6b1ab37015b03a55aabbd5387e8cea36963576f",
"size": "11808",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "search/graphicsUtils.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "233875"
}
],
"symlink_target": ""
} |
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration for the ``mining`` app.

    Creates the Restaurant, Review, Rank and GeoRegion tables.
    NOTE: South migrations are auto-generated; keep ``forwards`` and
    ``backwards`` symmetrical if editing by hand.
    """

    def forwards(self, orm):
        # Adding model 'Restaurant'
        db.create_table(u'mining_restaurant', (
            ('business_id', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50, primary_key=True)),
            ('name', self.gf('django.db.models.fields.CharField')(max_length=100)),
            ('quadrant', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('full_address', self.gf('django.db.models.fields.CharField')(max_length=150)),
            ('city', self.gf('django.db.models.fields.CharField')(max_length=25)),
            ('state', self.gf('django.db.models.fields.CharField')(max_length=10)),
            ('latitude', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('longitude', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('stars', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('review_count', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('categories', self.gf('djorm_pgarray.fields.ArrayField')(default=None, dbtype='varchar(255)', null=True, blank=True)),
            ('open_status', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ))
        db.send_create_signal(u'mining', ['Restaurant'])

        # Adding model 'Review'
        db.create_table(u'mining_review', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('restaurant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['mining.Restaurant'], null=True)),
            ('user_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('stars', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('text', self.gf('django.db.models.fields.TextField')()),
            ('date', self.gf('django.db.models.fields.DateField')()),
            ('votes', self.gf('djorm_pgarray.fields.ArrayField')(default=None, dbtype='varchar(255)', null=True, blank=True)),
        ))
        db.send_create_signal(u'mining', ['Review'])

        # Adding model 'Rank'
        db.create_table(u'mining_rank', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('rank', self.gf('django.db.models.fields.IntegerField')(default=0)),
            ('business_id', self.gf('django.db.models.fields.CharField')(max_length=50)),
            ('effectiveness', self.gf('django.db.models.fields.FloatField')(default=0.0)),
        ))
        db.send_create_signal(u'mining', ['Rank'])

        # Adding model 'GeoRegion'
        db.create_table(u'mining_georegion', (
            ('quadrant', self.gf('django.db.models.fields.IntegerField')(default=0, primary_key=True)),
            ('x1', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('x2', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('y1', self.gf('django.db.models.fields.FloatField')(default=0.0)),
            ('y2', self.gf('django.db.models.fields.FloatField')(default=0.0)),
        ))
        db.send_create_signal(u'mining', ['GeoRegion'])

    def backwards(self, orm):
        # Drops everything created by forwards(), in the same order.
        # Deleting model 'Restaurant'
        db.delete_table(u'mining_restaurant')

        # Deleting model 'Review'
        db.delete_table(u'mining_review')

        # Deleting model 'Rank'
        db.delete_table(u'mining_rank')

        # Deleting model 'GeoRegion'
        db.delete_table(u'mining_georegion')

    # Frozen ORM snapshot used by South at migration time.
    models = {
        u'mining.georegion': {
            'Meta': {'object_name': 'GeoRegion'},
            'quadrant': ('django.db.models.fields.IntegerField', [], {'default': '0', 'primary_key': 'True'}),
            'x1': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'x2': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'y1': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'y2': ('django.db.models.fields.FloatField', [], {'default': '0.0'})
        },
        u'mining.rank': {
            'Meta': {'object_name': 'Rank'},
            'business_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'effectiveness': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'rank': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'mining.restaurant': {
            'Meta': {'object_name': 'Restaurant'},
            'business_id': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50', 'primary_key': 'True'}),
            'categories': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "'varchar(255)'", 'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '25'}),
            'full_address': ('django.db.models.fields.CharField', [], {'max_length': '150'}),
            'latitude': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'longitude': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'open_status': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'quadrant': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'review_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'stars': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '10'})
        },
        u'mining.review': {
            'Meta': {'object_name': 'Review'},
            'date': ('django.db.models.fields.DateField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'restaurant': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mining.Restaurant']", 'null': 'True'}),
            'stars': ('django.db.models.fields.FloatField', [], {'default': '0.0'}),
            'text': ('django.db.models.fields.TextField', [], {}),
            'user_id': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
            'votes': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "'varchar(255)'", 'null': 'True', 'blank': 'True'})
        }
    }

    complete_apps = ['mining']
"content_hash": "073e9c23b440b7d4763fa1d2a324012d",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 148,
"avg_line_length": 57.525862068965516,
"alnum_prop": 0.5702083021129927,
"repo_name": "thebenwaters/yelp-dataset-geographical-topic-modeling",
"id": "d2a417901c660d3157056f2b204bb0ecaf330e13",
"size": "6697",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mining/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "55091"
}
],
"symlink_target": ""
} |
import copy
from datetime import timedelta # noqa
from django.conf import settings
from django.utils import datetime_safe
from keystoneclient import access
from keystoneclient.v2_0 import ec2
from keystoneclient.v2_0 import roles
from keystoneclient.v2_0 import tenants
from keystoneclient.v2_0 import users
from keystoneclient.v3 import domains
from keystoneclient.v3 import groups
from keystoneclient.v3 import role_assignments
from openstack_auth import user as auth_user
from openstack_dashboard.test.test_data import utils
# Dummy service catalog with all service.
# All endpoint URLs should point to example.com.
# Try to keep them as accurate to real data as possible (ports, URIs, etc.)
SERVICE_CATALOG = [
    # Compute (nova) -- the only entry with two regions; used by tests
    # that exercise region-aware endpoint selection.
    {"type": "compute",
     "name": "nova",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8774/v2",
          "internalURL": "http://int.nova.example.com:8774/v2",
          "publicURL": "http://public.nova.example.com:8774/v2"},
         {"region": "RegionTwo",
          "adminURL": "http://admin.nova2.example.com:8774/v2",
          "internalURL": "http://int.nova2.example.com:8774/v2",
          "publicURL": "http://public.nova2.example.com:8774/v2"}]},
    # Block storage (cinder); two regions sharing identical URLs.
    {"type": "volume",
     "name": "cinder",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8776/v1",
          "internalURL": "http://int.nova.example.com:8776/v1",
          "publicURL": "http://public.nova.example.com:8776/v1"},
         {"region": "RegionTwo",
          "adminURL": "http://admin.nova.example.com:8776/v1",
          "internalURL": "http://int.nova.example.com:8776/v1",
          "publicURL": "http://public.nova.example.com:8776/v1"}]},
    # Image service (glance).
    {"type": "image",
     "name": "glance",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.glance.example.com:9292/v1",
          "internalURL": "http://int.glance.example.com:9292/v1",
          "publicURL": "http://public.glance.example.com:9292/v1"}]},
    # Identity (keystone); note the distinct admin port 35357.
    {"type": "identity",
     "name": "keystone",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.keystone.example.com:35357/v2.0",
          "internalURL": "http://int.keystone.example.com:5000/v2.0",
          "publicURL": "http://public.keystone.example.com:5000/v2.0"}]},
    # Object storage (swift).
    {"type": "object-store",
     "name": "swift",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.swift.example.com:8080/",
          "internalURL": "http://int.swift.example.com:8080/",
          "publicURL": "http://public.swift.example.com:8080/"}]},
    # Networking (neutron).
    {"type": "network",
     "name": "neutron",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.neutron.example.com:9696/",
          "internalURL": "http://int.neutron.example.com:9696/",
          "publicURL": "http://public.neutron.example.com:9696/"}]},
    # EC2 compatibility API (served by nova).
    {"type": "ec2",
     "name": "EC2 Service",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.nova.example.com:8773/services/Admin",
          "publicURL": "http://public.nova.example.com:8773/services/Cloud",
          "internalURL": "http://int.nova.example.com:8773/services/Cloud"}]},
    # Telemetry (ceilometer).
    {"type": "metering",
     "name": "ceilometer",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.ceilometer.example.com:8777",
          "publicURL": "http://public.ceilometer.example.com:8777",
          "internalURL": "http://int.ceilometer.example.com:8777"}]},
    # Orchestration (heat).
    {"type": "orchestration",
     "name": "Heat",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.heat.example.com:8004/v1",
          "publicURL": "http://public.heat.example.com:8004/v1",
          "internalURL": "http://int.heat.example.com:8004/v1"}]},
    # Database service (trove).
    {"type": "database",
     "name": "Trove",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.trove.example.com:8779/v1.0",
          "publicURL": "http://public.trove.example.com:8779/v1.0",
          "internalURL": "http://int.trove.example.com:8779/v1.0"}]},
    # Data processing (sahara).
    {"type": "data_processing",
     "name": "Sahara",
     "endpoints_links": [],
     "endpoints": [
         {"region": "RegionOne",
          "adminURL": "http://admin.sahara.example.com:8386/v1.1",
          "publicURL": "http://public.sahara.example.com:8386/v1.1",
          "internalURL": "http://int.sahara.example.com:8386/v1.1"}]}
]
def data(TEST):
    """Populate TEST with keystone fixtures: roles, domains, users,
    groups, role assignments, tenants, tokens and EC2 credentials.

    TEST is the shared test-data object; each category is stored in a
    utils.TestDataContainer, with "current" singletons exposed as
    attributes (TEST.user, TEST.tenant, TEST.token, ...).
    """
    # Make a deep copy of the catalog to avoid persisting side-effects
    # when tests modify the catalog.
    TEST.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    TEST.tokens = utils.TestDataContainer()
    TEST.domains = utils.TestDataContainer()
    TEST.users = utils.TestDataContainer()
    TEST.groups = utils.TestDataContainer()
    TEST.tenants = utils.TestDataContainer()
    TEST.role_assignments = utils.TestDataContainer()
    TEST.roles = utils.TestDataContainer()
    TEST.ec2 = utils.TestDataContainer()
    # Roles: one admin role plus the configured default member role.
    admin_role_dict = {'id': '1',
                       'name': 'admin'}
    admin_role = roles.Role(roles.RoleManager, admin_role_dict)
    member_role_dict = {'id': "2",
                        'name': settings.OPENSTACK_KEYSTONE_DEFAULT_ROLE}
    member_role = roles.Role(roles.RoleManager, member_role_dict)
    TEST.roles.add(admin_role, member_role)
    TEST.roles.admin = admin_role
    TEST.roles.member = member_role
    # Domains: one enabled, one disabled.
    domain_dict = {'id': "1",
                   'name': 'test_domain',
                   'description': "a test domain.",
                   'enabled': True}
    domain_dict_2 = {'id': "2",
                     'name': 'disabled_domain',
                     'description': "a disabled test domain.",
                     'enabled': False}
    domain = domains.Domain(domains.DomainManager, domain_dict)
    disabled_domain = domains.Domain(domains.DomainManager, domain_dict_2)
    TEST.domains.add(domain, disabled_domain)
    TEST.domain = domain  # Your "current" domain
    # Users: three in project 1 / domain 1, one in project 2 / domain 2,
    # and one (user_five) without an e-mail address.
    user_dict = {'id': "1",
                 'name': 'test_user',
                 'email': 'test@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user = users.User(None, user_dict)
    user_dict = {'id': "2",
                 'name': 'user_two',
                 'email': 'two@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user2 = users.User(None, user_dict)
    user_dict = {'id': "3",
                 'name': 'user_three',
                 'email': 'three@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '1',
                 'enabled': True,
                 'domain_id': "1"}
    user3 = users.User(None, user_dict)
    user_dict = {'id': "4",
                 'name': 'user_four',
                 'email': 'four@example.com',
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "2"}
    user4 = users.User(None, user_dict)
    user_dict = {'id': "5",
                 'name': 'user_five',
                 'email': None,
                 'password': 'password',
                 'token': 'test_token',
                 'project_id': '2',
                 'enabled': True,
                 'domain_id': "1"}
    user5 = users.User(None, user_dict)
    TEST.users.add(user, user2, user3, user4, user5)
    TEST.user = user  # Your "current" user
    TEST.user.service_catalog = copy.deepcopy(SERVICE_CATALOG)
    # Groups: three in domain 1, one in domain 2.
    group_dict = {'id': "1",
                  'name': 'group_one',
                  'description': 'group one description',
                  'project_id': '1',
                  'domain_id': '1'}
    group = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "2",
                  'name': 'group_two',
                  'description': 'group two description',
                  'project_id': '1',
                  'domain_id': '1'}
    group2 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "3",
                  'name': 'group_three',
                  'description': 'group three description',
                  'project_id': '1',
                  'domain_id': '1'}
    group3 = groups.Group(groups.GroupManager(None), group_dict)
    group_dict = {'id': "4",
                  'name': 'group_four',
                  'description': 'group four description',
                  'project_id': '2',
                  'domain_id': '2'}
    group4 = groups.Group(groups.GroupManager(None), group_dict)
    TEST.groups.add(group, group2, group3, group4)
    # Role assignments: project-scoped, for users 1-3 and group 1.
    role_assignments_dict = {'user': {'id': '1'},
                             'role': {'id': '1'},
                             'scope': {'project': {'id': '1'}}}
    role_assignment1 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '2'},
                             'role': {'id': '2'},
                             'scope': {'project': {'id': '1'}}}
    role_assignment2 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'group': {'id': '1'},
                             'role': {'id': '2'},
                             'scope': {'project': {'id': '1'}}}
    role_assignment3 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    role_assignments_dict = {'user': {'id': '3'},
                             'role': {'id': '2'},
                             'scope': {'project': {'id': '1'}}}
    role_assignment4 = role_assignments.RoleAssignment(
        role_assignments.RoleAssignmentManager, role_assignments_dict)
    TEST.role_assignments.add(role_assignment1,
                              role_assignment2,
                              role_assignment3,
                              role_assignment4)
    # Tenants: enabled, disabled, and one with a unicode name.
    tenant_dict = {'id': "1",
                   'name': 'test_tenant',
                   'description': "a test tenant.",
                   'enabled': True,
                   'domain_id': '1',
                   'domain_name': 'test_domain'}
    tenant_dict_2 = {'id': "2",
                     'name': 'disabled_tenant',
                     'description': "a disabled test tenant.",
                     'enabled': False,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant_dict_3 = {'id': "3",
                     'name': u'\u4e91\u89c4\u5219',
                     'description': "an unicode-named tenant.",
                     'enabled': True,
                     'domain_id': '2',
                     'domain_name': 'disabled_domain'}
    tenant = tenants.Tenant(tenants.TenantManager, tenant_dict)
    disabled_tenant = tenants.Tenant(tenants.TenantManager, tenant_dict_2)
    tenant_unicode = tenants.Tenant(tenants.TenantManager, tenant_dict_3)
    TEST.tenants.add(tenant, disabled_tenant, tenant_unicode)
    TEST.tenant = tenant  # Your "current" tenant
    # Tokens: one project-scoped, one unscoped; both expire tomorrow so
    # they are always valid for the duration of a test run.
    tomorrow = datetime_safe.datetime.now() + timedelta(days=1)
    expiration = tomorrow.isoformat()
    scoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration,
                'tenant': tenant_dict,
                'tenants': [tenant_dict]},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    scoped_access_info = access.AccessInfo.factory(resp=None,
                                                   body=scoped_token_dict)
    unscoped_token_dict = {
        'access': {
            'token': {
                'id': "test_token_id",
                'expires': expiration},
            'user': {
                'id': "test_user_id",
                'name': "test_user",
                'roles': [member_role_dict]},
            'serviceCatalog': TEST.service_catalog
        }
    }
    unscoped_access_info = access.AccessInfo.factory(resp=None,
                                                     body=unscoped_token_dict)
    scoped_token = auth_user.Token(scoped_access_info)
    unscoped_token = auth_user.Token(unscoped_access_info)
    TEST.tokens.add(scoped_token, unscoped_token)
    TEST.token = scoped_token  # your "current" token.
    TEST.tokens.scoped_token = scoped_token
    TEST.tokens.unscoped_token = unscoped_token
    # EC2 credentials attached to the current user.
    access_secret = ec2.EC2(ec2.CredentialsManager, {"access": "access",
                                                     "secret": "secret"})
    TEST.ec2.add(access_secret)
| {
"content_hash": "cd3a86b9d06e5a83eb89a550b6ef7bcb",
"timestamp": "",
"source": "github",
"line_count": 330,
"max_line_length": 78,
"avg_line_length": 40.875757575757575,
"alnum_prop": 0.527837497219957,
"repo_name": "394954369/horizon",
"id": "1fceceea11150081c68307f982a5c62a2e0fb02c",
"size": "14094",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "openstack_dashboard/test/test_data/keystone_data.py",
"mode": "33188",
"license": "apache-2.0",
"language": [],
"symlink_target": ""
} |
"""
Website property.
"""
from pkg_resources import resource_stream # @UnresolvedImport
from rebulk.remodule import re
from rebulk import Rebulk, Rule, RemoveMatch
from ..common import seps
from ..common.formatters import cleanup
from ..common.validators import seps_surround
from ...reutils import build_or_pattern
def website():
    """
    Builder for rebulk object.
    :return: Created Rebulk object
    :rtype: Rebulk
    """
    rebulk = Rebulk().regex_defaults(flags=re.IGNORECASE).string_defaults(ignore_case=True)
    rebulk.defaults(name="website")
    # TLD list is bundled with guessit; entries containing b'--' (punycode
    # placeholders) are dropped, and [1:] skips the file's header line.
    tlds = [l.strip().decode('utf-8')
            for l in resource_stream('guessit', 'tlds-alpha-by-domain.txt').readlines()
            if b'--' not in l][1:]  # All registered domain extension
    safe_tlds = ['com', 'org', 'net']  # For sure a website extension
    safe_subdomains = ['www']  # For sure a website subdomain
    safe_prefix = ['co', 'com', 'org', 'net']  # Those words before a tlds are sure
    website_prefixes = ['from']
    # Pattern 1: safe subdomain + at least one label + any registered TLD.
    rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) +
                 r'\.)+(?:[a-z-]+\.)+(?:'+build_or_pattern(tlds) +
                 r'))(?:[^a-z0-9]|$)',
                 children=True)
    # Pattern 2: optional safe subdomain + label + "safe" TLD only.
    rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) +
                 r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_tlds) +
                 r'))(?:[^a-z0-9]|$)',
                 safe_subdomains=safe_subdomains, safe_tlds=safe_tlds, children=True)
    # Pattern 3: safe prefix (e.g. "co.") before any registered TLD.
    rebulk.regex(r'(?:[^a-z0-9]|^)((?:'+build_or_pattern(safe_subdomains) +
                 r'\.)*[a-z-]+\.(?:'+build_or_pattern(safe_prefix) +
                 r'\.)+(?:'+build_or_pattern(tlds) +
                 r'))(?:[^a-z0-9]|$)',
                 safe_subdomains=safe_subdomains, safe_prefix=safe_prefix, tlds=tlds, children=True)
    # Private marker matches ("from") used by ValidateWebsitePrefix below.
    rebulk.string(*website_prefixes,
                  validator=seps_surround, private=True, tags=['website.prefix'])

    class PreferTitleOverWebsite(Rule):
        """
        If found match is more likely a title, remove website.
        """
        consequence = RemoveMatch

        @staticmethod
        def valid_followers(match):
            """
            Validator for next website matches
            """
            return any(name in ['season', 'episode', 'year'] for name in match.names)

        def when(self, matches, context):
            to_remove = []
            for website_match in matches.named('website'):
                # Matches starting with a safe subdomain/prefix are kept
                # unconditionally; closes over safe_subdomains/safe_prefix.
                safe = False
                for safe_start in safe_subdomains + safe_prefix:
                    if website_match.value.lower().startswith(safe_start):
                        safe = True
                        break
                if not safe:
                    suffix = matches.next(website_match, PreferTitleOverWebsite.valid_followers, 0)
                    if suffix:
                        to_remove.append(website_match)
            return to_remove
    rebulk.rules(PreferTitleOverWebsite, ValidateWebsitePrefix)
    return rebulk
class ValidateWebsitePrefix(Rule):
    """
    Drop 'website.prefix' marker matches that are not immediately
    followed by a website match (i.e. the prefix word was not actually
    introducing a website, or leftover text sits between them).
    """
    priority = 64
    consequence = RemoveMatch

    def when(self, matches, context):
        invalid_prefixes = []
        for prefix_match in matches.tagged('website.prefix'):
            site_match = matches.next(
                prefix_match,
                predicate=lambda match: match.name == 'website',
                index=0)
            if not site_match:
                invalid_prefixes.append(prefix_match)
                continue
            # Any non-empty hole between the prefix and the website
            # invalidates the prefix as well.
            gap = matches.holes(prefix_match.end, site_match.start,
                                formatter=cleanup, seps=seps,
                                predicate=lambda match: match.value)
            if gap:
                invalid_prefixes.append(prefix_match)
        return invalid_prefixes
| {
"content_hash": "28e573bfa6c04953a1fdf257d0f1493d",
"timestamp": "",
"source": "github",
"line_count": 97,
"max_line_length": 106,
"avg_line_length": 37.7319587628866,
"alnum_prop": 0.5663934426229508,
"repo_name": "pannal/Subliminal.bundle",
"id": "afca57abbabb9388628dc22388587c0c96f1a8d0",
"size": "3706",
"binary": false,
"copies": "10",
"ref": "refs/heads/master",
"path": "Contents/Libraries/Shared/guessit/rules/properties/website.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "3012769"
},
{
"name": "Python",
"bytes": "3311785"
},
{
"name": "Shell",
"bytes": "273"
}
],
"symlink_target": ""
} |
import Queue
import threading
# Path of the log file whose lines are pushed through ajax().
logFilePath = './log.txt'
# Dummy stand-in for a real ajax call; just prints the line.
def ajax(line):
    """Placeholder for a real HTTP call: simply echo the line."""
    print(line)
#get lines from file
def getLines(filename):
    """Return the list of lines read from *filename*.

    Bug fix: the function previously opened the module-global
    logFilePath and ignored its *filename* argument (it also shadowed
    the ``file`` builtin).
    """
    with open(filename, 'r') as logfile:
        return logfile.readlines()
#single threaded solution
def send(arr):
    """Send every line in *arr* through ajax() (single-threaded path).

    Bug fix: the loop previously iterated the module-global `lines`
    instead of the `arr` parameter, so the argument was ignored.
    """
    for line in arr:
        ajax(line)
#single threaded use case
# lines = getLines(logFilePath)
# send(lines)
#worker function to trigger ajax function
def worker():
    # Worker loop: consumes items from the module-global `lines` queue
    # until the None sentinel (pushed by cleanup()) is received.
    while True:
        line = lines.get()
        if line is None:
            # Sentinel: exit without task_done(); cleanup() only joins the
            # threads, and lines.join() has already returned by then.
            break
        # ajax(line) -- the real work is disabled here (benchmark mode).
        lines.task_done()
# Worker threads spawned by initThreads(); cleanup() joins them.
threads = []
# Number of worker threads to spawn.
threadCount = 1000
# Spawn threads running the given worker function.
def initThreads(threadCount, workerFunc):
    """Start `threadCount` threads running `workerFunc` and record each
    of them in the module-global `threads` list so cleanup() can join
    them later."""
    for _ in range(threadCount):
        spawned = threading.Thread(target=workerFunc)
        spawned.start()
        threads.append(spawned)
def getLines(filename):
    """Read *filename* and return its lines wrapped in a Queue for the
    worker threads (this definition shadows the list-returning one
    above).

    Bug fix: the function previously opened the module-global
    logFilePath and ignored its *filename* argument.
    """
    q = Queue.Queue()
    with open(filename) as logfile:
        for line in logfile.readlines():
            q.put(line)
    return q
def cleanup(threads, threadCount, lines):
    """Shut the workers down: queue one None sentinel per thread, then
    wait for every thread to exit."""
    for _ in range(threadCount):
        lines.put(None)
    for worker_thread in threads:
        worker_thread.join()
# Multi-threaded use case: load the log into a queue, spin up the
# workers, wait for the queue to drain, then shut the workers down.
lines = getLines(logFilePath)
initThreads(threadCount, worker)
# block until all tasks are done
lines.join()
# stop workers (queue sentinels and join the threads)
cleanup(threads, threadCount, lines)
| {
"content_hash": "70b9d6239ab0e33a684bedcac6ef253c",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 47,
"avg_line_length": 19.63235294117647,
"alnum_prop": 0.6584269662921348,
"repo_name": "schandrasekhar/msgWatcher",
"id": "ea1b7042677848e076787e998c6ed35939fb9077",
"size": "1381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1891"
}
],
"symlink_target": ""
} |
"""Ansible module for retrieving and setting openshift related facts"""
# pylint: disable=no-name-in-module, import-error, wrong-import-order
import copy
import errno
import json
import re
import os
import yaml
import struct
import socket
import ipaddress
from distutils.util import strtobool
from distutils.version import LooseVersion
from ansible.module_utils.six import text_type
from ansible.module_utils.six import string_types
from ansible.module_utils.six.moves import configparser
# ignore pylint errors related to the module_utils import
# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
# import module snippets
from ansible.module_utils.basic import * # noqa: F403
from ansible.module_utils.facts import * # noqa: F403
from ansible.module_utils.urls import * # noqa: F403
from ansible.module_utils.six import iteritems, itervalues
from ansible.module_utils.six.moves.urllib.parse import urlparse, urlunparse
from ansible.module_utils._text import to_native
# Optional dependency: the python dbus bindings may be absent on the
# target host, so feature-detect them instead of failing at import time.
HAVE_DBUS = False
try:
    from dbus import SystemBus, Interface
    from dbus.exceptions import DBusException
    HAVE_DBUS = True
except ImportError:
    pass
# Ansible module documentation blocks (consumed by ansible-doc).
DOCUMENTATION = '''
---
module: openshift_facts
short_description: Cluster Facts
author: Jason DeTiberus
requirements: [ ]
'''
EXAMPLES = '''
'''
# TODO: We should add a generic migration function that takes source and destination
# paths and does the right thing rather than one function for common, one for node, etc.
def migrate_common_facts(facts):
    """ Migrate facts from various roles into common

        Args:
            facts (dict): role-keyed facts to migrate (mutated in place)
        Returns:
            dict: facts with the listed role params moved under 'common'
    """
    # Bug fix: these values must be tuples of fact names. They were
    # previously written as ('portal_net') -- a bare string -- so the
    # inner loop iterated its characters and the migration never matched
    # any real fact key.
    params = {
        'node': ('portal_net',),
        'master': ('portal_net',)
    }
    if 'common' not in facts:
        facts['common'] = {}
    # pylint: disable=consider-iterating-dictionary
    for role in params.keys():
        if role in facts:
            for param in params[role]:
                if param in facts[role]:
                    facts['common'][param] = facts[role].pop(param)
    return facts
def migrate_admission_plugin_facts(facts):
    """ Apply migrations for admission plugin facts """
    master = facts.get('master')
    if master is not None and 'kube_admission_plugin_config' in master:
        # Merge existing kube_admission_plugin_config with
        # admission_plugin_config, then drop the legacy fact.
        existing = master.setdefault('admission_plugin_config', dict())
        master['admission_plugin_config'] = merge_facts(
            existing,
            master['kube_admission_plugin_config'],
            additive_facts_to_overwrite=[])
        master.pop('kube_admission_plugin_config', None)
    return facts
def migrate_local_facts(facts):
    """ Apply migrations of local facts """
    # Work on a deep copy so the caller's dict is left untouched.
    return migrate_admission_plugin_facts(
        migrate_common_facts(copy.deepcopy(facts)))
def first_ip(network):
    """ Return the first usable IPv4 address in network

        Args:
            network (str): network in CIDR format
        Returns:
            str: first IPv4 address after the network address
    """
    # Use the stdlib ipaddress module (already imported at file level)
    # instead of hand-rolled bit math with noqa'd lambdas. The old code
    # relied on socket.inet_aton("<prefix>") parsing a bare prefix
    # length, which was fragile; ipaddress also accepts dotted netmasks.
    # strict=False mirrors the old behavior of masking off host bits.
    net = ipaddress.ip_network(u'%s' % network, strict=False)
    return str(net.network_address + 1)
def hostname_valid(hostname):
    """ Test if specified hostname should be considered valid

        Args:
            hostname (str): hostname to test
        Returns:
            bool: True if valid, otherwise False
    """
    if not hostname:
        return False
    if hostname.startswith('localhost'):
        return False
    if hostname.endswith('localdomain'):
        return False
    # OpenShift will not allow a node with more than 63 chars in name.
    return len(hostname) <= 63
def choose_hostname(hostnames=None, fallback=''):
    """ Choose a hostname from the provided hostnames

        Given a list of hostnames and a fallback value, choose a hostname to
        use. This function will prefer fqdns if they exist (excluding any that
        begin with localhost or end with localdomain) over ip addresses.

        Args:
            hostnames (list): list of hostnames
            fallback (str): default value to set if hostnames does not contain
                a valid hostname
        Returns:
            str: chosen hostname
    """
    if hostnames is None:
        return fallback
    ip_pattern = re.compile(r'\A\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\Z')
    ips = []
    for candidate in hostnames:
        is_ip = (candidate is not None
                 and isinstance(candidate, string_types)
                 and ip_pattern.match(candidate))
        if is_ip:
            ips.append(candidate)
    hosts = []
    for candidate in hostnames:
        if candidate is not None and candidate != '' and candidate not in ips:
            hosts.append(candidate)
    # Prefer real hostnames over bare IPs; first valid entry wins.
    for pool in (hosts, ips):
        for candidate in pool:
            if hostname_valid(candidate):
                return candidate
    return fallback
def query_metadata(metadata_url, headers=None, expect_json=False):
    """ Return metadata from the provided metadata_url

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict or list: metadata request result
        Raises:
            OpenShiftFactsMetadataUnavailableError: on any non-200 response
    """
    # fetch_url and the `module` global come from ansible's module_utils
    # wildcard imports at the top of the file.
    result, info = fetch_url(module, metadata_url, headers=headers)  # noqa: F405
    if info['status'] != 200:
        raise OpenShiftFactsMetadataUnavailableError("Metadata unavailable")
    if expect_json:
        return module.from_json(to_native(result.read()))  # noqa: F405
    else:
        # Plain-text metadata endpoints return one value per line.
        return [to_native(line.strip()) for line in result.readlines()]
def walk_metadata(metadata_url, headers=None, expect_json=False):
    """ Walk the metadata tree and return a dictionary of the entire tree

        Args:
            metadata_url (str): metadata url
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the result of walking the metadata tree
    """
    metadata = dict()
    for line in query_metadata(metadata_url, headers, expect_json):
        # Entries ending with '/' are sub-trees; recurse into them.
        # 'public-keys/' is deliberately excluded from recursion.
        if line.endswith('/') and not line == 'public-keys/':
            key = line[:-1]
            metadata[key] = walk_metadata(metadata_url + line,
                                          headers, expect_json)
        else:
            results = query_metadata(metadata_url + line, headers,
                                     expect_json)
            if len(results) == 1:
                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Single-value leaves are unwrapped from their list.
                metadata[line] = results.pop()
            else:
                metadata[line] = results
    return metadata
def get_provider_metadata(metadata_url, supports_recursive=False,
                          headers=None, expect_json=False):
    """ Retrieve the provider metadata

        Args:
            metadata_url (str): metadata url
            supports_recursive (bool): does the provider metadata api support
                recursion
            headers (dict): headers to set for metadata request
            expect_json (bool): does the metadata_url return json
        Returns:
            dict: the provider metadata, or None when unavailable
    """
    # A recursion-capable API returns everything in one query; otherwise
    # the tree has to be walked endpoint by endpoint.
    fetch = query_metadata if supports_recursive else walk_metadata
    try:
        return fetch(metadata_url, headers, expect_json)
    except OpenShiftFactsMetadataUnavailableError:
        return None
def normalize_gce_facts(metadata, facts):
    """ Normalize gce facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    for iface in metadata['instance']['networkInterfaces']:
        iface_facts = dict(ips=[iface['ip']], network_type='gce')
        iface_facts['public_ips'] = [cfg['externalIp']
                                     for cfg in iface['accessConfigs']]
        iface_facts['public_ips'].extend(iface['forwardedIps'])
        # Network id is the last path component of the network URL.
        iface_facts['network_id'] = iface['network'].rpartition('/')[2]
        facts['network']['interfaces'].append(iface_facts)
    facts['zone'] = metadata['instance']['zone'].rpartition('/')[2]
    # GCE currently only supports a single interface
    primary = facts['network']['interfaces'][0]
    facts['network']['ip'] = primary['ips'][0]
    facts['network']['public_ip'] = primary['public_ips'][0]
    # Split instance hostname from GCE metadata to use the short instance name
    facts['network']['hostname'] = metadata['instance']['hostname'].partition('.')[0]
    # TODO: attempt to resolve public_hostname
    facts['network']['public_hostname'] = facts['network']['public_ip']
    return facts
def normalize_aws_facts(metadata, facts):
    """ Normalize aws facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    # Interfaces are keyed by MAC; order them by their device number.
    for interface in sorted(
            metadata['network']['interfaces']['macs'].values(),
            key=lambda x: x['device-number']
    ):
        int_info = dict()
        var_map = {'ips': 'local-ipv4s', 'public_ips': 'public-ipv4s'}
        for ips_var, int_var in iteritems(var_map):
            ips = interface.get(int_var)
            # The metadata API returns a bare string for a single IP and a
            # list for several; normalize to a list either way.
            if isinstance(ips, string_types):
                int_info[ips_var] = [ips]
            else:
                int_info[ips_var] = ips
        # Presence of 'vpc-id' distinguishes VPC from EC2-classic.
        if 'vpc-id' in interface:
            int_info['network_type'] = 'vpc'
        else:
            int_info['network_type'] = 'classic'
        if int_info['network_type'] == 'vpc':
            int_info['network_id'] = interface['subnet-id']
        else:
            int_info['network_id'] = None
        facts['network']['interfaces'].append(int_info)
    facts['zone'] = metadata['placement']['availability-zone']
    # TODO: actually attempt to determine default local and public ips
    # by using the ansible default ip fact and the ipv4-associations
    # from the ec2 metadata
    facts['network']['ip'] = metadata.get('local-ipv4')
    facts['network']['public_ip'] = metadata.get('public-ipv4')
    # TODO: verify that local hostname makes sense and is resolvable
    facts['network']['hostname'] = metadata.get('local-hostname')
    # TODO: verify that public hostname makes sense and is resolvable
    facts['network']['public_hostname'] = metadata.get('public-hostname')
    return facts
def normalize_openstack_facts(metadata, facts):
    """ Normalize openstack facts

        Args:
            metadata (dict): provider metadata
            facts (dict): facts to update
        Returns:
            dict: the result of adding the normalized metadata to the provided
                facts dict
    """
    # openstack ec2 compat api does not support network interfaces and
    # the version tested on did not include the info in the openstack
    # metadata api, should be updated if neutron exposes this.
    ec2_meta = metadata['ec2_compat']
    facts['zone'] = metadata['availability_zone']
    facts['network']['ip'] = ec2_meta['local-ipv4'].split(',')[0]
    facts['network']['public_ip'] = ec2_meta['public-ipv4']
    # Use the hostname only when it resolves back to the expected IP;
    # otherwise fall back to the IP itself.
    for fact_key, host_key, ip_key in (
            ('hostname', 'hostname', 'local-ipv4'),
            ('public_hostname', 'public-hostname', 'public-ipv4')):
        try:
            resolved = socket.gethostbyname(ec2_meta[host_key])
        except socket.gaierror:
            facts['network'][fact_key] = ec2_meta[ip_key]
        else:
            if resolved == ec2_meta[ip_key]:
                facts['network'][fact_key] = ec2_meta[host_key]
            else:
                facts['network'][fact_key] = ec2_meta[ip_key]
    return facts
def normalize_provider_facts(provider, metadata):
    """ Normalize provider facts

        Args:
            provider (str): host provider
            metadata (dict): provider metadata
        Returns:
            dict: the normalized provider facts
    """
    if provider is None or metadata is None:
        return {}
    # TODO: test for ipv6_enabled where possible (gce, aws do not support)
    # and configure ipv6 facts if available
    # TODO: add support for setting user_data if available
    facts = {'name': provider,
             'metadata': metadata,
             'network': {'interfaces': [], 'ipv6_enabled': False}}
    if provider == 'gce':
        return normalize_gce_facts(metadata, facts)
    if provider == 'aws':
        return normalize_aws_facts(metadata, facts)
    if provider == 'openstack':
        return normalize_openstack_facts(metadata, facts)
    return facts
def set_identity_providers_if_unset(facts):
    """ Set identity_providers fact if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated identity providers
                facts if they were not already present
    """
    if 'master' not in facts:
        return facts
    # Read deployment_type unconditionally (matches the historical
    # contract: 'common' facts must exist whenever 'master' does).
    deployment_type = facts['common']['deployment_type']
    if 'identity_providers' in facts['master']:
        return facts
    # Enterprise deployments default to deny-all; everything else allows all.
    if deployment_type == 'openshift-enterprise':
        provider = dict(name='deny_all', challenge=True, login=True,
                        kind='DenyAllPasswordIdentityProvider')
    else:
        provider = dict(name='allow_all', challenge=True, login=True,
                        kind='AllowAllPasswordIdentityProvider')
    facts['master']['identity_providers'] = [provider]
    return facts
def set_url_facts_if_unset(facts):
    """ Set url facts if not already present in facts dict

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated url facts if they
                were not already present
    """
    if 'master' in facts:
        hostname = facts['common']['hostname']
        # cluster_* hostnames (load-balanced endpoints) win over the
        # per-host names when configured.
        cluster_hostname = facts['master'].get('cluster_hostname')
        cluster_public_hostname = facts['master'].get('cluster_public_hostname')
        public_hostname = facts['common']['public_hostname']
        api_hostname = cluster_hostname if cluster_hostname else hostname
        api_public_hostname = cluster_public_hostname if cluster_public_hostname else public_hostname
        console_path = facts['master']['console_path']
        # Per-endpoint SSL flags and ports; api/console may differ.
        use_ssl = dict(
            api=facts['master']['api_use_ssl'],
            public_api=facts['master']['api_use_ssl'],
            loopback_api=facts['master']['api_use_ssl'],
            console=facts['master']['console_use_ssl'],
            public_console=facts['master']['console_use_ssl'],
        )
        ports = dict(
            api=facts['master']['api_port'],
            public_api=facts['master']['api_port'],
            loopback_api=facts['master']['api_port'],
            console=facts['master']['console_port'],
            public_console=facts['master']['console_port'],
        )
        # API URLs: loopback always uses the local hostname.
        prefix_hosts = [('api', api_hostname),
                        ('public_api', api_public_hostname),
                        ('loopback_api', hostname)]
        for prefix, host in prefix_hosts:
            # setdefault: never clobber a url the user already set.
            # format_url is provided elsewhere in this module.
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix]))
        # kubeconfig context/user names for the master loopback client;
        # dots are replaced since they are not valid in context names.
        r_lhn = "{0}:{1}".format(hostname, ports['api']).replace('.', '-')
        r_lhu = "system:openshift-master/{0}:{1}".format(api_hostname, ports['api']).replace('.', '-')
        facts['master'].setdefault('loopback_cluster_name', r_lhn)
        facts['master'].setdefault('loopback_context_name', "default/{0}/system:openshift-master".format(r_lhn))
        facts['master'].setdefault('loopback_user', r_lhu)
        # Console URLs additionally carry the console path component.
        prefix_hosts = [('console', api_hostname), ('public_console', api_public_hostname)]
        for prefix, host in prefix_hosts:
            facts['master'].setdefault(prefix + '_url', format_url(use_ssl[prefix],
                                                                   host,
                                                                   ports[prefix],
                                                                   console_path))
    return facts
def set_aggregate_facts(facts):
    """ Set aggregate facts

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with aggregated facts
    """
    all_hostnames = set()
    internal_hostnames = set()
    if 'common' in facts:
        # Bug fix: this lookup previously ran before the 'common' guard,
        # raising KeyError whenever the 'common' facts were absent. All
        # consumers of kube_svc_ip are inside this block.
        kube_svc_ip = first_ip(facts['common']['portal_net'])
        all_hostnames.add(facts['common']['hostname'])
        all_hostnames.add(facts['common']['public_hostname'])
        all_hostnames.add(facts['common']['ip'])
        all_hostnames.add(facts['common']['public_ip'])
        facts['common']['kube_svc_ip'] = kube_svc_ip
        internal_hostnames.add(facts['common']['hostname'])
        internal_hostnames.add(facts['common']['ip'])
        cluster_domain = facts['common']['dns_domain']
        if 'master' in facts:
            if 'cluster_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_hostname'])
            if 'cluster_public_hostname' in facts['master']:
                all_hostnames.add(facts['master']['cluster_public_hostname'])
        # Well-known in-cluster service names for the API server.
        svc_names = ['openshift', 'openshift.default', 'openshift.default.svc',
                     'openshift.default.svc.' + cluster_domain, 'kubernetes', 'kubernetes.default',
                     'kubernetes.default.svc', 'kubernetes.default.svc.' + cluster_domain]
        all_hostnames.update(svc_names)
        internal_hostnames.update(svc_names)
        all_hostnames.add(kube_svc_ip)
        internal_hostnames.add(kube_svc_ip)
        facts['common']['all_hostnames'] = list(all_hostnames)
        facts['common']['internal_hostnames'] = list(internal_hostnames)
    return facts
def set_deployment_facts_if_unset(facts):
    """ Set Facts that vary based on deployment_type. This currently
        includes master.registry_url

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the generated deployment_type
                facts
    """
    if 'master' not in facts:
        return facts
    master = facts['master']
    deployment_type = facts['common']['deployment_type']
    # Registry-only deployments disable the developer-facing features.
    if ('disabled_features' not in master
            and facts['common']['deployment_subtype'] == 'registry'):
        master['disabled_features'] = ['Builder', 'S2IBuilder', 'WebConsole']
    if 'registry_url' not in master:
        if deployment_type == 'openshift-enterprise':
            master['registry_url'] = 'openshift3/ose-${component}:${version}'
        else:
            master['registry_url'] = 'openshift/origin-${component}:${version}'
    return facts
# pylint: disable=too-many-statements
def set_version_facts_if_unset(facts):
    """ Set version facts. This currently includes common.version and the
        common.version_gte_3_x flags, plus common.examples_content_version.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with version facts.
    """
    if 'common' not in facts:
        return facts

    releases = ('3.6', '3.7', '3.8', '3.9', '3.10')
    openshift_version = get_openshift_version(facts)
    if openshift_version and openshift_version != "latest":
        version = LooseVersion(openshift_version)
        facts['common']['version'] = openshift_version
        facts['common']['short_version'] = '.'.join(str(x) for x in version.version[0:2])
        gte = dict((rel, version >= LooseVersion(rel)) for rel in releases)
    else:
        # 'Latest' version is set to True, 'Next' versions set to False
        gte = {'3.6': True, '3.7': True, '3.8': False, '3.9': False, '3.10': False}

    examples_content_version = 'v1.5'
    for rel in releases:
        facts['common']['version_gte_%s' % rel.replace('.', '_')] = gte[rel]
        if gte[rel]:
            # releases iterate in ascending order, so the newest satisfied
            # release wins
            examples_content_version = 'v' + rel
    facts['common']['examples_content_version'] = examples_content_version

    return facts
def set_sdn_facts_if_unset(facts, system_facts):
    """ Set sdn facts if not already present in facts dict

        Args:
            facts (dict): existing facts
            system_facts (dict): ansible_facts
        Returns:
            dict: the facts dict updated with the generated sdn facts if they
                  were not already present
    """
    if 'master' in facts:
        # Defaults for the cluster network; overridden below when the master
        # config file already declares them.
        cluster_cidr = '10.128.0.0/14'
        subnet_length = '9'

        master_cfg_path = os.path.join(facts['common']['config_base'],
                                       'master/master-config.yaml')
        if os.path.isfile(master_cfg_path):
            with open(master_cfg_path, 'r') as master_cfg_f:
                config = yaml.safe_load(master_cfg_f.read())

            if 'networkConfig' in config:
                net_cfg = config['networkConfig']
                if 'clusterNetworkCIDR' in net_cfg:
                    cluster_cidr = net_cfg['clusterNetworkCIDR']
                if 'hostSubnetLength' in net_cfg:
                    subnet_length = net_cfg['hostSubnetLength']

        facts['master'].setdefault('sdn_cluster_network_cidr', cluster_cidr)
        facts['master'].setdefault('sdn_host_subnet_length', subnet_length)

    if 'node' in facts and 'sdn_mtu' not in facts['node']:
        node_ip = facts['common']['ip']

        # default MTU if interface MTU cannot be detected
        facts['node']['sdn_mtu'] = '1450'

        for val in itervalues(system_facts):
            if isinstance(val, dict) and 'mtu' in val:
                interface_mtu = val['mtu']
                # pick the MTU of the interface owning the node IP; subtract
                # 50 to leave headroom for the SDN overlay (presumably VXLAN
                # encapsulation overhead)
                if 'ipv4' in val and val['ipv4'].get('address') == node_ip:
                    facts['node']['sdn_mtu'] = str(interface_mtu - 50)

    return facts
def set_nodename(facts):
    """ Set node.nodename from GCE metadata or the lowercased hostname. """
    if 'node' not in facts or 'common' not in facts:
        return facts

    if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
        # GCE nodes register under the short instance name
        instance_hostname = facts['provider']['metadata']['instance']['hostname']
        facts['node']['nodename'] = instance_hostname.split('.')[0]
    # TODO: the openstack cloudprovider nodename setting was too opinionated;
    # it needs to be generalized before it can be enabled again.
    # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
    #     facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
    else:
        facts['node']['nodename'] = facts['common']['hostname'].lower()

    return facts
def format_url(use_ssl, hostname, port, path=''):
    """ Format url based on ssl flag, hostname, port and path

        The default port for the chosen scheme ('443' for https, '80' for
        http) is omitted from the netloc.

        Args:
            use_ssl (bool): is ssl enabled
            hostname (str): hostname
            port (str): port
            path (str): url path
        Returns:
            str: The generated url string
    """
    scheme = 'https' if use_ssl else 'http'
    default_port = '443' if use_ssl else '80'
    netloc = hostname if port == default_port else "%s:%s" % (hostname, port)
    try:
        # Python 2: urlparse is the module
        return urlparse.urlunparse((scheme, netloc, path, '', '', ''))
    except AttributeError:
        # Python 3: urlunparse was imported directly
        # pylint: disable=undefined-variable
        return urlunparse((scheme, netloc, path, '', '', ''))
def get_current_config(facts):
    """ Get current openshift config

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict updated with the current openshift config
    """
    current_config = dict()
    # every role present in facts except the pseudo-roles 'common'/'provider'
    roles = [role for role in facts if role not in ['common', 'provider']]
    for role in roles:
        if 'roles' in current_config:
            current_config['roles'].append(role)
        else:
            current_config['roles'] = [role]

        # TODO: parse the /etc/sysconfig/openshift-{master,node} config to
        # determine the location of files.
        # TODO: I suspect this isn't working right now, but it doesn't prevent
        # anything from working properly as far as I can tell, perhaps because
        # we override the kubeconfig path everywhere we use it?
        # Query kubeconfig settings
        kubeconfig_dir = '/var/lib/origin/openshift.local.certificates'
        if role == 'node':
            # node kubeconfigs live in a per-host subdirectory
            kubeconfig_dir = os.path.join(
                kubeconfig_dir, "node-%s" % facts['common']['hostname']
            )

        kubeconfig_path = os.path.join(kubeconfig_dir, '.kubeconfig')
        if os.path.isfile('/usr/bin/openshift') and os.path.isfile(kubeconfig_path):
            try:
                _, output, _ = module.run_command(  # noqa: F405
                    ["/usr/bin/openshift", "ex", "config", "view", "-o",
                     "json", "--kubeconfig=%s" % kubeconfig_path],
                    check_rc=False
                )
                config = json.loads(output)

                cad = 'certificate-authority-data'
                # mask key material so it never ends up in recorded facts/logs
                try:
                    for cluster in config['clusters']:
                        config['clusters'][cluster][cad] = 'masked'
                except KeyError:
                    pass
                try:
                    for user in config['users']:
                        config['users'][user][cad] = 'masked'
                        config['users'][user]['client-key-data'] = 'masked'
                except KeyError:
                    pass

                current_config['kubeconfig'] = config

            # override pylint broad-except warning, since we do not want
            # to bubble up any exceptions if oc config view
            # fails
            # pylint: disable=broad-except
            except Exception:
                pass

    return current_config
def build_controller_args(facts):
    """ Build master controller_args from any configured cloud provider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.controller_args merged in when a known
                  cloudprovider kind ('aws', 'openstack', 'gce') is set
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' not in facts:
        return facts

    controller_args = {}
    kind = facts['cloudprovider'].get('kind') if 'cloudprovider' in facts else None
    if kind in ('aws', 'openstack', 'gce'):
        controller_args['cloud-provider'] = [kind]
        controller_args['cloud-config'] = [cloud_cfg_path + '/%s.conf' % kind]

    if controller_args:
        facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [])
    return facts
def build_api_server_args(facts):
    """ Build master api_server_args from any configured cloud provider.

        Args:
            facts (dict): existing facts
        Returns:
            dict: facts, with master.api_server_args merged in when a known
                  cloudprovider kind ('aws', 'openstack', 'gce') is set
    """
    cloud_cfg_path = os.path.join(facts['common']['config_base'],
                                  'cloudprovider')
    if 'master' not in facts:
        return facts

    api_server_args = {}
    kind = facts['cloudprovider'].get('kind') if 'cloudprovider' in facts else None
    if kind in ('aws', 'openstack', 'gce'):
        api_server_args['cloud-provider'] = [kind]
        api_server_args['cloud-config'] = [cloud_cfg_path + '/%s.conf' % kind]

    if api_server_args:
        facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [])
    return facts
def is_service_running(service):
    """ Queries systemd through dbus to see if the service is running

        Args:
            service (str): systemd unit name, with or without a '.service'
                           suffix
        Returns:
            bool: True when the unit's LoadState is 'loaded' and its
                  ActiveState is 'active'; False otherwise (including when
                  dbus raises, e.g. unit unknown or no system bus)
    """
    service_running = False
    try:
        bus = SystemBus()
        systemd = bus.get_object('org.freedesktop.systemd1', '/org/freedesktop/systemd1')
        manager = Interface(systemd, dbus_interface='org.freedesktop.systemd1.Manager')
        # GetUnit resolves a unit name into its dbus object path.
        # NOTE(review): when the name already ends in '.service' the raw name
        # (not an object path from GetUnit) is passed to get_object below —
        # confirm that is intended.
        service_unit = service if service.endswith('.service') else manager.GetUnit('{0}.service'.format(service))
        service_proxy = bus.get_object('org.freedesktop.systemd1', str(service_unit))
        service_properties = Interface(service_proxy, dbus_interface='org.freedesktop.DBus.Properties')
        service_load_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'LoadState')
        service_active_state = service_properties.Get('org.freedesktop.systemd1.Unit', 'ActiveState')
        if service_load_state == 'loaded' and service_active_state == 'active':
            service_running = True
    except DBusException:
        # TODO: do not swallow exception, as it may be hiding useful debugging
        # information.
        pass

    return service_running
def rpm_rebuilddb():
    """
    Runs rpm --rebuilddb to ensure the db is in good shape.

    NOTE(review): the exit status is ignored — presumably a best-effort
    repair; confirm failures are acceptable here.
    """
    module.run_command(['/usr/bin/rpm', '--rebuilddb'])  # noqa: F405
def get_version_output(binary, version_cmd):
    """ Run *binary* with *version_cmd* and return its stdout.

        Args:
            binary (str or list): path to the executable (or argv fragment)
            version_cmd (str or list): version sub-command (or argv fragment)
        Returns:
            str or None: command stdout, or None when the binary is missing
    """
    cmd = []
    for part in (binary, version_cmd):
        # either argument may be a pre-split argv fragment
        if isinstance(part, list):
            cmd += part
        else:
            cmd += [part]

    if not os.path.isfile(cmd[0]):
        return None
    _, output, _ = module.run_command(cmd)  # noqa: F405
    return output
# We may need this in the future.
def get_docker_version_info():
    """ Parses and returns the docker version info

        Returns:
            dict or None: {'api_version': ..., 'version': ...} taken from the
            'Server' section of `docker version` output, or None when neither
            the docker nor container-engine service is running or the output
            has no 'Server' section.
    """
    result = None
    if is_service_running('docker') or is_service_running('container-engine'):
        # `docker version` output is parsed as YAML ('Key: value' lines)
        version_info = yaml.safe_load(get_version_output('/usr/bin/docker', 'version'))
        if 'Server' in version_info:
            result = {
                'api_version': version_info['Server']['API version'],
                'version': version_info['Server']['Version']
            }
    return result
def get_openshift_version(facts):
    """ Get current version of openshift on the host.

        Checks a variety of ways ranging from fastest to slowest:
        1. an already-recorded common.version fact,
        2. the /usr/bin/openshift binary,
        3. the CLI container image (containerized hosts),
        4. /usr/local/bin/openshift as a last, slow resort.

        Args:
            facts (dict): existing facts
        Returns:
            version: the current openshift version (commit offset chomped),
                     or None when it cannot be determined
    """
    version = None

    # deployment_type -> CLI image repository
    image_type_dict = {'origin': 'openshift/origin',
                       'openshift-enterprise': 'openshift3/ose'}

    # No need to run this method repeatedly on a system if we already know the
    # version
    # TODO: We need a way to force reload this after upgrading bits.
    if 'common' in facts:
        if 'version' in facts['common'] and facts['common']['version'] is not None:
            return chomp_commit_offset(facts['common']['version'])

    if os.path.isfile('/usr/bin/openshift'):
        _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)
    else:
        # presumably a containerized host — ask the CLI image for its version
        deployment_type = facts['common']['deployment_type']
        image_tag = get_container_openshift_version(deployment_type)
        if image_tag is None:
            return version
        cli_image = image_type_dict[deployment_type] + ":" + image_tag
        _, output, _ = module.run_command(['docker', 'run', '--rm', cli_image, 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    # Handle containerized masters that have not yet been configured as a node.
    # This can be very slow and may get re-run multiple times, so we only use this
    # if other methods failed to find a version.
    if not version and os.path.isfile('/usr/local/bin/openshift'):
        _, output, _ = module.run_command(['/usr/local/bin/openshift', 'version'])  # noqa: F405
        version = parse_openshift_version(output)

    return chomp_commit_offset(version)
def chomp_commit_offset(version):
    """Chomp any "+git.foo" commit offset string from the given `version`
    and return the modified version string.

    Ex:
    - chomp_commit_offset(None) => None
    - chomp_commit_offset(1337) => "1337"
    - chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15"
    - chomp_commit_offset("v3.4.0.15") => "v3.4.0.15"
    - chomp_commit_offset("v1.3.0+52492b4") => "v1.3.0"
    """
    if version is None:
        return None
    # Stringify (it may be a Number), then keep everything before the first
    # '+'. Strings without a '+' pass through unchanged.
    return str(version).split('+')[0]
def get_container_openshift_version(deployment_type):
    """
    If containerized, see if we can determine the installed version via the
    systemd environment files.

    Returns the IMAGE_VERSION value from the first matching sysconfig file,
    or None when no file exists or none declares IMAGE_VERSION.
    """
    service_type = {'origin': 'origin',
                    'openshift-enterprise': 'atomic-openshift'}[deployment_type]

    prefix = "IMAGE_VERSION="
    for template in ('/etc/sysconfig/%s-master-controllers', '/etc/sysconfig/%s-node'):
        env_path = template % service_type
        if not os.path.exists(env_path):
            continue

        with open(env_path) as env_file:
            for line in env_file:
                if line.startswith(prefix):
                    return line[len(prefix):].strip()
    return None
def parse_openshift_version(output):
    """ Parse the openshift version number from 'openshift version' output.

        (The previous docstring was a copy-paste from apply_provider_facts.)

        Args:
            output (string): output of the 'openshift version' command, e.g.
                             lines of the form 'openshift v3.1.1.6-64-g80b61da'
        Returns:
            string: the version number (without the leading 'v'), or '' when
                    no 'openshift' line is present
    """
    # Each useful line looks like '<component> v<version>'; splitting on the
    # ' v' separator maps component name -> version string.
    versions = dict(e.split(' v') for e in output.splitlines() if ' v' in e)
    ver = versions.get('openshift', '')
    # Remove trailing build number and commit hash from older versions, we need to return a straight
    # w.x.y.z version here for use as openshift_version throughout the playbooks/roles. (i.e. 3.1.1.6-64-g80b61da)
    ver = ver.split('-')[0]
    return ver
def apply_provider_facts(facts, provider_facts):
    """ Apply provider facts to supplied facts dict

        When the provider reported an IP for a (hostname, ip) pair, the IP is
        copied into common and the corresponding hostname is re-chosen from
        the provider value and the current fact.

        Args:
            facts (dict): facts dict to update
            provider_facts (dict): provider facts to apply
        Returns:
            dict: the merged facts (provider facts stored under 'provider')
    """
    if not provider_facts:
        return facts

    for hostname_var, ip_var in (('hostname', 'ip'), ('public_hostname', 'public_ip')):
        ip_value = provider_facts['network'].get(ip_var)
        if ip_value:
            facts['common'][ip_var] = ip_value
            facts['common'][hostname_var] = choose_hostname(
                [provider_facts['network'].get(hostname_var)],
                facts['common'][hostname_var]
            )

    facts['provider'] = provider_facts
    return facts
# Disabling pylint too many branches. This function needs refactored
# but is a very core part of openshift_facts.
# pylint: disable=too-many-branches, too-many-nested-blocks
def merge_facts(orig, new, additive_facts_to_overwrite):
    """ Recursively merge facts dicts

        Args:
            orig (dict): existing facts
            new (dict): facts to update
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']
        Returns:
            dict: the merged facts (a new dict; inputs are deep-copied, not
                  mutated)
    """
    # Facts whose list values are appended (deduplicated) rather than replaced,
    # unless explicitly listed in additive_facts_to_overwrite.
    additive_facts = ['named_certificates']

    # Facts we do not ever want to merge. These originate in inventory variables
    # and contain JSON dicts. We don't ever want to trigger a merge
    # here, just completely overwrite with the new if they are present there.
    inventory_json_facts = ['admission_plugin_config',
                            'kube_admission_plugin_config',
                            'image_policy_config',
                            "builddefaults",
                            "buildoverrides"]

    facts = dict()
    for key, value in iteritems(orig):
        # Key exists in both old and new facts.
        if key in new:
            if key in inventory_json_facts:
                # Watchout for JSON facts that sometimes load as strings.
                # (can happen if the JSON contains a boolean)
                if isinstance(new[key], string_types):
                    facts[key] = yaml.safe_load(new[key])
                else:
                    facts[key] = copy.deepcopy(new[key])
            # Continue to recurse if old and new fact is a dictionary.
            elif isinstance(value, dict) and isinstance(new[key], dict):
                # Collect the subset of additive facts to overwrite if
                # key matches. These will be passed to the subsequent
                # merge_facts call.
                relevant_additive_facts = []
                for item in additive_facts_to_overwrite:
                    if '.' in item and item.startswith(key + '.'):
                        relevant_additive_facts.append(item)
                facts[key] = merge_facts(value, new[key], relevant_additive_facts)
            # Key matches an additive fact and we are not overwriting
            # it so we will append the new value to the existing value.
            elif key in additive_facts and key not in [x.split('.')[-1] for x in additive_facts_to_overwrite]:
                if isinstance(value, list) and isinstance(new[key], list):
                    # combine the lists, preserving order and dropping dupes
                    new_fact = []
                    for item in copy.deepcopy(value) + copy.deepcopy(new[key]):
                        if item not in new_fact:
                            new_fact.append(item)
                    facts[key] = new_fact
                # NOTE(review): when either side is not a list the key is
                # silently dropped from the result — confirm intended.
            # No other condition has been met. Overwrite the old fact
            # with the new value.
            else:
                facts[key] = copy.deepcopy(new[key])
        # Key isn't in new so add it to facts to keep it.
        else:
            facts[key] = copy.deepcopy(value)
    # Finally, bring over keys that only exist in new.
    new_keys = set(new.keys()) - set(orig.keys())
    for key in new_keys:
        # Watchout for JSON facts that sometimes load as strings.
        # (can happen if the JSON contains a boolean)
        if key in inventory_json_facts and isinstance(new[key], string_types):
            facts[key] = yaml.safe_load(new[key])
        else:
            facts[key] = copy.deepcopy(new[key])
    return facts
def save_local_facts(filename, facts):
    """ Save local facts

        Args:
            filename (str): local facts file
            facts (dict): facts to set
        Raises:
            OpenShiftFactsFileWriteError: when the directory or file cannot
                                          be created/written
    """
    try:
        fact_dir = os.path.dirname(filename)
        try:
            os.makedirs(fact_dir)  # try to make the directory
        except OSError as exception:
            if exception.errno != errno.EEXIST:  # but it is okay if it is already there
                raise  # pass any other exceptions up the chain
        with open(filename, 'w') as fact_file:
            fact_file.write(module.jsonify(facts))  # noqa: F405
        # facts may contain sensitive values; restrict to owner read/write
        os.chmod(filename, 0o600)
    except (IOError, OSError) as ex:
        raise OpenShiftFactsFileWriteError(
            "Could not create fact file: %s, error: %s" % (filename, ex)
        )
def get_local_facts_from_file(filename):
    """ Retrieve local facts from fact file

        The file may be legacy INI-style (converted to the nested-dict JSON
        layout) or JSON.

        Args:
            filename (str): local facts file
        Returns:
            dict: the retrieved facts; empty when the file is missing or
                  unparsable
    """
    local_facts = dict()
    try:
        # Handle conversion of INI style facts file to json style.
        # SafeConfigParser was deprecated in Python 3.2 and removed in 3.12;
        # keep it where available (preserving its Python 2 interpolation
        # behavior) and fall back to ConfigParser otherwise.  Previously the
        # bare SafeConfigParser reference raised AttributeError on 3.12+,
        # which escaped this try (it only catches parser errors).
        if hasattr(configparser, 'SafeConfigParser'):
            ini_facts = configparser.SafeConfigParser()
        else:
            ini_facts = configparser.ConfigParser()
        ini_facts.read(filename)
        for section in ini_facts.sections():
            local_facts[section] = dict()
            for key, value in ini_facts.items(section):
                local_facts[section][key] = value

    except (configparser.MissingSectionHeaderError,
            configparser.ParsingError):
        # Not INI — assume the newer JSON layout.
        try:
            with open(filename, 'r') as facts_file:
                local_facts = json.load(facts_file)
        except (ValueError, IOError):
            pass

    return local_facts
def sort_unique(alist):
    """ Return a sorted copy of *alist* with duplicates removed.

        Args:
            alist (list): a list
        Returns:
            list: a sorted de-duped list
    """
    return sorted(set(alist))
def safe_get_bool(fact):
    """ Get a boolean fact safely.

        Args:
            fact: fact to convert; coerced to str first, so booleans and
                  strings such as 'yes'/'no'/'true'/'1' are all accepted
        Returns:
            bool: given fact as a bool
        Raises:
            ValueError: propagated from strtobool for unrecognized values
    """
    return bool(strtobool(str(fact)))
def set_proxy_facts(facts):
    """ Set global proxy facts

        Normalizes common.no_proxy to a list, optionally extends it with etcd
        host IPs and generated internal hostnames, and joins it back into a
        sorted comma-separated string.

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'common' in facts:
        common = facts['common']
        if 'http_proxy' in common or 'https_proxy' in common or 'no_proxy' in common:
            # no_proxy is manipulated as a list and re-joined at the end
            if 'no_proxy' in common and isinstance(common['no_proxy'], string_types):
                common['no_proxy'] = common['no_proxy'].split(",")
            elif 'no_proxy' not in common:
                common['no_proxy'] = []

            # See https://bugzilla.redhat.com/show_bug.cgi?id=1466783
            # masters behind a proxy need to connect to etcd via IP
            if 'no_proxy_etcd_host_ips' in common:
                if isinstance(common['no_proxy_etcd_host_ips'], string_types):
                    common['no_proxy'].extend(common['no_proxy_etcd_host_ips'].split(','))

            if 'generate_no_proxy_hosts' in common and safe_get_bool(common['generate_no_proxy_hosts']):
                if 'no_proxy_internal_hostnames' in common:
                    common['no_proxy'].extend(common['no_proxy_internal_hostnames'].split(','))
                # We always add local dns domain and ourselves no matter what
                # ([1] of the portal network is the kubernetes service IP)
                kube_svc_ip = str(ipaddress.ip_network(text_type(common['portal_net']))[1])
                common['no_proxy'].append(kube_svc_ip)
                common['no_proxy'].append('.' + common['dns_domain'])
                common['no_proxy'].append('.svc')
                common['no_proxy'].append(common['hostname'])
            common['no_proxy'] = ','.join(sort_unique(common['no_proxy']))
        facts['common'] = common
    return facts
def set_builddefaults_facts(facts):
    """ Set build defaults including setting proxy values from http_proxy, https_proxy,
        no_proxy to the more specific builddefaults and builddefaults_git vars.
           1. http_proxy, https_proxy, no_proxy
           2. builddefaults_*
           3. builddefaults_git_*

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'builddefaults' in facts:
        builddefaults = facts['builddefaults']
        common = facts['common']
        # Copy values from common to builddefaults
        if 'http_proxy' not in builddefaults and 'http_proxy' in common:
            builddefaults['http_proxy'] = common['http_proxy']
        if 'https_proxy' not in builddefaults and 'https_proxy' in common:
            builddefaults['https_proxy'] = common['https_proxy']
        if 'no_proxy' not in builddefaults and 'no_proxy' in common:
            builddefaults['no_proxy'] = common['no_proxy']

        # Create git specific facts from generic values, if git specific values are
        # not defined.
        if 'git_http_proxy' not in builddefaults and 'http_proxy' in builddefaults:
            builddefaults['git_http_proxy'] = builddefaults['http_proxy']
        if 'git_https_proxy' not in builddefaults and 'https_proxy' in builddefaults:
            builddefaults['git_https_proxy'] = builddefaults['https_proxy']
        if 'git_no_proxy' not in builddefaults and 'no_proxy' in builddefaults:
            builddefaults['git_no_proxy'] = builddefaults['no_proxy']
        # If we're actually defining a builddefaults config then create admission_plugin_config
        # then merge builddefaults[config] structure into admission_plugin_config
        # 'config' is the 'openshift_builddefaults_json' inventory variable
        # NOTE(review): this assumes facts['master'] exists whenever a
        # builddefaults config is supplied (KeyError otherwise) — confirm.
        if 'config' in builddefaults:
            if 'admission_plugin_config' not in facts['master']:
                # Scaffold out the full expected datastructure
                facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
            facts['master']['admission_plugin_config'].update(builddefaults['config'])
            if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
                # drop env entries whose value is empty
                delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])

    return facts
def delete_empty_keys(keylist):
    """ Delete dictionary elements from keylist where "value" is empty, in place.

        Args:
            keylist(list): A list of builddefault configuration envs.
        Returns:
            none

        Example:
            keylist = [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
                       {'name': 'NO_PROXY', 'value': ''}]

            After calling delete_empty_keys the provided list is modified to become:

            [{'name': 'HTTP_PROXY', 'value': 'http://file.rdu.redhat.com:3128'},
             {'name': 'HTTPS_PROXY', 'value': 'http://file.rdu.redhat.com:3128'}]
    """
    # slice-assign so the caller's list object is mutated, matching the
    # in-place contract of the original del-based implementation
    keylist[:] = [entry for entry in keylist if len(entry['value']) != 0]
def set_buildoverrides_facts(facts):
    """ Set build overrides

        Args:
            facts(dict): existing facts
        Returns:
            facts(dict): Updated facts with missing values
    """
    if 'buildoverrides' not in facts:
        return facts

    buildoverrides = facts['buildoverrides']
    # If we're actually defining a buildoverrides config then create
    # admission_plugin_config and merge buildoverrides[config] into it.
    if 'config' in buildoverrides:
        facts['master'].setdefault('admission_plugin_config', dict())
        facts['master']['admission_plugin_config'].update(buildoverrides['config'])

    return facts
# pylint: disable=too-many-statements
def set_container_facts_if_unset(facts):
    """ Set containerized facts.

        Args:
            facts (dict): existing facts
        Returns:
            dict: the facts dict, currently returned unchanged
    """
    # NOTE(review): this is now a no-op, presumably kept so the fact-
    # generation pipeline in generate_facts stays unchanged — confirm it
    # can be removed.
    return facts
def pop_obsolete_local_facts(local_facts):
    """Remove unused keys from local_facts, in place."""
    # role -> keys no longer consumed anywhere
    obsolete = {
        'master': ('etcd_port', 'etcd_use_ssl', 'etcd_hosts')
    }
    for role, keys in obsolete.items():
        if role in local_facts:
            for key in keys:
                local_facts[role].pop(key, None)
class OpenShiftFactsInternalError(Exception):
    """Raised for internal errors while generating Origin facts."""
class OpenShiftFactsUnsupportedRoleError(Exception):
    """Raised when a role unknown to this module is requested."""
class OpenShiftFactsFileWriteError(Exception):
    """Raised when the local facts file cannot be written."""
class OpenShiftFactsMetadataUnavailableError(Exception):
    """Raised when cloud provider metadata cannot be retrieved."""
class OpenShiftFacts(object):
    """ Origin Facts

        Generates, merges and persists the openshift facts for a host by
        combining role defaults, detected cloud-provider metadata, previously
        saved local facts and user-supplied values.

        Attributes:
            facts (dict): facts for the host

        Args:
            module (AnsibleModule): an AnsibleModule object
            role (str): role for setting local facts
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                '.' notation ex: ['master.named_certificates']

        Raises:
            OpenShiftFactsUnsupportedRoleError: when role is not in known_roles
    """
    # Roles this module knows how to generate and validate facts for.
    known_roles = ['builddefaults',
                   'buildoverrides',
                   'cloudprovider',
                   'common',
                   'etcd',
                   'master',
                   'node']

    # Disabling too-many-arguments, this should be cleaned up as a TODO item.
    # pylint: disable=too-many-arguments,no-value-for-parameter
    def __init__(self, role, filename, local_facts,
                 additive_facts_to_overwrite=None):
        """Collect system facts and generate the merged facts for *role*.

        Args:
            role (str): role for setting local facts; must be in known_roles
            filename (str): local facts file to use
            local_facts (dict): local facts to set
            additive_facts_to_overwrite (list): additive facts to overwrite in
                jinja '.' notation ex: ['master.named_certificates']
        Raises:
            OpenShiftFactsUnsupportedRoleError: when role is not in known_roles
        """
        self.changed = False
        self.filename = filename
        if role not in self.known_roles:
            raise OpenShiftFactsUnsupportedRoleError(
                "Role %s is not supported by this module" % role
            )
        self.role = role

        # Collect system facts and preface each fact with 'ansible_'.
        try:
            # pylint: disable=too-many-function-args,invalid-name
            self.system_facts = ansible_facts(module, ['hardware', 'network', 'virtual', 'facter'])  # noqa: F405
            additional_facts = {}
            for (k, v) in self.system_facts.items():
                additional_facts["ansible_%s" % k.replace('-', '_')] = v
            self.system_facts.update(additional_facts)
        except UnboundLocalError:
            # ansible-2.2,2.3 expose system facts through a different API
            self.system_facts = get_all_facts(module)['ansible_facts']  # noqa: F405

        self.facts = self.generate_facts(local_facts,
                                         additive_facts_to_overwrite)
    def generate_facts(self,
                       local_facts,
                       additive_facts_to_overwrite):
        """ Generate facts

            Args:
                local_facts (dict): local_facts for overriding generated defaults
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The generated facts, wrapped as {'openshift': facts}
        """
        local_facts = self.init_local_facts(local_facts,
                                            additive_facts_to_overwrite)
        roles = local_facts.keys()

        if 'common' in local_facts and 'deployment_type' in local_facts['common']:
            deployment_type = local_facts['common']['deployment_type']
        else:
            deployment_type = 'origin'

        if 'common' in local_facts and 'deployment_subtype' in local_facts['common']:
            deployment_subtype = local_facts['common']['deployment_subtype']
        else:
            deployment_subtype = 'basic'

        defaults = self.get_defaults(roles, deployment_type, deployment_subtype)
        provider_facts = self.init_provider_facts()
        facts = apply_provider_facts(defaults, provider_facts)

        # local facts win over defaults and provider-detected values
        facts = merge_facts(facts,
                            local_facts,
                            additive_facts_to_overwrite)
        facts['current_config'] = get_current_config(facts)
        # derive the remaining facts; later steps may read values written by
        # earlier ones, so keep this ordering
        facts = set_url_facts_if_unset(facts)
        facts = set_identity_providers_if_unset(facts)
        facts = set_deployment_facts_if_unset(facts)
        facts = set_sdn_facts_if_unset(facts, self.system_facts)
        facts = set_container_facts_if_unset(facts)
        facts = build_controller_args(facts)
        facts = build_api_server_args(facts)
        facts = set_version_facts_if_unset(facts)
        facts = set_aggregate_facts(facts)
        facts = set_proxy_facts(facts)
        facts = set_builddefaults_facts(facts)
        facts = set_buildoverrides_facts(facts)
        facts = set_nodename(facts)
        return dict(openshift=facts)
    def get_defaults(self, roles, deployment_type, deployment_subtype):
        """ Get default fact values

            Args:
                roles (list): list of roles for this host
                deployment_type (str): deployment type, e.g. 'origin' or
                                       'openshift-enterprise'
                deployment_subtype (str): deployment subtype, e.g. 'basic'
            Returns:
                dict: The generated default facts
        """
        defaults = {}
        ip_addr = self.system_facts['ansible_default_ipv4']['address']
        # prefer `hostname -f` when it succeeds, then ansible's values
        exit_code, output, _ = module.run_command(['hostname', '-f'])  # noqa: F405
        hostname_f = output.strip() if exit_code == 0 else ''
        hostname_values = [hostname_f, self.system_facts['ansible_nodename'],
                           self.system_facts['ansible_fqdn']]
        hostname = choose_hostname(hostname_values, ip_addr).lower()

        defaults['common'] = dict(ip=ip_addr,
                                  public_ip=ip_addr,
                                  deployment_type=deployment_type,
                                  deployment_subtype=deployment_subtype,
                                  hostname=hostname,
                                  public_hostname=hostname,
                                  portal_net='172.30.0.0/16',
                                  dns_domain='cluster.local',
                                  config_base='/etc/origin')

        if 'master' in roles:
            defaults['master'] = dict(api_use_ssl=True, api_port='8443',
                                      controllers_port='8444',
                                      console_use_ssl=True,
                                      console_path='/console',
                                      console_port='8443',
                                      portal_net='172.30.0.0/16',
                                      embedded_kube=True,
                                      embedded_dns=True,
                                      bind_addr='0.0.0.0',
                                      session_max_seconds=3600,
                                      session_name='ssn',
                                      session_secrets_file='',
                                      access_token_max_seconds=86400,
                                      auth_token_max_seconds=500,
                                      oauth_grant_method='auto',
                                      dynamic_provisioning_enabled=True,
                                      max_requests_inflight=500)

        if 'cloudprovider' in roles:
            # kind=None means "no cloud provider configured"
            defaults['cloudprovider'] = dict(kind=None)

        return defaults
    def guess_host_provider(self):
        """ Guess the host provider

            Detection is based on BIOS vendor, virtualization facts and
            product strings; when a provider is recognized its metadata
            service is queried (sensitive entries are filtered out).

            Returns:
                dict: {'name': provider-or-None, 'metadata': dict-or-None}
        """
        # TODO: cloud provider facts should probably be submitted upstream
        product_name = self.system_facts['ansible_product_name']
        product_version = self.system_facts['ansible_product_version']
        virt_type = self.system_facts['ansible_virtualization_type']
        virt_role = self.system_facts['ansible_virtualization_role']
        bios_vendor = self.system_facts['ansible_system_vendor']
        provider = None
        metadata = None

        if bios_vendor == 'Google':
            provider = 'gce'
            metadata_url = ('http://metadata.google.internal/'
                            'computeMetadata/v1/?recursive=true')
            headers = {'Metadata-Flavor': 'Google'}
            metadata = get_provider_metadata(metadata_url, True, headers,
                                             True)

            # Filter sshKeys and serviceAccounts from gce metadata
            if metadata:
                metadata['project']['attributes'].pop('sshKeys', None)
                metadata['instance'].pop('serviceAccounts', None)
        elif bios_vendor == 'Amazon EC2':
            # Adds support for Amazon EC2 C5 instance types
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif virt_type == 'xen' and virt_role == 'guest' and re.match(r'.*\.amazon$', product_version):
            # older Xen-based EC2 instance types
            provider = 'aws'
            metadata_url = 'http://169.254.169.254/latest/meta-data/'
            metadata = get_provider_metadata(metadata_url)
        elif re.search(r'OpenStack', product_name):
            provider = 'openstack'
            metadata_url = ('http://169.254.169.254/openstack/latest/'
                            'meta_data.json')
            metadata = get_provider_metadata(metadata_url, True, None,
                                             True)
            if metadata:
                ec2_compat_url = 'http://169.254.169.254/latest/meta-data/'
                metadata['ec2_compat'] = get_provider_metadata(
                    ec2_compat_url
                )

                # disable pylint maybe-no-member because overloaded use of
                # the module name causes pylint to not detect that results
                # is an array or hash
                # pylint: disable=maybe-no-member
                # Filter public_keys and random_seed from openstack metadata
                metadata.pop('public_keys', None)
                metadata.pop('random_seed', None)

                # without the EC2-compatible endpoint we treat the metadata
                # as unusable
                if not metadata['ec2_compat']:
                    metadata = None

        return dict(name=provider, metadata=metadata)
    def init_provider_facts(self):
        """ Initialize the provider facts

            Guesses the host's cloud provider and normalizes whatever
            metadata was retrieved.

            Returns:
                dict: The normalized provider facts
        """
        provider_info = self.guess_host_provider()
        provider_facts = normalize_provider_facts(
            provider_info.get('name'),
            provider_info.get('metadata')
        )
        return provider_facts
# Disabling too-many-branches and too-many-locals.
# This should be cleaned up as a TODO item.
# pylint: disable=too-many-branches, too-many-locals
    def init_local_facts(self, facts=None,
                         additive_facts_to_overwrite=None):
        """ Initialize the local facts

            Args:
                facts (dict): local facts to set
                additive_facts_to_overwrite (list): additive facts to overwrite in jinja
                                                    '.' notation ex: ['master.named_certificates']
            Returns:
                dict: The result of merging the provided facts with existing
                      local facts
        """
        changed = False

        facts_to_set = dict()

        if facts is not None:
            facts_to_set[self.role] = facts

        local_facts = get_local_facts_from_file(self.filename)

        # migrate legacy fact names/layouts before merging
        migrated_facts = migrate_local_facts(local_facts)

        new_local_facts = merge_facts(migrated_facts,
                                      facts_to_set,
                                      additive_facts_to_overwrite)

        new_local_facts = self.remove_empty_facts(new_local_facts)
        pop_obsolete_local_facts(new_local_facts)

        # validate and persist only when something actually changed
        # (and skip the write entirely in check mode)
        if new_local_facts != local_facts:
            self.validate_local_facts(new_local_facts)
            changed = True
            if not module.check_mode:  # noqa: F405
                save_local_facts(self.filename, new_local_facts)

        self.changed = changed
        return new_local_facts
def remove_empty_facts(self, facts=None):
    """ Remove empty facts

    Recursively strips entries whose value is an empty string, a
    one-element list containing an empty string, or None. Nested dicts
    are cleaned in place (an emptied nested dict is kept, matching the
    historical behavior).

    Args:
        facts (dict): facts to clean
    Returns:
        dict: the same dict with empty leaf values removed
    """
    facts_to_remove = []
    # dict.items() behaves identically on Python 2 and 3 here, so the
    # six.iteritems shim is unnecessary. Deletion is deferred until after
    # iteration, so mutating the dict mid-loop is never an issue.
    for fact, value in facts.items():
        if isinstance(value, dict):
            # Recurse into nested fact dicts.
            facts[fact] = self.remove_empty_facts(value)
        elif value == "" or value == [""] or value is None:
            facts_to_remove.append(fact)
    for fact in facts_to_remove:
        del facts[fact]
    return facts
def validate_local_facts(self, facts=None):
    """ Validate local facts, failing the module run on any violation.

    Args:
        facts (dict): local facts to validate
    """
    problems = self.validate_master_facts(facts, dict())
    if not problems:
        return
    # Build a one-line-per-problem report and abort the module run.
    lines = ['Invalid facts detected:']
    for key, reason in problems.items():
        lines.append('{0}: {1}'.format(key, reason))
    module.fail_json(msg='\n'.join(lines) + '\n', changed=self.changed)  # noqa: F405
# disabling pylint errors for line-too-long since we're dealing
# with best effort reduction of error messages here.
# disabling errors for too-many-branches since we require checking
# many conditions.
# pylint: disable=line-too-long, too-many-branches
@staticmethod
def validate_master_facts(facts, invalid_facts):
""" Validate master facts
Args:
facts (dict): local facts to validate
invalid_facts (dict): collected invalid_facts
Returns:
dict: Invalid facts
"""
if 'master' in facts:
# openshift.master.session_auth_secrets
if 'session_auth_secrets' in facts['master']:
session_auth_secrets = facts['master']['session_auth_secrets']
if not issubclass(type(session_auth_secrets), list):
invalid_facts['session_auth_secrets'] = 'Expects session_auth_secrets is a list.'
elif 'session_encryption_secrets' not in facts['master']:
invalid_facts['session_auth_secrets'] = ('openshift_master_session_encryption secrets must be set '
'if openshift_master_session_auth_secrets is provided.')
elif len(session_auth_secrets) != len(facts['master']['session_encryption_secrets']):
invalid_facts['session_auth_secrets'] = ('openshift_master_session_auth_secrets and '
'openshift_master_session_encryption_secrets must be '
'equal length.')
else:
for secret in session_auth_secrets:
if len(secret) < 32:
invalid_facts['session_auth_secrets'] = ('Invalid secret in session_auth_secrets. '
'Secrets must be at least 32 characters in length.')
# openshift.master.session_encryption_secrets
if 'session_encryption_secrets' in facts['master']:
session_encryption_secrets = facts['master']['session_encryption_secrets']
if not issubclass(type(session_encryption_secrets), list):
invalid_facts['session_encryption_secrets'] = 'Expects session_encryption_secrets is a list.'
elif 'session_auth_secrets' not in facts['master']:
invalid_facts['session_encryption_secrets'] = ('openshift_master_session_auth_secrets must be '
'set if openshift_master_session_encryption_secrets '
'is provided.')
else:
for secret in session_encryption_secrets:
if len(secret) not in [16, 24, 32]:
invalid_facts['session_encryption_secrets'] = ('Invalid secret in session_encryption_secrets. '
'Secrets must be 16, 24, or 32 characters in length.')
return invalid_facts
def main():
    """ main

    Module entry point: builds the AnsibleModule, instantiates
    OpenShiftFacts for the requested role, applies file attributes to the
    persisted fact file, and exits with the collected facts.
    """
    # disabling pylint errors for global-variable-undefined and invalid-name
    # for 'global module' usage, since it is required to use ansible_facts
    # pylint: disable=global-variable-undefined, invalid-name
    global module
    module = AnsibleModule(  # noqa: F405
        argument_spec=dict(
            role=dict(default='common', required=False,
                      choices=OpenShiftFacts.known_roles),
            local_facts=dict(default=None, type='dict', required=False),
            additive_facts_to_overwrite=dict(default=[], type='list', required=False),
        ),
        supports_check_mode=True,
        add_file_common_args=True,
    )

    if not HAVE_DBUS:
        module.fail_json(msg="This module requires dbus python bindings")  # noqa: F405

    # Constrain Ansible's built-in fact gathering to the subsets this
    # module actually consumes.
    module.params['gather_subset'] = ['hardware', 'network', 'virtual', 'facter']  # noqa: F405
    module.params['gather_timeout'] = 10  # noqa: F405
    module.params['filter'] = '*'  # noqa: F405

    role = module.params['role']  # noqa: F405
    local_facts = module.params['local_facts']  # noqa: F405
    additive_facts_to_overwrite = module.params['additive_facts_to_overwrite']  # noqa: F405

    # Location where the locally-set facts are persisted between runs.
    fact_file = '/etc/ansible/facts.d/openshift.fact'

    openshift_facts = OpenShiftFacts(role,
                                     fact_file,
                                     local_facts,
                                     additive_facts_to_overwrite)

    # Apply ownership/mode arguments to the fact file; report changed if
    # either the file attributes or the facts themselves changed.
    file_params = module.params.copy()  # noqa: F405
    file_params['path'] = fact_file
    file_args = module.load_file_common_arguments(file_params)  # noqa: F405
    changed = module.set_fs_attributes_if_different(file_args,  # noqa: F405
                                                    openshift_facts.changed)

    return module.exit_json(changed=changed,  # noqa: F405
                            ansible_facts=openshift_facts.facts)
if __name__ == '__main__':
main()
| {
"content_hash": "6f6c8f9d27c474c2fd15ccb541b74450",
"timestamp": "",
"source": "github",
"line_count": 1684,
"max_line_length": 129,
"avg_line_length": 40.308194774346795,
"alnum_prop": 0.5727691922391314,
"repo_name": "openshift/openshift-tools",
"id": "c1968987c0ef6ab713590b229d2d2e650fe11de2",
"size": "68122",
"binary": false,
"copies": "4",
"ref": "refs/heads/prod",
"path": "openshift/installer/vendored/openshift-ansible-3.9.40/roles/openshift_facts/library/openshift_facts.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "24919"
},
{
"name": "Dockerfile",
"bytes": "10248"
},
{
"name": "Go",
"bytes": "127388"
},
{
"name": "Groovy",
"bytes": "6322"
},
{
"name": "HTML",
"bytes": "67678"
},
{
"name": "JavaScript",
"bytes": "9573"
},
{
"name": "Makefile",
"bytes": "1108"
},
{
"name": "PHP",
"bytes": "30017"
},
{
"name": "Python",
"bytes": "19774421"
},
{
"name": "Shell",
"bytes": "553874"
}
],
"symlink_target": ""
} |
"""add max tries column to task instance
Revision ID: cc1e65623dc7
Revises: 127d2bf2dfa7
Create Date: 2017-06-19 16:53:12.851141
"""
import sqlalchemy as sa
from alembic import op
from sqlalchemy import Column, Integer, String
from sqlalchemy.engine.reflection import Inspector
from sqlalchemy.ext.declarative import declarative_base
from airflow import settings
from airflow.models import DagBag
from airflow.models.base import COLLATION_ARGS
# revision identifiers, used by Alembic.
revision = 'cc1e65623dc7'
down_revision = '127d2bf2dfa7'
branch_labels = None
depends_on = None

# Declarative base for the minimal TaskInstance mapping defined below.
Base = declarative_base()
# Number of rows processed per commit during the data migration, to
# avoid loading the whole task_instance table into memory.
BATCH_SIZE = 5000
# String-column length for the task_id/dag_id columns.
ID_LEN = 250
class TaskInstance(Base):  # type: ignore
    """Task Instance class.

    Minimal mapping of the ``task_instance`` table containing only the
    columns this migration reads or writes.
    """

    __tablename__ = "task_instance"

    # Composite primary key: (task_id, dag_id, execution_date).
    task_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
    dag_id = Column(String(ID_LEN, **COLLATION_ARGS), primary_key=True)
    execution_date = Column(sa.DateTime, primary_key=True)
    # Columns backfilled/reset by this migration.
    max_tries = Column(Integer)
    try_number = Column(Integer, default=0)
def upgrade():
    """Add the ``max_tries`` column and backfill it for existing rows.

    Rows keep the server default of -1 until backfilled; the backfill sets
    max_tries from the task's configured retries (or freezes it at the
    current try_number when the DAG/task can no longer be parsed).
    """
    op.add_column('task_instance', sa.Column('max_tries', sa.Integer, server_default="-1"))
    # Check if table task_instance exist before data migration. This check is
    # needed for database that does not create table until migration finishes.
    # Checking task_instance table exists prevent the error of querying
    # non-existing task_instance table.
    connection = op.get_bind()
    inspector = Inspector.from_engine(connection)
    tables = inspector.get_table_names()

    if 'task_instance' in tables:
        # Get current session
        sessionmaker = sa.orm.sessionmaker()
        session = sessionmaker(bind=connection)
        if not bool(session.query(TaskInstance).first()):
            # Empty table: nothing to backfill.
            return
        dagbag = DagBag(settings.DAGS_FOLDER)
        # Count of rows still carrying the -1 server default, i.e. not yet
        # backfilled; drives the batch loop below.
        query = session.query(sa.func.count(TaskInstance.max_tries)).filter(TaskInstance.max_tries == -1)
        # Separate db query in batch to prevent loading entire table
        # into memory and cause out of memory error.
        while query.scalar():
            tis = session.query(TaskInstance).filter(TaskInstance.max_tries == -1).limit(BATCH_SIZE).all()
            for ti in tis:
                dag = dagbag.get_dag(ti.dag_id)
                if not dag or not dag.has_task(ti.task_id):
                    # task_instance table might not have the up-to-date
                    # information, i.e dag or task might be modified or
                    # deleted in dagbag but is reflected in task instance
                    # table. In this case we do not retry the task that can't
                    # be parsed.
                    ti.max_tries = ti.try_number
                else:
                    task = dag.get_task(ti.task_id)
                    if task.retries:
                        ti.max_tries = task.retries
                    else:
                        ti.max_tries = ti.try_number
                session.merge(ti)
            # Committing each batch shrinks the -1 filter so the loop advances.
            session.commit()
        # Commit the current session.
        session.commit()
def downgrade():
    """Reverse the ``max_tries`` backfill and drop the column.

    Before dropping the column, restores ``try_number`` to the count of
    self-retries already consumed so behavior matches the pre-upgrade
    schema.
    """
    engine = settings.engine
    if engine.dialect.has_table(engine, 'task_instance'):
        connection = op.get_bind()
        sessionmaker = sa.orm.sessionmaker()
        session = sessionmaker(bind=connection)
        dagbag = DagBag(settings.DAGS_FOLDER)
        # Rows with max_tries != -1 are the ones upgrade() backfilled.
        query = session.query(sa.func.count(TaskInstance.max_tries)).filter(TaskInstance.max_tries != -1)
        # Batch processing mirrors upgrade() to bound memory usage.
        while query.scalar():
            tis = session.query(TaskInstance).filter(TaskInstance.max_tries != -1).limit(BATCH_SIZE).all()
            for ti in tis:
                dag = dagbag.get_dag(ti.dag_id)
                if not dag or not dag.has_task(ti.task_id):
                    # Unparseable DAG/task: no retry budget can be computed.
                    ti.try_number = 0
                else:
                    task = dag.get_task(ti.task_id)
                    # max_tries - try_number is number of times a task instance
                    # left to retry by itself. So the current try_number should be
                    # max number of self retry (task.retries) minus number of
                    # times left for task instance to try the task.
                    ti.try_number = max(0, task.retries - (ti.max_tries - ti.try_number))
                ti.max_tries = -1
                session.merge(ti)
            session.commit()
        session.commit()
    op.drop_column('task_instance', 'max_tries')
| {
"content_hash": "b111ad8650d8ffd1d1ef8e2175701f1d",
"timestamp": "",
"source": "github",
"line_count": 111,
"max_line_length": 106,
"avg_line_length": 39.68468468468468,
"alnum_prop": 0.6152099886492622,
"repo_name": "apache/incubator-airflow",
"id": "95a32c0b443320b8f535de88d489c828e5848b6f",
"size": "5191",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "airflow/migrations/versions/cc1e65623dc7_add_max_tries_column_to_task_instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "69070"
},
{
"name": "Dockerfile",
"bytes": "2001"
},
{
"name": "HTML",
"bytes": "283783"
},
{
"name": "JavaScript",
"bytes": "1387552"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5482822"
},
{
"name": "Shell",
"bytes": "40957"
}
],
"symlink_target": ""
} |
import mock
import datetime
import random
from django.conf import settings
from django.test import SimpleTestCase, TestCase
from django.utils import timezone
from eligibility_calculator.models import CaseData, ModelMixin
from eligibility_calculator.exceptions import PropertyExpectedException
from cla_common.constants import (
ELIGIBILITY_STATES,
CONTACT_SAFETY,
THIRDPARTY_REASON,
THIRDPARTY_RELATIONSHIP,
ADAPTATION_LANGUAGES,
REQUIRES_ACTION_BY,
DIAGNOSIS_SCOPE,
EXEMPT_USER_REASON,
ECF_STATEMENT,
CASE_SOURCE,
)
from cla_common.money_interval.models import MoneyInterval
from cla_eventlog.constants import LOG_LEVELS
from core.tests.mommy_utils import make_recipe, make_user
from legalaid.models import (
Savings,
Income,
Deductions,
PersonalDetails,
ThirdPartyDetails,
AdaptationDetails,
Person,
Case,
EligibilityCheck,
Property,
CaseKnowledgebaseAssignment,
)
def walk(coll):
    """Return a generator for all atomic values in coll and its subcollections.

    An atomic value is one that's not iterable as determined by iter.
    Strings and bytes are additionally treated as atomic: iterating a
    one-character string yields that same string again, so probing them
    with iter() alone would recurse without bound.
    """
    # Bug fix: the original "try iter()" probe descended into strings,
    # and since iter("a") yields "a" itself, any string anywhere in the
    # collection caused infinite recursion (RecursionError).
    if isinstance(coll, (str, bytes)):
        yield coll
        return
    try:
        it = iter(coll)
    except TypeError:
        # Not iterable: this is an atomic value.
        yield coll
        return
    for item in it:
        for atom in walk(item):
            yield atom
def get_full_case(matter_type1, matter_type2, provider=None):
    """Build and return a fully-populated Case for tests.

    The case carries an eligible eligibility check with two disputed
    properties, personal/third-party/adaptation details, EOD details and a
    knowledgebase article assignment.
    """
    if not provider:
        provider = make_recipe("cla_provider.provider")
    organisation = make_recipe("call_centre.organisation")

    eligibility_check = make_recipe(
        "legalaid.eligibility_check_yes",
        disputed_savings=make_recipe("legalaid.savings"),
        on_passported_benefits=True,
        specific_benefits={"income_support": True},
        disregards={"criminal_injuries": True},
        on_nass_benefits=True,
        is_you_or_your_partner_over_60=True,
        has_partner=True,
        calculations={"disposable_income": 1000},
        has_passported_proceedings_letter=False,
    )
    # Two disputed main properties attached to the eligibility check.
    make_recipe(
        "legalaid.property",
        eligibility_check=eligibility_check,
        value=random.randint(1, 100),
        mortgage_left=random.randint(1, 100),
        share=random.randint(1, 100),
        disputed=True,
        main=True,
        _quantity=2,
    )
    log_entry = make_recipe("cla_eventlog.log")
    full_case = make_recipe(
        "legalaid.case",
        eligibility_check=eligibility_check,
        diagnosis=make_recipe("diagnosis.diagnosis_yes"),
        personal_details=make_recipe("legalaid.personal_details"),
        created_by=make_user(),
        requires_action_by=REQUIRES_ACTION_BY.PROVIDER_REVIEW,
        requires_action_at=timezone.now(),
        callback_attempt=2,
        locked_by=make_user(),
        locked_at=timezone.now(),
        provider=provider,
        notes="Notes",
        provider_notes="Provider Notes",
        thirdparty_details=make_recipe("legalaid.thirdparty_details"),
        adaptation_details=make_recipe("legalaid.adaptation_details"),
        billable_time=2000,
        matter_type1=matter_type1,
        matter_type2=matter_type2,
        media_code=make_recipe("legalaid.media_code"),
        outcome_code="outcome code",
        outcome_code_id=log_entry.pk,
        level=40,
        exempt_user=True,
        exempt_user_reason=EXEMPT_USER_REASON.ECHI,
        ecf_statement=ECF_STATEMENT.READ_OUT_MESSAGE,
        source=CASE_SOURCE.WEB,
        provider_viewed=timezone.now(),
        provider_accepted=timezone.now(),
        provider_closed=timezone.now(),
        provider_assigned_at=timezone.now(),
        is_urgent=True,
        organisation=organisation,
    )
    make_recipe("legalaid.eod_details", notes="EOD notes", case=full_case)
    CaseKnowledgebaseAssignment.objects.create(
        case=full_case, assigned_by=make_user(), alternative_help_article=make_recipe("knowledgebase.article")
    )
    return full_case
class EligibilityCheckTestCase(TestCase):
# def test_to_case_data_fail_without_your_finances(self):
# """
# Should fail as your_finances object is always needed
# """
# check = EligibilityCheck()
#
# self.assertRaises(ValueError, check.to_case_data)
def assertModelMixinEqual(self, obj1, obj2):
    """Assert that obj1 and obj2 expose equal values for every property
    declared in obj1's PROPERTY_META, recursing into nested ModelMixin
    values and comparing lists order-insensitively."""
    for prop in obj1.__class__.PROPERTY_META.keys():
        if hasattr(obj1, prop) or hasattr(obj2, prop):
            # getattr without a default raises AttributeError when only
            # one side has the attribute, surfacing the mismatch loudly.
            val1 = getattr(obj1, prop)
            val2 = getattr(obj2, prop)
            assert_func = self.assertEqual
            if isinstance(val1, list) or isinstance(val2, list):
                # Lists are compared ignoring element order.
                assert_func = self.assertItemsEqual
            if isinstance(val1, ModelMixin) or isinstance(val2, ModelMixin):
                self.assertModelMixinEqual(val1, val2)
                continue
            assert_func(val1, val2, u"%s: %s != %s" % (prop, val1, val2))
def test_to_case_data_without_partner(self):
"""
EligibilityCheck partner data won't be used during CaseData creation
"""
check = make_recipe(
"legalaid.eligibility_check",
category=make_recipe("legalaid.category", code="code"),
you=make_recipe(
"legalaid.person",
income=make_recipe(
"legalaid.income",
earnings=MoneyInterval("per_month", pennies=100),
self_employment_drawings=MoneyInterval("per_month", pennies=200),
benefits=MoneyInterval("per_month", pennies=300),
tax_credits=MoneyInterval("per_month", pennies=400),
child_benefits=MoneyInterval("per_month", pennies=500),
maintenance_received=MoneyInterval("per_month", pennies=600),
pension=MoneyInterval("per_month", pennies=700),
other_income=MoneyInterval("per_month", pennies=800),
self_employed=True,
),
savings=make_recipe(
"legalaid.savings", bank_balance=100, investment_balance=200, asset_balance=300, credit_balance=400
),
deductions=make_recipe(
"legalaid.deductions",
income_tax=MoneyInterval("per_month", pennies=600),
national_insurance=MoneyInterval("per_month", pennies=100),
maintenance=MoneyInterval("per_month", pennies=710),
childcare=MoneyInterval("per_month", pennies=715),
mortgage=MoneyInterval("per_month", pennies=700),
rent=MoneyInterval("per_month", pennies=20),
criminal_legalaid_contributions=730,
),
),
dependants_young=3,
dependants_old=2,
is_you_or_your_partner_over_60=True,
on_passported_benefits=True,
on_nass_benefits=False,
has_partner=False,
)
case_data = check.to_case_data()
self.assertModelMixinEqual(
case_data,
CaseData(
category="code",
facts={
"dependants_young": 3,
"dependants_old": 2,
"is_you_or_your_partner_over_60": True,
"on_passported_benefits": True,
"on_nass_benefits": False,
"has_partner": False,
"is_partner_opponent": False,
},
you={
"savings": {
"bank_balance": 100,
"investment_balance": 200,
"credit_balance": 400,
"asset_balance": 300,
},
"income": {
"earnings": 100,
"self_employment_drawings": 200,
"benefits": 300,
"tax_credits": 400,
"child_benefits": 500,
"maintenance_received": 600,
"pension": 700,
"other_income": 800,
"self_employed": True,
},
"deductions": {
"income_tax": 600,
"national_insurance": 100,
"maintenance": 710,
"childcare": 715,
"mortgage": 700,
"rent": 20,
"criminal_legalaid_contributions": 730,
},
},
property_data=[],
),
)
def test_to_case_data_with_partner_and_None_partner_child_benefits(self):
"""
EligibilityCheck partner data is used during CaseData creation
If partner.income.child_benefits is None, the to_case_data use a
default value (0). This is because that field is not exposed yet
(partner's child benefits can't be provided with).
"""
check = make_recipe(
"legalaid.eligibility_check",
category=make_recipe("legalaid.category", code="code"),
you=make_recipe(
"legalaid.person",
income=make_recipe(
"legalaid.income",
earnings=MoneyInterval("per_month", pennies=100),
self_employment_drawings=MoneyInterval("per_month", pennies=200),
benefits=MoneyInterval("per_month", pennies=300),
tax_credits=MoneyInterval("per_month", pennies=400),
child_benefits=MoneyInterval("per_month", pennies=500),
maintenance_received=MoneyInterval("per_month", pennies=600),
pension=MoneyInterval("per_month", pennies=700),
other_income=MoneyInterval("per_month", pennies=800),
self_employed=True,
),
savings=make_recipe(
"legalaid.savings", bank_balance=100, investment_balance=200, asset_balance=300, credit_balance=400
),
deductions=make_recipe(
"legalaid.deductions",
income_tax=MoneyInterval("per_month", pennies=600),
national_insurance=MoneyInterval("per_month", pennies=100),
maintenance=MoneyInterval("per_month", pennies=710),
childcare=MoneyInterval("per_month", pennies=715),
mortgage=MoneyInterval("per_month", pennies=700),
rent=MoneyInterval("per_month", pennies=20),
criminal_legalaid_contributions=730,
),
),
partner=make_recipe(
"legalaid.person",
income=make_recipe(
"legalaid.income",
earnings=MoneyInterval("per_month", pennies=101),
self_employment_drawings=MoneyInterval("per_month", pennies=201),
benefits=MoneyInterval("per_month", pennies=301),
tax_credits=MoneyInterval("per_month", pennies=401),
# child_beneficts will be None. Testing that the to_case_data sets a default 0 for
# this value.
child_benefits=None,
maintenance_received=MoneyInterval("per_month", pennies=601),
pension=MoneyInterval("per_month", pennies=701),
other_income=MoneyInterval("per_month", pennies=801),
self_employed=False,
),
savings=make_recipe(
"legalaid.savings", bank_balance=101, investment_balance=201, asset_balance=301, credit_balance=401
),
deductions=make_recipe(
"legalaid.deductions",
income_tax=MoneyInterval("per_month", pennies=700),
national_insurance=MoneyInterval("per_month", pennies=1),
maintenance=MoneyInterval("per_month", pennies=711),
childcare=MoneyInterval("per_month", pennies=716),
mortgage=MoneyInterval("per_month", pennies=720),
rent=MoneyInterval("per_month", pennies=1),
criminal_legalaid_contributions=731,
),
),
dependants_young=3,
dependants_old=2,
is_you_or_your_partner_over_60=True,
on_passported_benefits=True,
on_nass_benefits=False,
has_partner=True,
)
case_data = check.to_case_data()
self.assertModelMixinEqual(
case_data,
CaseData(
category="code",
facts={
"dependants_young": 3,
"dependants_old": 2,
"is_you_or_your_partner_over_60": True,
"on_passported_benefits": True,
"on_nass_benefits": False,
"has_partner": True,
"is_partner_opponent": False,
},
you={
"savings": {
"bank_balance": 100,
"investment_balance": 200,
"credit_balance": 400,
"asset_balance": 300,
},
"income": {
"earnings": 100,
"self_employment_drawings": 200,
"benefits": 300,
"tax_credits": 400,
"child_benefits": 500,
"maintenance_received": 600,
"pension": 700,
"other_income": 800,
"self_employed": True,
},
"deductions": {
"income_tax": 600,
"national_insurance": 100,
"maintenance": 710,
"childcare": 715,
"mortgage": 700,
"rent": 20,
"criminal_legalaid_contributions": 730,
},
},
partner={
"savings": {
"bank_balance": 101,
"investment_balance": 201,
"credit_balance": 401,
"asset_balance": 301,
},
"income": {
"earnings": 101,
"self_employment_drawings": 201,
"benefits": 301,
"tax_credits": 401,
"child_benefits": 0,
"maintenance_received": 601,
"pension": 701,
"other_income": 801,
"self_employed": False,
},
"deductions": {
"income_tax": 700,
"national_insurance": 1,
"maintenance": 711,
"childcare": 716,
"mortgage": 720,
"rent": 1,
"criminal_legalaid_contributions": 731,
},
},
property_data=[],
),
)
def test_to_case_data_with_partner_and_NOT_None_partner_child_benefits(self):
"""
EligibilityCheck partner data is used during CaseData creation
If partner.income.child_benefits is NOT None, the to_case_data will use
that value and will not override it with 0
"""
check = make_recipe(
"legalaid.eligibility_check",
category=make_recipe("legalaid.category", code="code"),
you=make_recipe(
"legalaid.person",
income=make_recipe(
"legalaid.income",
earnings=MoneyInterval("per_month", pennies=100),
self_employment_drawings=MoneyInterval("per_month", pennies=200),
benefits=MoneyInterval("per_month", pennies=300),
tax_credits=MoneyInterval("per_month", pennies=400),
child_benefits=MoneyInterval("per_month", pennies=500),
maintenance_received=MoneyInterval("per_month", pennies=600),
pension=MoneyInterval("per_month", pennies=700),
other_income=MoneyInterval("per_month", pennies=800),
self_employed=True,
),
savings=make_recipe(
"legalaid.savings", bank_balance=100, investment_balance=200, asset_balance=300, credit_balance=400
),
deductions=make_recipe(
"legalaid.deductions",
income_tax=MoneyInterval("per_month", pennies=600),
national_insurance=MoneyInterval("per_month", pennies=100),
maintenance=MoneyInterval("per_month", pennies=710),
childcare=MoneyInterval("per_month", pennies=715),
mortgage=MoneyInterval("per_month", pennies=700),
rent=MoneyInterval("per_month", pennies=20),
criminal_legalaid_contributions=730,
),
),
partner=make_recipe(
"legalaid.person",
income=make_recipe(
"legalaid.income",
earnings=MoneyInterval("per_month", pennies=101),
self_employment_drawings=MoneyInterval("per_month", pennies=201),
benefits=MoneyInterval("per_month", pennies=301),
tax_credits=MoneyInterval("per_month", pennies=401),
# child_beneficts is not None. Testing that the to_case_data doesn't
# override this value
child_benefits=MoneyInterval("per_month", pennies=501),
maintenance_received=MoneyInterval("per_month", pennies=601),
pension=MoneyInterval("per_month", pennies=701),
other_income=MoneyInterval("per_month", pennies=801),
self_employed=False,
),
savings=make_recipe(
"legalaid.savings", bank_balance=101, investment_balance=201, asset_balance=301, credit_balance=401
),
deductions=make_recipe(
"legalaid.deductions",
income_tax=MoneyInterval("per_month", pennies=700),
national_insurance=MoneyInterval("per_month", pennies=1),
maintenance=MoneyInterval("per_month", pennies=711),
childcare=MoneyInterval("per_month", pennies=716),
mortgage=MoneyInterval("per_month", pennies=720),
rent=MoneyInterval("per_month", pennies=1),
criminal_legalaid_contributions=731,
),
),
dependants_young=3,
dependants_old=2,
is_you_or_your_partner_over_60=True,
on_passported_benefits=True,
on_nass_benefits=False,
has_partner=True,
)
case_data = check.to_case_data()
self.assertModelMixinEqual(
case_data,
CaseData(
category="code",
facts={
"dependants_young": 3,
"dependants_old": 2,
"is_you_or_your_partner_over_60": True,
"on_passported_benefits": True,
"on_nass_benefits": False,
"has_partner": True,
"is_partner_opponent": False,
},
you={
"savings": {
"bank_balance": 100,
"investment_balance": 200,
"credit_balance": 400,
"asset_balance": 300,
},
"income": {
"earnings": 100,
"self_employment_drawings": 200,
"benefits": 300,
"tax_credits": 400,
"child_benefits": 500,
"maintenance_received": 600,
"pension": 700,
"other_income": 800,
"self_employed": True,
},
"deductions": {
"income_tax": 600,
"national_insurance": 100,
"maintenance": 710,
"childcare": 715,
"mortgage": 700,
"rent": 20,
"criminal_legalaid_contributions": 730,
},
},
partner={
"savings": {
"bank_balance": 101,
"investment_balance": 201,
"credit_balance": 401,
"asset_balance": 301,
},
"income": {
"earnings": 101,
"self_employment_drawings": 201,
"benefits": 301,
"tax_credits": 401,
"child_benefits": 501,
"maintenance_received": 601,
"pension": 701,
"other_income": 801,
"self_employed": False,
},
"deductions": {
"income_tax": 700,
"national_insurance": 1,
"maintenance": 711,
"childcare": 716,
"mortgage": 720,
"rent": 1,
"criminal_legalaid_contributions": 731,
},
},
property_data=[],
),
)
def test_validate(self):
check = make_recipe(
"legalaid.eligibility_check",
category=make_recipe("legalaid.category", code="code"),
you=make_recipe(
"legalaid.person",
income=make_recipe(
"legalaid.income",
earnings=MoneyInterval("per_month", pennies=500),
self_employment_drawings=MoneyInterval("per_month", pennies=200),
benefits=MoneyInterval("per_month", pennies=300),
tax_credits=MoneyInterval("per_month", pennies=400),
child_benefits=MoneyInterval("per_month", pennies=500),
maintenance_received=MoneyInterval("per_month", pennies=600),
pension=MoneyInterval("per_month", pennies=700),
other_income=MoneyInterval("per_month", pennies=600),
self_employed=True,
),
savings=make_recipe(
"legalaid.savings", bank_balance=100, investment_balance=200, asset_balance=300, credit_balance=400
),
deductions=make_recipe(
"legalaid.deductions",
income_tax=MoneyInterval("per_month", pennies=600),
national_insurance=MoneyInterval("per_month", pennies=100),
maintenance=MoneyInterval("per_month", pennies=710),
childcare=MoneyInterval("per_month", pennies=715),
mortgage=MoneyInterval("per_month", pennies=700),
rent=MoneyInterval("per_month", pennies=20),
criminal_legalaid_contributions=730,
),
),
dependants_young=3,
dependants_old=2,
is_you_or_your_partner_over_60=True,
on_passported_benefits=True,
has_partner=True,
)
expected = {
"warnings": {
"partner": {
"deductions": ['Field "deductions" is required'],
"income": ['Field "income" is required'],
"savings": ['Field "savings" is required'],
}
}
}
self.assertEqual(expected, check.validate())
check.you = None
expected2 = {
"warnings": {
"partner": {
"deductions": ['Field "deductions" is required'],
"income": ['Field "income" is required'],
"savings": ['Field "savings" is required'],
},
"you": {
"deductions": ['Field "deductions" is required'],
"income": ['Field "income" is required'],
"savings": ['Field "savings" is required'],
},
}
}
self.assertDictEqual(expected2, check.validate())
@mock.patch("legalaid.models.EligibilityChecker")
def test_update_state(self, MockedEligibilityChecker):
"""
calling .is_eligible() sequencially will:
1. through PropertyExpectedException
2. return True
3. return False
4. through PropertyExpectedException again
"""
mocked_checker = MockedEligibilityChecker()
mocked_checker.calcs = {}
mocked_checker.is_eligible.side_effect = [
PropertyExpectedException(),
True,
False,
PropertyExpectedException(),
]
# 1. PropertyExpectedException => UNKNOWN
check = make_recipe("legalaid.eligibility_check", state=ELIGIBILITY_STATES.UNKNOWN)
check.update_state()
self.assertEqual(check.state, ELIGIBILITY_STATES.UNKNOWN)
# 2. True => YES
check.update_state()
self.assertEqual(check.state, ELIGIBILITY_STATES.YES)
# 3. False => NO
check.update_state()
self.assertEqual(check.state, ELIGIBILITY_STATES.NO)
# 4. PropertyExpectedException => UNKNOWN
check.update_state()
self.assertEqual(check.state, ELIGIBILITY_STATES.UNKNOWN)
def test_has_passported_proceedings_letter(self):
    """Eligibility is UNKNOWN without a passported proceedings letter and
    becomes YES once the flag is set and saved."""
    check = make_recipe("legalaid.eligibility_check", has_passported_proceedings_letter=False)
    state, ec, reasons = check.get_eligibility_state()
    self.assertEqual(state, ELIGIBILITY_STATES.UNKNOWN)

    check.has_passported_proceedings_letter = True
    check.save()
    state, ec, reasons = check.get_eligibility_state()
    self.assertEqual(state, ELIGIBILITY_STATES.YES)
class CaseTestCase(TestCase):
def test_create_has_laa_reference(self):
case = make_recipe("legalaid.case")
# there is an LAA Reference
self.assertIsNotNone(case.laa_reference)
# it is valid as per algorithm
self.assertEqual(case.id + settings.LAA_REFERENCE_SEED, case.laa_reference)
# it is 7 digits long
self.assertEqual(len(unicode(case.laa_reference)), 7)
def test_case_doesnt_get_duplicate_reference(self):
    """_set_reference_if_necessary retries reference generation when the
    candidate is already taken, up to a bounded number of attempts."""
    with mock.patch("legalaid.models._make_reference") as mr:
        mr.return_value = "AA-1234-1234"
        c1 = Case()
        c1._set_reference_if_necessary()
        self.assertEqual(c1.reference, "AA-1234-1234")
        # Reference was free, so a single generation attempt suffices.
        self.assertEqual(mr.call_count, 1)
        c1.save()
    with mock.patch("legalaid.models._make_reference") as mr:
        # Always propose the already-taken reference to exhaust retries.
        mr.return_value = c1.reference
        c2 = Case()
        c2._set_reference_if_necessary()
        # After exhausting retries the duplicate is accepted as-is.
        self.assertEqual(c1.reference, c2.reference)
        self.assertTrue(mr.called)
        self.assertEqual(mr.call_count, 11)  # max retries + initial try
def test_assign_to_provider_overriding_provider(self):
providers = make_recipe("cla_provider.provider", _quantity=2)
case = make_recipe("legalaid.case", provider=providers[0])
self.assertTrue(case.provider)
case.assign_to_provider(providers[1])
def test_assign_to_provider_None(self):
provider = make_recipe("cla_provider.provider")
case = make_recipe("legalaid.case", provider=None)
self.assertFalse(case.provider)
case.assign_to_provider(provider)
self.assertEqual(case.provider, provider)
self.assertNotEqual(case.provider_assigned_at, None)
def test_assign_to_provider_resets_provider_viewed_accepted_closed(self):
providers = make_recipe("cla_provider.provider", _quantity=2)
case = make_recipe(
"legalaid.case",
provider=providers[0],
provider_viewed=timezone.now(),
provider_accepted=timezone.now(),
provider_assigned_at=timezone.now(),
provider_closed=timezone.now(),
)
self.assertTrue(case.provider)
self.assertNotEqual(case.provider_viewed, None)
self.assertNotEqual(case.provider_accepted, None)
self.assertNotEqual(case.provider_closed, None)
self.assertNotEqual(case.provider_assigned_at, None)
case.assign_to_provider(providers[1])
self.assertEqual(case.provider, providers[1])
self.assertEqual(case.provider_viewed, None)
self.assertEqual(case.provider_accepted, None)
self.assertEqual(case.provider_closed, None)
def test_assign_to_provider_resets_callback_info(self):
provider = make_recipe("cla_provider.provider")
case = make_recipe("legalaid.case", requires_action_at=timezone.now(), callback_attempt=2)
self.assertNotEqual(case.requires_action_at, None)
self.assertEqual(case.callback_attempt, 2)
case.assign_to_provider(provider)
self.assertEqual(case.provider, provider)
self.assertEqual(case.requires_action_at, None)
self.assertEqual(case.callback_attempt, 0)
def test_assign_alternative_help(self):
articles = make_recipe("knowledgebase.article", _quantity=10)
user = make_user()
case = make_recipe("legalaid.case", provider=None)
# assign some articles
self.assertListEqual(list(case.alternative_help_articles.all()), [])
case.assign_alternative_help(user, articles[:5])
self.assertListEqual(list(case.alternative_help_articles.all()), articles[:5])
# assign some more articles; originals should be gone
case.assign_alternative_help(user, articles[5:])
self.assertListEqual(list(case.alternative_help_articles.all()), articles[5:])
def test_assign_alternative_help_resets_callback_info(self):
articles = make_recipe("knowledgebase.article", _quantity=10)
user = make_user()
case = make_recipe("legalaid.case", provider=None, requires_action_at=timezone.now(), callback_attempt=2)
self.assertNotEqual(case.requires_action_at, None)
self.assertEqual(case.callback_attempt, 2)
# assign some articles
self.assertListEqual(list(case.alternative_help_articles.all()), [])
case.assign_alternative_help(user, articles[:5])
self.assertListEqual(list(case.alternative_help_articles.all()), articles[:5])
self.assertEqual(case.requires_action_at, None)
self.assertEqual(case.callback_attempt, 0)
def test_lock_doesnt_override_existing_lock(self):
import logging
# disabling logging temporarily
logging.disable(logging.CRITICAL)
users = make_user(_quantity=2)
case = make_recipe("legalaid.case", locked_by=users[0])
self.assertFalse(case.lock(users[1]))
self.assertEqual(case.locked_by, users[0])
# enabling logging back
logging.disable(logging.NOTSET)
def test_lock_without_saving(self):
    """lock(save=False) sets locked_by in memory but does not persist it."""
    user = make_user()
    case = make_recipe("legalaid.case")
    self.assertTrue(case.lock(user, save=False))
    self.assertEqual(case.locked_by, user)
    # The database row must be untouched (assertIsNone is the unittest
    # idiom for assertEqual(x, None)).
    db_case = Case.objects.get(pk=case.pk)
    self.assertIsNone(db_case.locked_by)
def test_lock_and_save(self):
    """Locking with the default save=True persists locked_by to the DB."""
    locking_user = make_user()
    case = make_recipe("legalaid.case")

    self.assertTrue(case.lock(locking_user))
    self.assertEqual(case.locked_by, locking_user)

    # Reload from the database to prove the lock was saved.
    reloaded = Case.objects.get(pk=case.pk)
    self.assertEqual(reloaded.locked_by, locking_user)
# CASE COUNT
def test_case_count_doesnt_updated_if_null_pd(self):
    """
    if case.personal_details == None:
        case.personal_details.case_count shouldn't get updated
    """
    case = make_recipe("legalaid.case")
    # NOTE(review): assertTrue(x, msg) treats the second argument as the
    # failure *message*, so this line only asserts that personal_details
    # is truthy -- the opposite of what the docstring describes. It looks
    # like assertIsNone(case.personal_details) was intended; confirm what
    # the "legalaid.case" recipe actually creates before changing it.
    self.assertTrue(case.personal_details, None)
def test_case_count_gets_updated_if_pd_not_null(self):
    """case_count on personal details tracks the number of attached cases."""
    details = make_recipe("legalaid.personal_details")
    self.assertEqual(details.case_count, 0)

    # First case attached to these personal details.
    make_recipe("legalaid.case", personal_details=details)
    self.assertEqual(details.case_count, 1)

    # Second case attached to the same personal details.
    make_recipe("legalaid.case", personal_details=details)
    self.assertEqual(details.case_count, 2)

    # A case attached to *different* personal details leaves the first
    # counter alone and bumps its own.
    other_details = make_recipe("legalaid.personal_details")
    make_recipe("legalaid.case", personal_details=other_details)
    self.assertEqual(details.case_count, 2)
    self.assertEqual(other_details.case_count, 1)
class CaseDatabaseTestCase(SimpleTestCase):
    """
    Explicitly save to database to test reload behavior. Manually clean up.
    """

    @classmethod
    def setUpClass(cls):
        # Call super() so SimpleTestCase's class-level setup/cleanups still
        # run; the original omitted it, which breaks class fixtures on
        # newer Django versions.
        super(CaseDatabaseTestCase, cls).setUpClass()
        cls.allow_database_queries = True

    @mock.patch("legalaid.models.logger")
    def test_log_denormalized_outcome_fields(self, mock_logger):
        """log_denormalized_outcome_fields warns on inconsistent outcome state."""
        case = make_recipe("legalaid.case")
        case.log_denormalized_outcome_fields()
        # Test missing fields logs warning. Note: assertEquals is a
        # deprecated alias of assertEqual.
        self.assertEqual(mock_logger.warning.call_count, 1)
        self.assertIn("LGA-275", str(mock_logger.warning.mock_calls))
        # Test occasional existing erroneous behavior logs warning
        case.outcome_code_id = 1
        case.level = LOG_LEVELS.HIGH
        case.save()
        case.log_denormalized_outcome_fields()
        self.assertEqual(mock_logger.warning.call_count, 2)
        self.assertIn("LGA-275 Outcome code missing", str(mock_logger.warning.mock_calls))
        # Test correct behaviour logs info
        case.outcome_code = "COPE"
        case.save()
        case.log_denormalized_outcome_fields()
        self.assertEqual(mock_logger.info.call_count, 1)
        # Manual clean-up: SimpleTestCase does not roll back DB writes.
        case.delete()
class MoneyIntervalFieldTestCase(TestCase):
    """Round-trip and conversion behaviour of MoneyInterval model fields."""

    def test_create_save_moneyinterval(self):
        """A weekly MoneyInterval survives a save/reload round trip."""
        weekly = MoneyInterval("per_week", pennies=5000)
        expected_monthly = int((5000.0 * 52.0) / 12.0)

        income = Income(earnings=weekly, other_income=weekly, self_employed=True)
        self.assertEqual(income.earnings.interval_period, "per_week")
        income.save()

        reloaded = Income.objects.get(id=income.id)
        earnings = reloaded.earnings
        self.assertEqual(earnings.interval_period, "per_week")
        self.assertEqual(earnings.per_interval_value, 5000)
        self.assertEqual(earnings.as_monthly(), expected_monthly)

    def test_annual_moneyinterval(self):
        """A yearly amount converts to one twelfth per month."""
        yearly = MoneyInterval(interval_period="per_year", pennies=1200000)
        self.assertEqual(yearly.as_monthly(), 100000)
# class ValidationModelMixinTestCase(TestCase):
#
# class Model1(models.Model):
# name = models.CharField(max_length=20, blank=True, null=True)
#
# class Model2(ValidateModelMixin, models.Model):
# name = models.CharField(max_length=20, blank=True, null=True)
#
# class Model3(ValidateModelMixin, models.Model):
#
# a = models.CharField(null=True, blank=True, max_length=100)
# b = models.CharField(null=True, blank=True, max_length=100)
# c = models.CharField(null=True, blank=True, max_length=100)
#
# def get_dependencies(self):
# return {'a', 'b', 'c'}
#
# class Model4(ValidateModelMixin, models.Model):
# related = models.ForeignKey('Model3')
#
# def get_dependencies(self):
# return {'related__a', 'related__b', 'related__c'}
#
# def setUp(self):
# super(ValidationModelMixinTestCase, self).setUp()
# self.model1 = self.Model1()
# self.model2 = self.Model2()
# self.model3 = self.Model3()
# self.model4 = self.Model4()
# self.model4.related = self.model3
#
# def test_mixin_worked(self):
# self.assertFalse(hasattr(self.model1, 'validate'))
# self.assertTrue(hasattr(self.model2, 'validate'))
# self.assertTrue(hasattr(self.model3, 'validate'))
#
# def test_not_impl_error(self):
# with self.assertRaises(NotImplementedError):
# self.model2.get_dependencies()
#
# def test_validate_all_invalid(self):
# expected = {'warnings': {'a': ['Field "a" is required'],
# 'b': ['Field "b" is required'],
# 'c': ['Field "c" is required']}}
# self.assertEqual(expected, self.model3.validate())
#
# def test_validate_partial_invalid(self):
# self.model3.a = 'a'
# self.model3.b = 'b'
#
# expected = {'warnings': { 'c': ['Field "c" is required']}}
# self.assertEqual(expected, self.model3.validate())
#
# def test_validate_none_invalid(self):
# self.model3.a = 'a'
# self.model3.b = 'b'
# self.model3.c = 'c'
#
# expected = {'warnings': {}}
# self.assertEqual(expected, self.model3.validate())
#
# def test_validate_nested_invalid(self):
# expected = {'warnings': {'related': {'a': ['Field "a" is required'],
# 'c': ['Field "c" is required'],
# 'b': ['Field "b" is required']}}}
#
# self.assertEqual(expected, self.model4.validate())
class CloneModelsTestCaseMixin(object):
def _check_model_fields(self, Model, obj, new_obj, non_equal_fields, equal_fields, check_not_None=False):
all_fields = non_equal_fields + equal_fields
self._check_model_fields_keys(Model, all_fields)
for field in non_equal_fields:
if check_not_None:
self.assertNotEqual(getattr(new_obj, field), None)
self.assertNotEqual(getattr(obj, field), getattr(new_obj, field))
for field in equal_fields:
if check_not_None:
self.assertNotEqual(getattr(new_obj, field), None, field)
self.assertEqual(getattr(obj, field), getattr(new_obj, field))
def _check_model_fields_keys(self, Model, expected_fields):
"""
This is a bit tedious but it's just to make sure that when fields are
added or removed from a model, the developer updates the cloning logic
of the related model if necessary.
Each object which extends CloneModelMixin has a `cloning_config`.
Just make sure that it's configured properly.
"""
actual_fields = [field.name for field in Model._meta.fields]
remoded_fields = set(expected_fields) - set(actual_fields)
added_fields = set(actual_fields) - set(expected_fields)
text = ""
if added_fields:
text = (
'It seems like you have added some fields "%s". '
"This model gets cloned by the split case logic so now it's "
"up to you do decide it these new fields have to be cloned, reset "
"or just referenced. \n"
"In order to do this, you need to look for the `cloning_config` of the model: \n"
"1. if it's a fk and you want to create a new copy (with new id), add it to the "
"clone_fks. Otherwise, if you want to reference the same copy, don't do anything"
"2. if you want to exclude it (the default value will be used "
"in the cloned object), add it to the `excludes`\n"
"3. if you want to use a different value in your cloned version, you need to populate"
"the `override_values` dinamically.\n"
"After done this, just add the new field to the list of expected fields "
"in this test and you're done! \n" % list(added_fields)
)
elif remoded_fields:
text = (
'It seems like you have removed some fields "%s" from your model. '
"All fine but just double-check that nothing is missing when cloning this "
"model by the split case logic. That means, double check the `cloning_config`"
"of your model.\n"
"After done this, just remove the old field from the list of expected fields "
"in this test and you're done!" % list(remoded_fields)
)
self.assertFalse(text, text)
class CloneModelsTestCase(CloneModelsTestCaseMixin, TestCase):
    """One test per clonable model: clone an instance and check which fields
    the test expects to match the original and which to differ."""

    def _test_clone(self, Model, instance_creator, non_equal_fields, equal_fields):
        # Generic driver: build one instance, clone it via the model's
        # `clone_from_obj`, and assert the per-field expectations.
        self.assertEqual(Model.objects.count(), 0)
        self.obj = instance_creator()
        self.cloned_obj = Model.clone_from_obj(self.obj.pk)
        self.assertEqual(Model.objects.count(), 2)
        self._check_model_fields(Model, self.obj, self.cloned_obj, non_equal_fields, equal_fields)

    def test_clone_savings(self):
        # All money fields copied; only identity/timestamps differ.
        self._test_clone(
            Model=Savings,
            instance_creator=lambda: make_recipe(
                "legalaid.savings", bank_balance=100, investment_balance=200, asset_balance=300, credit_balance=400
            ),
            non_equal_fields=["id", "created", "modified"],
            equal_fields=["bank_balance", "investment_balance", "asset_balance", "credit_balance"],
        )

    def test_clone_income(self):
        # Every MoneyInterval sub-field is expected to be copied verbatim.
        self._test_clone(
            Model=Income,
            instance_creator=lambda: make_recipe("legalaid.income", self_employed=True),
            non_equal_fields=["id", "created", "modified"],
            equal_fields=[
                "earnings_interval_period",
                "earnings_per_interval_value",
                "earnings",
                "other_income_interval_period",
                "other_income_per_interval_value",
                "other_income",
                "self_employment_drawings",
                "self_employment_drawings_per_interval_value",
                "self_employment_drawings_interval_period",
                "tax_credits",
                "tax_credits_interval_period",
                "tax_credits_per_interval_value",
                "maintenance_received",
                "maintenance_received_interval_period",
                "maintenance_received_per_interval_value",
                "benefits",
                "benefits_interval_period",
                "benefits_per_interval_value",
                "child_benefits",
                "child_benefits_interval_period",
                "child_benefits_per_interval_value",
                "pension",
                "pension_per_interval_value",
                "pension_interval_period",
                "self_employed",
            ],
        )

    def test_clone_deductions(self):
        self._test_clone(
            Model=Deductions,
            instance_creator=lambda: make_recipe("legalaid.deductions", criminal_legalaid_contributions=100),
            non_equal_fields=["id", "created", "modified"],
            equal_fields=[
                "income_tax_interval_period",
                "income_tax_per_interval_value",
                "income_tax",
                "national_insurance_interval_period",
                "national_insurance_per_interval_value",
                "national_insurance",
                "maintenance_interval_period",
                "maintenance_per_interval_value",
                "maintenance",
                "childcare_interval_period",
                "childcare_per_interval_value",
                "childcare",
                "mortgage_interval_period",
                "mortgage_per_interval_value",
                "mortgage",
                "rent_interval_period",
                "rent_per_interval_value",
                "rent",
                "criminal_legalaid_contributions",
            ],
        )

    def test_clone_personal_details(self):
        # `reference` and `case_count` are expected to differ on the clone;
        # all contact data is copied.
        self._test_clone(
            Model=PersonalDetails,
            instance_creator=lambda: make_recipe(
                "legalaid.personal_details",
                title="Title",
                full_name="Full name",
                postcode="Postcode",
                street="Street",
                mobile_phone="Mobile phone",
                home_phone="Home phone",
                email="email@email.com",
                date_of_birth=datetime.date(day=1, month=1, year=2000),
                ni_number="ni number",
                contact_for_research=True,
                contact_for_research_via="SMS",
                vulnerable_user=True,
                safe_to_contact=CONTACT_SAFETY.SAFE,
                case_count=2,
            ),
            non_equal_fields=["id", "created", "modified", "reference", "case_count"],
            equal_fields=[
                "title",
                "full_name",
                "postcode",
                "street",
                "mobile_phone",
                "home_phone",
                "email",
                "date_of_birth",
                "ni_number",
                "contact_for_research",
                "vulnerable_user",
                "safe_to_contact",
                "safe_to_email",
                "diversity",
                "diversity_modified",
                "search_field",
                "contact_for_research_via",
            ],
        )

    def test_clone_third_party(self):
        # The linked personal_details is expected to differ between
        # original and clone (listed in non_equal_fields).
        self._test_clone(
            Model=ThirdPartyDetails,
            instance_creator=lambda: make_recipe(
                "legalaid.thirdparty_details",
                pass_phrase="Pass phrase",
                reason=THIRDPARTY_REASON[0][0],
                personal_relationship=THIRDPARTY_RELATIONSHIP[0][0],
                personal_relationship_note="Relationship Notes",
                spoke_to=True,
                no_contact_reason="No Contact Reason",
                organisation_name="Organisation Name",
            ),
            non_equal_fields=["id", "created", "modified", "reference", "personal_details"],
            equal_fields=[
                "pass_phrase",
                "reason",
                "personal_relationship",
                "personal_relationship_note",
                "spoke_to",
                "no_contact_reason",
                "organisation_name",
            ],
        )

    def test_clone_adaptations(self):
        self._test_clone(
            Model=AdaptationDetails,
            instance_creator=lambda: make_recipe(
                "legalaid.adaptation_details",
                bsl_webcam=True,
                minicom=True,
                text_relay=True,
                skype_webcam=True,
                language=ADAPTATION_LANGUAGES[0][0],
                notes="Notes",
                callback_preference=True,
            ),
            non_equal_fields=["id", "created", "modified", "reference"],
            equal_fields=[
                "bsl_webcam",
                "minicom",
                "text_relay",
                "skype_webcam",
                "language",
                "notes",
                "callback_preference",
                "no_adaptations_required",
            ],
        )

    def test_clone_person(self):
        # The related income/savings/deductions are expected to point at
        # new objects on the clone (hence non-equal).
        self._test_clone(
            Model=Person,
            instance_creator=lambda: make_recipe("legalaid.full_person"),
            non_equal_fields=["id", "created", "modified", "income", "savings", "deductions"],
            equal_fields=[],
        )
class SplitCaseTestCase(CloneModelsTestCaseMixin, TestCase):
    """Tests for Case.split(): creating a new case for another category,
    assigned either internally (same provider) or back to the operator."""

    def build_category_data(self):
        # Bundle a category together with one matter type of each level.
        class CatData:
            def __init__(self):
                self.category = make_recipe("legalaid.category")
                self.matter_type1 = make_recipe("legalaid.matter_type1", category=self.category)
                self.matter_type2 = make_recipe("legalaid.matter_type2", category=self.category)

        return CatData()

    def setUp(self):
        super(SplitCaseTestCase, self).setUp()
        # cat1 is used for the original case, cat2 for the split-off case.
        self.cat1_data = self.build_category_data()
        self.cat2_data = self.build_category_data()
        self.user = make_user()

    def assertDiagnosis(self, diagnosis, category):
        # The split case gets a fresh, in-scope diagnosis for the new category.
        self.assertTrue(diagnosis)
        self.assertEqual(diagnosis.state, DIAGNOSIS_SCOPE.INSCOPE)
        self.assertEqual(diagnosis.category, category)

    def assertPersonalDetails(self, pd, new_pd):
        # Personal details are shared (not cloned), so the case counter
        # must now account for both cases.
        self.assertEqual(pd, new_pd)
        self.assertEqual(PersonalDetails.objects.get(pk=pd.pk).case_count, 2)

    def assertEligibilityCheck(self, ec, new_ec, category):
        # The eligibility check is cloned: new identity/related objects,
        # same answers; its category is switched to the split category.
        self._check_model_fields(
            EligibilityCheck,
            ec,
            new_ec,
            non_equal_fields=[
                "id",
                "modified",
                "created",
                "reference",
                "category",
                "you",
                "partner",
                "disputed_savings",
            ],
            equal_fields=[
                "your_problem_notes",
                "notes",
                "state",
                "dependants_young",
                "dependants_old",
                "on_passported_benefits",
                "on_nass_benefits",
                "is_you_or_your_partner_over_60",
                "has_partner",
                "calculations",
                "specific_benefits",
                "disregards",
                "has_passported_proceedings_letter",
            ],
            check_not_None=True,
        )
        self.assertEqual(new_ec.category, category)

        # Properties are cloned one-for-one, in order.
        props = list(ec.property_set.all())
        new_props = list(new_ec.property_set.all())
        self.assertEqual(len(props), len(new_props))
        self.assertTrue(len(new_props) > 0)
        for prop, new_prop in zip(props, new_props):
            self.assertProperty(prop, new_prop)

    def assertProperty(self, prop, new_prop):
        # A cloned property keeps its values but belongs to the new check.
        self._check_model_fields(
            Property,
            prop,
            new_prop,
            non_equal_fields=["id", "created", "modified", "eligibility_check"],
            equal_fields=["value", "mortgage_left", "share", "disputed", "main"],
            check_not_None=True,
        )

    def assertAlternativeHelpArticles(self, case, new_case):
        # Knowledgebase assignments are copied to the new case, keeping the
        # same article and assigning user.
        kas = list(case.caseknowledgebaseassignment_set.all())
        new_kas = list(new_case.caseknowledgebaseassignment_set.all())
        self.assertEqual(len(kas), len(new_kas))
        self.assertTrue(len(new_kas) > 0)
        for ka, new_ka in zip(kas, new_kas):
            self.assertNotEqual(ka.case, new_ka.case)
            self.assertEqual(ka.alternative_help_article, new_ka.alternative_help_article)
            self.assertNotEqual(new_ka.alternative_help_article, None)
            self.assertEqual(ka.assigned_by, new_ka.assigned_by)
            self.assertNotEqual(new_ka.assigned_by, None)

    def test_split_bare_minimum_case(self):
        # Splitting an empty case: the new case gets fresh references, a
        # diagnosis for the new category, and cleared/default fields.
        case = make_recipe("legalaid.empty_case")
        new_case = case.split(
            user=self.user,
            category=self.cat2_data.category,
            matter_type1=self.cat2_data.matter_type1,
            matter_type2=self.cat2_data.matter_type2,
            assignment_internal=False,
        )
        self.assertNotEqual(case.reference, new_case.reference)
        self.assertDiagnosis(new_case.diagnosis, self.cat2_data.category)
        self.assertEqual(case.personal_details, None)
        self.assertEqual(new_case.created_by, self.user)
        self.assertEqual(new_case.requires_action_by, REQUIRES_ACTION_BY.OPERATOR)
        self.assertEqual(new_case.notes, "")
        self.assertEqual(new_case.provider_notes, "")
        self.assertNotEqual(case.laa_reference, new_case.laa_reference)
        self.assertEqual(new_case.billable_time, 0)
        self.assertEqual(new_case.matter_type1, self.cat2_data.matter_type1)
        self.assertEqual(new_case.matter_type2, self.cat2_data.matter_type2)
        self.assertEqual(new_case.alternative_help_articles.count(), 0)
        # Fields with no source data stay None...
        for field in [
            "eligibility_check",
            "locked_by",
            "locked_at",
            "provider",
            "thirdparty_details",
            "adaptation_details",
            "media_code",
            "level",
            "exempt_user",
            "exempt_user_reason",
            "ecf_statement",
            "provider_viewed",
            "provider_accepted",
            "provider_closed",
        ]:
            self.assertEqual(getattr(new_case, field), None)
        # ...while char fields default to the empty string.
        for field in ["outcome_code"]:
            self.assertEqual(getattr(new_case, field), "")

    def _test_split_full_case(self, internal):
        # Shared scenario for internal/external assignment: a fully
        # populated case is split and every field expectation is checked.
        case = get_full_case(self.cat1_data.matter_type1, self.cat1_data.matter_type2)
        CaseKnowledgebaseAssignment.objects.create(
            case=case, assigned_by=make_user(), alternative_help_article=make_recipe("knowledgebase.article")
        )
        new_case = case.split(
            user=self.user,
            category=self.cat2_data.category,
            matter_type1=self.cat2_data.matter_type1,
            matter_type2=self.cat2_data.matter_type2,
            assignment_internal=internal,
        )
        non_equal_fields = [
            "id",
            "created",
            "modified",
            "eligibility_check",
            "diagnosis",
            "created_by",
            "locked_by",
            "locked_at",
            "thirdparty_details",
            "adaptation_details",
            "billable_time",
            "matter_type1",
            "matter_type2",
            "outcome_code",
            "level",
            "reference",
            "laa_reference",
            "from_case",
            "outcome_code_id",
            "requires_action_at",
            "callback_attempt",
            "provider_viewed",
            "provider_accepted",
            "provider_closed",
            "search_field",
            "is_urgent",
        ]
        equal_fields = [
            "personal_details",
            "notes",
            "provider_notes",
            "media_code",
            "exempt_user",
            "exempt_user_reason",
            "ecf_statement",
            "source",
            "complaint_flag",
            "organisation",
            "callback_window_type",
        ]
        # Internal assignment keeps the provider/assignment data; external
        # assignment hands the case back to the operator.
        if internal:
            equal_fields += ["provider", "requires_action_by", "provider_assigned_at", "assigned_out_of_hours"]
        else:
            non_equal_fields += ["provider", "requires_action_by", "provider_assigned_at", "assigned_out_of_hours"]

        self._check_model_fields(Case, case, new_case, non_equal_fields, equal_fields)
        self.assertEligibilityCheck(case.eligibility_check, new_case.eligibility_check, self.cat2_data.category)
        self.assertDiagnosis(new_case.diagnosis, self.cat2_data.category)
        self.assertPersonalDetails(case.personal_details, new_case.personal_details)
        self.assertAlternativeHelpArticles(case, new_case)

        # Related objects exist on the new case but are distinct clones.
        for field in ["eligibility_check", "diagnosis", "thirdparty_details", "adaptation_details"]:
            self.assertNotEqual(getattr(new_case, field), None)
            self.assertNotEqual(getattr(case, field), getattr(new_case, field))

        # EOD details are not carried over to the new case.
        with self.assertRaises(Case.eod_details.RelatedObjectDoesNotExist):
            new_case.eod_details.notes = ""

        expected_values = {
            "created_by": self.user,
            "locked_by": None,
            "locked_at": None,
            "billable_time": 0,
            "matter_type1": self.cat2_data.matter_type1,
            "matter_type2": self.cat2_data.matter_type2,
            "outcome_code": "",
            "outcome_code_id": None,
            "level": None,
            "requires_action_at": None,
            "callback_attempt": 0,
            "is_urgent": False,
            # it should keep these values from the original case
            "notes": case.notes,
            "provider_notes": case.provider_notes,
            "media_code": case.media_code,
            "source": case.source,
            "exempt_user": case.exempt_user,
            "exempt_user_reason": case.exempt_user_reason,
            "ecf_statement": case.ecf_statement,
            "personal_details": case.personal_details,
            "from_case": case,
        }
        if internal:
            expected_values.update(
                {
                    "requires_action_by": case.requires_action_by,
                    "provider": case.provider,
                    "provider_assigned_at": case.provider_assigned_at,
                }
            )
        else:
            expected_values.update(
                {"requires_action_by": REQUIRES_ACTION_BY.OPERATOR, "provider": None, "provider_assigned_at": None}
            )
        for field, value in expected_values.items():
            self.assertEqual(getattr(new_case, field), value)

    def test_split_full_case_internal_assignment(self):
        self._test_split_full_case(internal=True)

    def test_split_full_case_external_assignment(self):
        self._test_split_full_case(internal=False)
| {
"content_hash": "f29c73c01b3fcec496ae65dda98a02c4",
"timestamp": "",
"source": "github",
"line_count": 1454,
"max_line_length": 119,
"avg_line_length": 39.72352132049519,
"alnum_prop": 0.5377783164236989,
"repo_name": "ministryofjustice/cla_backend",
"id": "52c604155fce3dc960f392053cbd464454e7461d",
"size": "57758",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cla_backend/apps/legalaid/tests/test_models.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "45941"
},
{
"name": "Dockerfile",
"bytes": "1272"
},
{
"name": "HTML",
"bytes": "14794"
},
{
"name": "JavaScript",
"bytes": "2762"
},
{
"name": "Mustache",
"bytes": "3607"
},
{
"name": "Python",
"bytes": "1577558"
},
{
"name": "Shell",
"bytes": "11204"
},
{
"name": "Smarty",
"bytes": "283906"
}
],
"symlink_target": ""
} |
"""Conditions / splits for non-leaf nodes.
A condition (e.g. a>0.5) is evaluated to a binary value (e.g. True if a=5).
Condition evaluations control the branching of an example in a tree.
"""
import abc
from typing import List, Union, Optional
import six
from tensorflow_decision_forests.component.py_tree import dataspec as dataspec_lib
from yggdrasil_decision_forests.dataset import data_spec_pb2
from yggdrasil_decision_forests.model.decision_tree import decision_tree_pb2
ColumnType = data_spec_pb2.ColumnType
SimpleColumnSpec = dataspec_lib.SimpleColumnSpec
@six.add_metaclass(abc.ABCMeta)
class AbstractCondition(object):
  """Base class for all node conditions.

  Attrs:
    missing_evaluation: Result of evaluating the condition when the feature
      is missing. If None, either the feature cannot be missing or a
      dedicated inference-time mechanism handles missing values.
  """

  def __init__(self,
               missing_evaluation: Optional[bool],
               split_score: Optional[float] = None):
    self._missing_evaluation = missing_evaluation
    self._split_score = split_score

  @property
  def missing_evaluation(self):
    return self._missing_evaluation

  @property
  def split_score(self) -> Optional[float]:
    return self._split_score

  @abc.abstractmethod
  def features(self) -> List[SimpleColumnSpec]:
    """List of features used to evaluate the condition."""
    pass

  def __repr__(self):
    details = (f"{self.features()}",
               f"missing_evaluation={self._missing_evaluation}",
               f"split_score={self._split_score}")
    return "AbstractCondition(" + ", ".join(details) + ")"
class IsMissingInCondition(AbstractCondition):
  """Condition of the form "attribute is missing"."""

  def __init__(self,
               feature: SimpleColumnSpec,
               split_score: Optional[float] = None):
    # Missing-ness is what the condition tests, so `missing_evaluation`
    # itself does not apply and is fixed to None.
    super(IsMissingInCondition, self).__init__(None, split_score)
    self._feature = feature

  def features(self):
    return [self._feature]

  def __repr__(self):
    return f"({self._feature.name} is missing, score={self._split_score})"

  def __eq__(self, other):
    return (isinstance(other, IsMissingInCondition) and
            self._feature == other._feature)

  @property
  def feature(self):
    return self._feature
class IsTrueCondition(AbstractCondition):
  """Condition of the form "attribute is true"."""

  def __init__(self,
               feature: SimpleColumnSpec,
               missing_evaluation: Optional[bool],
               split_score: Optional[float] = None):
    super(IsTrueCondition, self).__init__(missing_evaluation, split_score)
    self._feature = feature

  def features(self):
    return [self._feature]

  def __repr__(self):
    return (f"({self._feature.name} is true; "
            f"miss={self.missing_evaluation}, score={self._split_score})")

  def __eq__(self, other):
    return (isinstance(other, IsTrueCondition) and
            self._feature == other._feature)

  @property
  def feature(self):
    return self._feature
class NumericalHigherThanCondition(AbstractCondition):
  """Condition of the form "attribute >= threshold"."""

  def __init__(self,
               feature: SimpleColumnSpec,
               threshold: float,
               missing_evaluation: Optional[bool],
               split_score: Optional[float] = None):
    super(NumericalHigherThanCondition, self).__init__(missing_evaluation,
                                                       split_score)
    self._feature = feature
    self._threshold = threshold

  def features(self):
    return [self._feature]

  def __repr__(self):
    comparison = f"{self._feature.name} >= {self._threshold}"
    return (f"({comparison}; miss={self.missing_evaluation}, "
            f"score={self._split_score})")

  def __eq__(self, other):
    if not isinstance(other, NumericalHigherThanCondition):
      return False
    return ((self._feature, self._threshold) ==
            (other._feature, other._threshold))

  @property
  def feature(self):
    return self._feature

  @property
  def threshold(self):
    return self._threshold
class CategoricalIsInCondition(AbstractCondition):
  """Condition of the form "attribute in [...set of items...]"."""

  def __init__(self,
               feature: SimpleColumnSpec,
               mask: Union[List[str], List[int]],
               missing_evaluation: Optional[bool],
               split_score: Optional[float] = None):
    super(CategoricalIsInCondition, self).__init__(missing_evaluation,
                                                   split_score)
    self._feature = feature
    self._mask = mask

  def features(self):
    return [self._feature]

  def __repr__(self):
    membership = f"{self._feature.name} in {self._mask}"
    return (f"({membership}; miss={self.missing_evaluation}, "
            f"score={self._split_score})")

  def __eq__(self, other):
    if not isinstance(other, CategoricalIsInCondition):
      return False
    return (self._feature, self._mask) == (other._feature, other._mask)

  @property
  def feature(self):
    return self._feature

  @property
  def mask(self):
    return self._mask
class CategoricalSetContainsCondition(AbstractCondition):
  """Condition of the form "attribute intersect [...set of items...]!=empty"."""

  def __init__(self,
               feature: SimpleColumnSpec,
               mask: Union[List[str], List[int]],
               missing_evaluation: Optional[bool],
               split_score: Optional[float] = None):
    super(CategoricalSetContainsCondition,
          self).__init__(missing_evaluation, split_score)
    self._feature = feature
    self._mask = mask

  def features(self):
    return [self._feature]

  def __repr__(self):
    overlap = f"{self._feature.name} intersect {self._mask} != empty"
    return (f"({overlap}; miss={self.missing_evaluation}, "
            f"score={self._split_score})")

  def __eq__(self, other):
    if not isinstance(other, CategoricalSetContainsCondition):
      return False
    return (self._feature, self._mask) == (other._feature, other._mask)

  @property
  def feature(self):
    return self._feature

  @property
  def mask(self):
    return self._mask
class NumericalSparseObliqueCondition(AbstractCondition):
  """Condition of the form "attributes * weights >= threshold"."""

  def __init__(self,
               features: List[SimpleColumnSpec],
               weights: List[float],
               threshold: float,
               missing_evaluation: Optional[bool],
               split_score: Optional[float] = None):
    super(NumericalSparseObliqueCondition,
          self).__init__(missing_evaluation, split_score)
    self._features = features
    self._weights = weights
    self._threshold = threshold

  def features(self):
    return self._features

  def __repr__(self):
    projection = f"{self._features} . {self._weights}"
    return (f"({projection} >= {self._threshold}; "
            f"miss={self.missing_evaluation}, score={self._split_score})")

  def __eq__(self, other):
    if not isinstance(other, NumericalSparseObliqueCondition):
      return False
    return ((self._features, self._weights, self._threshold) ==
            (other._features, other._weights, other._threshold))

  @property
  def weights(self):
    return self._weights

  @property
  def threshold(self):
    return self._threshold
def core_condition_to_condition(
    core_condition: decision_tree_pb2.NodeCondition,
    dataspec: data_spec_pb2.DataSpecification) -> AbstractCondition:
  """Converts a condition from the core (proto) to the python format.

  Args:
    core_condition: Node condition proto to convert.
    dataspec: Dataspec of the model, used to resolve column specs and
      categorical dictionary values.

  Returns:
    The equivalent AbstractCondition subclass instance.

  Raises:
    ValueError: If the condition type is not supported.
  """
  condition_type = core_condition.condition
  attribute = dataspec_lib.make_simple_column_spec(dataspec,
                                                   core_condition.attribute)
  column_spec = dataspec.columns[core_condition.attribute]

  # "attribute is missing": na_value does not apply for this type.
  if condition_type.HasField("na_condition"):
    return IsMissingInCondition(attribute, core_condition.split_score)

  # "attribute >= threshold".
  if condition_type.HasField("higher_condition"):
    return NumericalHigherThanCondition(
        attribute, condition_type.higher_condition.threshold,
        core_condition.na_value, core_condition.split_score)

  # "attribute is true" (boolean feature).
  if condition_type.HasField("true_value_condition"):
    return IsTrueCondition(attribute, core_condition.na_value,
                           core_condition.split_score)

  # Categorical mask encoded as a bitmap over dictionary indices.
  if condition_type.HasField("contains_bitmap_condition"):
    items = column_spec_bitmap_to_items(
        dataspec.columns[core_condition.attribute],
        condition_type.contains_bitmap_condition.elements_bitmap)
    if attribute.type == ColumnType.CATEGORICAL:
      return CategoricalIsInCondition(attribute, items, core_condition.na_value,
                                      core_condition.split_score)
    elif attribute.type == ColumnType.CATEGORICAL_SET:
      return CategoricalSetContainsCondition(attribute, items,
                                             core_condition.na_value,
                                             core_condition.split_score)
    # NOTE(review): any other column type silently falls through to the
    # remaining checks and ends in the ValueError below.

  # Categorical mask encoded as an explicit list of dictionary indices.
  if condition_type.HasField("contains_condition"):
    items = condition_type.contains_condition.elements
    if not column_spec.categorical.is_already_integerized:
      # Map the integer indices back to their string values.
      items = [
          dataspec_lib.categorical_value_idx_to_value(column_spec, item)
          for item in items
      ]
    if attribute.type == ColumnType.CATEGORICAL:
      return CategoricalIsInCondition(attribute, items, core_condition.na_value,
                                      core_condition.split_score)
    elif attribute.type == ColumnType.CATEGORICAL_SET:
      return CategoricalSetContainsCondition(attribute, items,
                                             core_condition.na_value,
                                             core_condition.split_score)

  # Threshold stored as a discretized bin; convert back to a float value.
  if condition_type.HasField("discretized_higher_condition"):
    threshold = dataspec_lib.discretized_numerical_to_numerical(
        column_spec, condition_type.discretized_higher_condition.threshold)
    return NumericalHigherThanCondition(attribute, threshold,
                                        core_condition.na_value,
                                        core_condition.split_score)

  # Sparse oblique split: weighted sum over several numerical attributes.
  if condition_type.HasField("oblique_condition"):
    attributes = [
        dataspec_lib.make_simple_column_spec(dataspec, attribute_idx)
        for attribute_idx in condition_type.oblique_condition.attributes
    ]
    return NumericalSparseObliqueCondition(
        attributes, list(condition_type.oblique_condition.weights),
        condition_type.oblique_condition.threshold, core_condition.na_value,
        core_condition.split_score)

  raise ValueError(f"Non supported condition type: {core_condition}")
def column_spec_bitmap_to_items(column_spec: data_spec_pb2.Column,
                                bitmap: bytes) -> Union[List[int], List[str]]:
  """Converts a mask-bitmap into a list of elements.

  Bit i of `bitmap` (little-endian within each byte) selects dictionary
  item i of the categorical column.
  """
  num_values = column_spec.categorical.number_of_unique_values
  return [
      dataspec_lib.categorical_value_idx_to_value(column_spec, value_idx)
      for value_idx in range(num_values)
      if bitmap[value_idx // 8] & (1 << (value_idx % 8))
  ]
def column_spec_items_to_bitmap(column_spec: data_spec_pb2.Column,
                                items: List[int]) -> bytes:
  """Converts a list of elements into a mask-bitmap.

  The bitmap has one bit per dictionary value of the categorical column;
  bit i is set iff i appears in `items`.
  """
  num_bytes = (column_spec.categorical.number_of_unique_values + 7) // 8
  bitmap = bytearray(num_bytes)
  for item in items:
    byte_idx, bit_idx = divmod(item, 8)
    bitmap[byte_idx] |= 1 << bit_idx
  return bytes(bitmap)
def set_core_node(condition: AbstractCondition,
                  dataspec: data_spec_pb2.DataSpecification,
                  core_node: decision_tree_pb2.Node):
  """Sets a core node (proto format) from a python value.

  Args:
    condition: Python condition to serialize.
    dataspec: Dataspec used to resolve column indices and categorical items.
    core_node: Destination node proto; its `condition` field is populated.

  Raises:
    ValueError: If the condition has no features.
    NotImplementedError: If the condition type is not supported.
  """
  core_condition = core_node.condition
  # NOTE(review): `missing_evaluation` is None for IsMissingInCondition;
  # assigning None to the proto bool field `na_value` would raise here.
  # Confirm whether such conditions are expected to reach this function.
  core_condition.na_value = condition.missing_evaluation
  if condition.split_score is not None:
    core_condition.split_score = condition.split_score
  features = condition.features()
  if not features:
    raise ValueError("Condition without features")
  # The proto stores a single attribute index; oblique conditions also
  # record the full attribute list separately below.
  core_condition.attribute = dataspec_lib.column_name_to_column_idx(
      features[0].name, dataspec)
  feature_column = dataspec.columns[core_condition.attribute]

  if isinstance(condition, IsMissingInCondition):
    core_condition.condition.na_condition.SetInParent()

  elif isinstance(condition, IsTrueCondition):
    core_condition.condition.true_value_condition.SetInParent()

  elif isinstance(condition, NumericalHigherThanCondition):
    core_condition.condition.higher_condition.threshold = condition.threshold

  elif isinstance(condition,
                  (CategoricalIsInCondition, CategoricalSetContainsCondition)):
    mask = condition.mask
    if mask and isinstance(mask[0], str):
      # Converts the mask to a list of integers.
      mask = [feature_column.categorical.items[value].index for value in mask]

    # Select the most efficient way to represent the mask
    if len(mask) * 32 * 8 > feature_column.categorical.number_of_unique_values:
      # A bitmap is more efficient.
      core_condition.condition.contains_bitmap_condition.elements_bitmap = column_spec_items_to_bitmap(
          feature_column, mask)
    else:
      # A list of indices is more efficient.
      core_condition.condition.contains_condition.elements[:] = mask

  elif isinstance(condition, NumericalSparseObliqueCondition):
    oblique = core_condition.condition.oblique_condition
    oblique.attributes[:] = [
        dataspec_lib.column_name_to_column_idx(feature.name, dataspec)
        for feature in features
    ]
    oblique.weights[:] = condition.weights
    oblique.threshold = condition.threshold

  else:
    raise NotImplementedError("No supported value type")
| {
"content_hash": "c130354841927fbb90fb2b238f65bb6d",
"timestamp": "",
"source": "github",
"line_count": 403,
"max_line_length": 103,
"avg_line_length": 34.19106699751861,
"alnum_prop": 0.6545467740764932,
"repo_name": "tensorflow/decision-forests",
"id": "8ecd40008ba57c19490b42493e920e5a7fc45940",
"size": "14356",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tensorflow_decision_forests/component/py_tree/condition.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2010"
},
{
"name": "C++",
"bytes": "264381"
},
{
"name": "JavaScript",
"bytes": "13659"
},
{
"name": "PureBasic",
"bytes": "5756"
},
{
"name": "Python",
"bytes": "731479"
},
{
"name": "Shell",
"bytes": "21071"
},
{
"name": "Starlark",
"bytes": "50728"
}
],
"symlink_target": ""
} |
from mongoengine import *
class AggregateRating(EmbeddedDocument):
    """Embedded document holding a running average rating and a vote count."""

    # Average rating as a fraction in [0.0, 1.0].
    rating = FloatField(min_value=0.0, max_value=1.0, default=0.0)
    # Number of ratings aggregated so far.
    count = IntField(min_value=0, default=0)

    def add_rating(self, rating):
        """Fold one new rating (a float in [0, 1]) into the running average.

        Raises ValidationError if ``rating`` is out of range.
        """
        if not 0 <= rating <= 1.0:
            raise ValidationError('Rating must be between 0 and 1.0')
        # Running-average update: weight the existing average by the old
        # count before adding the new sample. (The previous code computed
        # (old_average + rating) / (count + 1), which is only correct for
        # count <= 1 and shrinks toward zero as more ratings arrive.)
        self.rating = (float(self.rating) * int(self.count) + rating) / (int(self.count) + 1)
        self.count += 1
| {
"content_hash": "9358ed84c3830a2f341e35014d477ec9",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 73,
"avg_line_length": 30,
"alnum_prop": 0.6375,
"repo_name": "Mo-Talha/Nomad",
"id": "e45d7570bdfeb0d841c62f3417f7f615ec5df68b",
"size": "480",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "models/rating.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "19413"
},
{
"name": "HTML",
"bytes": "19747"
},
{
"name": "JavaScript",
"bytes": "53800"
},
{
"name": "Makefile",
"bytes": "2112"
},
{
"name": "Python",
"bytes": "383073"
},
{
"name": "Shell",
"bytes": "8972"
}
],
"symlink_target": ""
} |
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.core.files import File
from .models import Papermail
from django.conf import settings
import magic
from wand.image import Image
from os import remove
from uuid import uuid4
# Base directory where generated thumbnail files are written.
media_root = getattr(settings, 'MEDIA_ROOT')
@receiver(post_save, sender=Papermail)
def generate_thumbnail(sender, instance, **kwargs):
    """Generate a JPEG thumbnail of ``instance.paper_file`` after save.

    Only JPEG, PNG and PDF files are supported; any other MIME type is
    silently ignored. The thumbnail file name is generated with ``uuid4``.
    """
    mime = magic.Magic(mime=True)
    file_type = mime.from_file(instance.paper_file.path)
    if file_type in ('image/png', 'image/jpeg'):
        # Raster images are resized to a fixed 300x400 thumbnail.
        _attach_thumbnail(instance, instance.paper_file.path, resize=(300, 400))
    elif file_type == 'application/pdf':
        # '[0]' tells ImageMagick to render only the first page of the PDF.
        _attach_thumbnail(instance, instance.paper_file.path + '[0]')


def _attach_thumbnail(instance, source_path, resize=None):
    """Render ``source_path`` to a temporary JPEG and store it on
    ``instance.thumbnail``; the temporary file is always removed."""
    tmp_path = media_root + uuid4().hex + '_thumb.jpeg'
    with Image(filename=source_path) as img:
        with img.clone() as converted:
            converted.format = 'jpeg'
            if resize is not None:
                converted.resize(*resize)
            converted.save(filename=tmp_path)
    try:
        # The original code leaked this file handle; close it deterministically.
        with open(tmp_path, 'rb') as handle:
            content = File(handle)
            # Disconnect the signal while saving so that saving the thumbnail
            # does not recursively re-trigger thumbnail generation.
            post_save.disconnect(generate_thumbnail, sender=Papermail)
            try:
                instance.thumbnail.save(name=uuid4().hex + '_thumb.jpeg',
                                        content=content)
            finally:
                # Reconnect even if the save failed (the original skipped
                # reconnection on error, leaving the signal disabled).
                post_save.connect(generate_thumbnail, sender=Papermail)
    finally:
        remove(tmp_path)
"content_hash": "64069ffb603f67cc9550100e6cc4c692",
"timestamp": "",
"source": "github",
"line_count": 51,
"max_line_length": 91,
"avg_line_length": 39.745098039215684,
"alnum_prop": 0.6206216082881105,
"repo_name": "Humch/django-paperworks",
"id": "a0c5c09fbbd5e8539c2a375f31ba0261ec41130e",
"size": "2027",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "paperworks/signals.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "19672"
},
{
"name": "Python",
"bytes": "21502"
}
],
"symlink_target": ""
} |
'''OpenGL extension ARB.texture_rectangle
This module customises the behaviour of the
OpenGL.raw.GL.ARB.texture_rectangle to provide a more
Python-friendly API
Overview (from the spec)
OpenGL texturing is limited to images with power-of-two dimensions
and an optional 1-texel border. The ARB_texture_rectangle extension
adds a new texture target that supports 2D textures without requiring
power-of-two dimensions.
Non-power-of-two sized (NPOTS) textures are useful for storing video
images that do not have power-of-two sized (POTS). Re-sampling
artifacts are avoided and less texture memory may be required by
using non-power-of-two sized textures. Non-power-of-two sized
textures are also useful for shadow maps and window-space texturing.
However, non-power-of-two sized textures have limitations that
do not apply to power-of-two sized textures. NPOTS textures may
not use mipmap filtering; POTS textures support both mipmapped
and non-mipmapped filtering. NPOTS textures support only the
GL_CLAMP, GL_CLAMP_TO_EDGE, and GL_CLAMP_TO_BORDER wrap modes;
POTS textures support GL_CLAMP_TO_EDGE, GL_REPEAT, GL_CLAMP,
GL_MIRRORED_REPEAT, and GL_CLAMP_TO_BORDER (and GL_MIRROR_CLAMP_ATI
and GL_MIRROR_CLAMP_TO_EDGE_ATI if ATI_texture_mirror_once is
supported) . NPOTS textures do not support an optional 1-texel
border; POTS textures do support an optional 1-texel border.
NPOTS textures are accessed by dimension-dependent (aka
non-normalized) texture coordinates. So instead of thinking of
the texture image lying in a [0..1]x[0..1] range, the NPOTS texture
image lies in a [0..w]x[0..h] range.
This extension adds a new texture target and related state (proxy,
binding, max texture size).
The official definition of this extension is available here:
http://www.opengl.org/registry/specs/ARB/texture_rectangle.txt
'''
from OpenGL import platform, constants, constant, arrays
from OpenGL import extensions, wrapper
from OpenGL.GL import glget
import ctypes
from OpenGL.raw.GL.ARB.texture_rectangle import *
### END AUTOGENERATED SECTION | {
"content_hash": "d926b4831c6013f65f34c834927fd6be",
"timestamp": "",
"source": "github",
"line_count": 47,
"max_line_length": 70,
"avg_line_length": 44.319148936170215,
"alnum_prop": 0.7830052808449351,
"repo_name": "Universal-Model-Converter/UMC3.0a",
"id": "8a9576c88721de3a3a842fd0ae51fed0e9555b79",
"size": "2083",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "data/Python/x86/Lib/site-packages/OpenGL/GL/ARB/texture_rectangle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "226"
},
{
"name": "C",
"bytes": "1082640"
},
{
"name": "C#",
"bytes": "8440"
},
{
"name": "C++",
"bytes": "3621086"
},
{
"name": "CSS",
"bytes": "6226"
},
{
"name": "F#",
"bytes": "2310"
},
{
"name": "FORTRAN",
"bytes": "7795"
},
{
"name": "Forth",
"bytes": "506"
},
{
"name": "GLSL",
"bytes": "1040"
},
{
"name": "Groff",
"bytes": "5943"
},
{
"name": "HTML",
"bytes": "1196266"
},
{
"name": "Java",
"bytes": "5793"
},
{
"name": "Makefile",
"bytes": "1109"
},
{
"name": "Mask",
"bytes": "969"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "33351557"
},
{
"name": "R",
"bytes": "1370"
},
{
"name": "Shell",
"bytes": "6931"
},
{
"name": "Tcl",
"bytes": "2084458"
},
{
"name": "Visual Basic",
"bytes": "481"
}
],
"symlink_target": ""
} |
"""Assign some relative luminances to constants."""

# Relative luminance of pure black and pure white, respectively.
BLACK, WHITE = 0.0, 1.0
| {
"content_hash": "7d75b8884f90704ca6747f999c76f8a6",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 48,
"avg_line_length": 15.6,
"alnum_prop": 0.6794871794871795,
"repo_name": "nico-hn/color_contrast_calc_py",
"id": "bd62e4d14d244da5376c44acc9bdfdfc100ec211",
"size": "78",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "color_contrast_calc/const/luminance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "123851"
}
],
"symlink_target": ""
} |
"""
Worker that receives input from Piped RDD.
"""
import os
import sys
import time
from inspect import getfullargspec
import importlib
# 'resource' is a Unix specific module.
has_resource_module = True
try:
import resource
except ImportError:
has_resource_module = False
import traceback
from pyspark.accumulators import _accumulatorRegistry
from pyspark.broadcast import Broadcast, _broadcastRegistry
from pyspark.java_gateway import local_connect_and_auth
from pyspark.taskcontext import BarrierTaskContext, TaskContext
from pyspark.files import SparkFiles
from pyspark.resource import ResourceInformation
from pyspark.rdd import PythonEvalType
from pyspark.serializers import write_with_length, write_int, read_long, read_bool, \
write_long, read_int, SpecialLengths, UTF8Deserializer, PickleSerializer, \
BatchedSerializer
from pyspark.sql.pandas.serializers import ArrowStreamPandasUDFSerializer, CogroupUDFSerializer
from pyspark.sql.pandas.types import to_arrow_type
from pyspark.sql.types import StructType
from pyspark.util import fail_on_stopiteration
from pyspark import shuffle
# Module-level serializers shared by the whole worker: pickle for commands
# and results, UTF-8 for length-prefixed strings sent by the JVM.
pickleSer = PickleSerializer()
utf8_deserializer = UTF8Deserializer()
def report_times(outfile, boot, init, finish):
    """Write the worker's boot/init/finish timestamps to *outfile*.

    Each timestamp is converted to milliseconds, as the JVM side expects.
    """
    write_int(SpecialLengths.TIMING_DATA, outfile)
    for timestamp in (boot, init, finish):
        write_long(int(1000 * timestamp), outfile)
def add_path(path):
    """Insert *path* near the front of sys.path unless already present.

    The worker process can be reused, so the same path must not be added
    more than once.
    """
    if path in sys.path:
        return
    # Position 1 so the added path takes precedence over system packages.
    sys.path.insert(1, path)
def read_command(serializer, file):
    """Read a pickled command from *file*, unwrapping a Broadcast if needed."""
    command = serializer._read_with_length(file)
    if not isinstance(command, Broadcast):
        return command
    # Large commands are shipped via a broadcast; load the real value.
    return serializer.loads(command.value)
def chain(f, g):
    """Compose two functions: ``chain(f, g)(*a) == g(f(*a))``."""
    def chained(*args):
        return g(f(*args))
    return chained
def wrap_udf(f, return_type):
    """Wrap a row-at-a-time UDF, converting results to Spark's internal
    representation when the declared return type requires conversion."""
    if not return_type.needConversion():
        return lambda *a: f(*a)
    to_internal = return_type.toInternal
    return lambda *a: to_internal(f(*a))
def wrap_scalar_pandas_udf(f, return_type):
    """Wrap a scalar pandas UDF so each call yields (result, arrow_type).

    The result must be a sized pandas object (Series/DataFrame) with exactly
    as many rows as the input batch.
    """
    arrow_return_type = to_arrow_type(return_type)

    def checked(result, expected_len):
        # The UDF must return something with a length (Series or DataFrame).
        if not hasattr(result, "__len__"):
            pd_type = ("Pandas.DataFrame" if type(return_type) == StructType
                       else "Pandas.Series")
            raise TypeError("Return type of the user-defined function should be "
                            "{}, but is {}".format(pd_type, type(result)))
        # ... and it must be exactly as long as the input batch.
        if len(result) != expected_len:
            raise RuntimeError("Result vector from pandas_udf was not the required length: "
                               "expected %d, got %d" % (expected_len, len(result)))
        return result

    return lambda *a: (checked(f(*a), len(a[0])), arrow_return_type)
def wrap_pandas_iter_udf(f, return_type):
    """Wrap an iterator-based pandas UDF; lazily yields (batch, arrow_type)."""
    arrow_return_type = to_arrow_type(return_type)

    def check_type(result):
        # Every yielded batch must be a sized pandas object.
        if not hasattr(result, "__len__"):
            pd_type = ("Pandas.DataFrame" if type(return_type) == StructType
                       else "Pandas.Series")
            raise TypeError("Return type of the user-defined function should be "
                            "{}, but is {}".format(pd_type, type(result)))
        return result

    def wrapped(*iterator):
        for res in f(*iterator):
            yield check_type(res), arrow_return_type

    return wrapped
def wrap_cogrouped_map_pandas_udf(f, return_type, argspec):
    """Wrap a cogrouped-map pandas UDF.

    The returned callable takes the key/value series of the two cogrouped
    sides, rebuilds each side as a pandas DataFrame, invokes the user
    function and validates the result's type and column count against the
    declared schema.
    """
    def wrapped(left_key_series, left_value_series, right_key_series, right_value_series):
        import pandas as pd
        left_df = pd.concat(left_value_series, axis=1)
        right_df = pd.concat(right_value_series, axis=1)
        # The user function takes either (left, right) or (key, left, right),
        # determined by its declared argument count.
        if len(argspec.args) == 2:
            result = f(left_df, right_df)
        elif len(argspec.args) == 3:
            # One side may be empty; take the key from whichever side has rows.
            key_series = left_key_series if not left_df.empty else right_key_series
            key = tuple(s[0] for s in key_series)
            result = f(key, left_df, right_df)
        # NOTE(review): if argspec.args has any other length, `result` is
        # unbound here and the isinstance check below raises NameError.
        if not isinstance(result, pd.DataFrame):
            raise TypeError("Return type of the user-defined function should be "
                            "pandas.DataFrame, but is {}".format(type(result)))
        if not len(result.columns) == len(return_type):
            raise RuntimeError(
                "Number of columns of the returned pandas.DataFrame "
                "doesn't match specified schema. "
                "Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
        return result
    return lambda kl, vl, kr, vr: [(wrapped(kl, vl, kr, vr), to_arrow_type(return_type))]
def wrap_grouped_map_pandas_udf(f, return_type, argspec):
    """Wrap a grouped-map pandas UDF.

    The returned callable takes the grouping-key series and the value
    series, rebuilds the group as a pandas DataFrame, invokes the user
    function and validates the result's type and column count against the
    declared schema.
    """
    def wrapped(key_series, value_series):
        import pandas as pd
        # The user function takes either (df) or (key, df), determined by
        # its declared argument count.
        if len(argspec.args) == 1:
            result = f(pd.concat(value_series, axis=1))
        elif len(argspec.args) == 2:
            key = tuple(s[0] for s in key_series)
            result = f(key, pd.concat(value_series, axis=1))
        # NOTE(review): if argspec.args has any other length, `result` is
        # unbound here and the isinstance check below raises NameError.
        if not isinstance(result, pd.DataFrame):
            raise TypeError("Return type of the user-defined function should be "
                            "pandas.DataFrame, but is {}".format(type(result)))
        if not len(result.columns) == len(return_type):
            raise RuntimeError(
                "Number of columns of the returned pandas.DataFrame "
                "doesn't match specified schema. "
                "Expected: {} Actual: {}".format(len(return_type), len(result.columns)))
        return result
    return lambda k, v: [(wrapped(k, v), to_arrow_type(return_type))]
def wrap_grouped_agg_pandas_udf(f, return_type):
    """Wrap a grouped-aggregate pandas UDF: the scalar result is boxed into
    a length-1 pandas Series, paired with its arrow return type."""
    arrow_return_type = to_arrow_type(return_type)

    def evaluate(*series):
        import pandas as pd
        # A grouped aggregate returns a scalar; Spark expects a Series.
        return pd.Series([f(*series)])

    return lambda *a: (evaluate(*a), arrow_return_type)
def wrap_window_agg_pandas_udf(f, return_type, runner_conf, udf_index):
    """Dispatch to the bounded or unbounded window-agg wrapper based on the
    window bound type recorded in *runner_conf* for this UDF index."""
    bound_types_str = runner_conf.get('pandas_window_bound_types')
    bound_type = [t.strip().lower() for t in bound_types_str.split(',')][udf_index]
    if bound_type == 'bounded':
        return wrap_bounded_window_agg_pandas_udf(f, return_type)
    if bound_type == 'unbounded':
        return wrap_unbounded_window_agg_pandas_udf(f, return_type)
    raise RuntimeError("Invalid window bound type: {} ".format(bound_type))
def wrap_unbounded_window_agg_pandas_udf(f, return_type):
    """Wrap an unbounded window-aggregate pandas UDF.

    This is similar to grouped_agg_pandas_udf; the only difference is that
    the scalar result is repeated to match the window length, whereas
    grouped_agg_pandas_udf returns the scalar once.
    """
    arrow_return_type = to_arrow_type(return_type)

    def evaluate(*series):
        import pandas as pd
        scalar = f(*series)
        # Broadcast the scalar across every row of the window.
        return pd.Series([scalar]).repeat(len(series[0]))

    return lambda *a: (evaluate(*a), arrow_return_type)
def wrap_bounded_window_agg_pandas_udf(f, return_type):
    """Wrap a bounded window-aggregate pandas UDF.

    For each row i, the UDF is applied to the slice
    [begin_index[i], end_index[i]) of every input series; the per-row
    results are collected into a single pandas Series.
    """
    arrow_return_type = to_arrow_type(return_type)

    def wrapped(begin_index, end_index, *series):
        import pandas as pd
        result = []
        # Index operation is faster on np.ndarray,
        # So we turn the index series into np array
        # here for performance
        begin_array = begin_index.values
        end_array = end_index.values
        for i in range(len(begin_array)):
            # Note: Create a slice from a series for each window is
            #       actually pretty expensive. However, there
            #       is no easy way to reduce cost here.
            # Note: s.iloc[i : j] is about 30% faster than s[i: j], with
            #       the caveat that the created slices shares the same
            #       memory with s. Therefore, user are not allowed to
            #       change the value of input series inside the window
            #       function. It is rare that user needs to modify the
            #       input series in the window function, and therefore,
            #       it is be a reasonable restriction.
            # Note: Calling reset_index on the slices will increase the cost
            #       of creating slices by about 100%. Therefore, for performance
            #       reasons we don't do it here.
            series_slices = [s.iloc[begin_array[i]: end_array[i]] for s in series]
            result.append(f(*series_slices))
        return pd.Series(result)

    return lambda *a: (wrapped(*a), arrow_return_type)
def read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index):
    """Read one UDF (argument offsets plus chained functions) from *infile*.

    Returns ``(arg_offsets, wrapped_udf)`` where the wrapper matches
    *eval_type*. Raises ValueError for an unknown eval type.
    """
    num_arg = read_int(infile)
    arg_offsets = [read_int(infile) for i in range(num_arg)]
    chained_func = None
    # Several functions may be chained together, composed left to right.
    for i in range(read_int(infile)):
        f, return_type = read_command(pickleSer, infile)
        if chained_func is None:
            chained_func = f
        else:
            chained_func = chain(chained_func, f)

    if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
        func = chained_func
    else:
        # make sure StopIteration's raised in the user code are not ignored
        # when they are processed in a for loop, raise them as RuntimeError's instead
        func = fail_on_stopiteration(chained_func)

    # the last returnType will be the return type of UDF
    if eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF:
        return arg_offsets, wrap_scalar_pandas_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF:
        return arg_offsets, wrap_pandas_iter_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF:
        return arg_offsets, wrap_pandas_iter_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
        argspec = getfullargspec(chained_func)  # signature was lost when wrapping it
        return arg_offsets, wrap_grouped_map_pandas_udf(func, return_type, argspec)
    elif eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
        argspec = getfullargspec(chained_func)  # signature was lost when wrapping it
        return arg_offsets, wrap_cogrouped_map_pandas_udf(func, return_type, argspec)
    elif eval_type == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF:
        return arg_offsets, wrap_grouped_agg_pandas_udf(func, return_type)
    elif eval_type == PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF:
        return arg_offsets, wrap_window_agg_pandas_udf(func, return_type, runner_conf, udf_index)
    elif eval_type == PythonEvalType.SQL_BATCHED_UDF:
        return arg_offsets, wrap_udf(func, return_type)
    else:
        raise ValueError("Unknown eval type: {}".format(eval_type))
def read_udfs(pickleSer, infile, eval_type):
    """Read all UDFs for this task and return (func, profiler, deser, ser).

    For pandas eval types this first reads the runner configuration and
    builds an Arrow-based serializer; all other UDFs use a batched pickle
    serializer. The returned ``func`` maps batches of input through the
    deserialized UDF(s). Profiling is not supported, so the profiler slot
    is always None.
    """
    runner_conf = {}

    if eval_type in (PythonEvalType.SQL_SCALAR_PANDAS_UDF,
                     PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
                     PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF,
                     PythonEvalType.SQL_MAP_PANDAS_ITER_UDF,
                     PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
                     PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF,
                     PythonEvalType.SQL_WINDOW_AGG_PANDAS_UDF):

        # Load conf used for pandas_udf evaluation
        num_conf = read_int(infile)
        for i in range(num_conf):
            k = utf8_deserializer.loads(infile)
            v = utf8_deserializer.loads(infile)
            runner_conf[k] = v

        # NOTE: if timezone is set here, that implies respectSessionTimeZone is True
        timezone = runner_conf.get("spark.sql.session.timeZone", None)
        safecheck = runner_conf.get("spark.sql.execution.pandas.convertToArrowArraySafely",
                                    "false").lower() == 'true'
        # Used by SQL_GROUPED_MAP_PANDAS_UDF and SQL_SCALAR_PANDAS_UDF when returning StructType
        assign_cols_by_name = runner_conf.get(
            "spark.sql.legacy.execution.pandas.groupedMap.assignColumnsByName", "true")\
            .lower() == "true"

        if eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
            ser = CogroupUDFSerializer(timezone, safecheck, assign_cols_by_name)
        else:
            # Scalar Pandas UDF handles struct type arguments as pandas DataFrames instead of
            # pandas Series. See SPARK-27240.
            df_for_struct = (eval_type == PythonEvalType.SQL_SCALAR_PANDAS_UDF or
                             eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF or
                             eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF)
            ser = ArrowStreamPandasUDFSerializer(timezone, safecheck, assign_cols_by_name,
                                                 df_for_struct)
    else:
        ser = BatchedSerializer(PickleSerializer(), 100)

    num_udfs = read_int(infile)

    is_scalar_iter = eval_type == PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
    is_map_iter = eval_type == PythonEvalType.SQL_MAP_PANDAS_ITER_UDF

    if is_scalar_iter or is_map_iter:
        if is_scalar_iter:
            assert num_udfs == 1, "One SCALAR_ITER UDF expected here."
        if is_map_iter:
            assert num_udfs == 1, "One MAP_ITER UDF expected here."

        arg_offsets, udf = read_single_udf(
            pickleSer, infile, eval_type, runner_conf, udf_index=0)

        def func(_, iterator):
            # Tracks total input rows so output length can be validated below.
            num_input_rows = 0

            def map_batch(batch):
                nonlocal num_input_rows
                udf_args = [batch[offset] for offset in arg_offsets]
                num_input_rows += len(udf_args[0])
                if len(udf_args) == 1:
                    return udf_args[0]
                else:
                    return tuple(udf_args)

            iterator = map(map_batch, iterator)
            result_iter = udf(iterator)

            num_output_rows = 0
            for result_batch, result_type in result_iter:
                num_output_rows += len(result_batch)
                # This assert is for Scalar Iterator UDF to fail fast.
                # The length of the entire input can only be explicitly known
                # by consuming the input iterator in user side. Therefore,
                # it's very unlikely the output length is higher than
                # input length.
                assert is_map_iter or num_output_rows <= num_input_rows, \
                    "Pandas SCALAR_ITER UDF outputted more rows than input rows."
                yield (result_batch, result_type)

            if is_scalar_iter:
                try:
                    next(iterator)
                except StopIteration:
                    pass
                else:
                    raise RuntimeError("pandas iterator UDF should exhaust the input "
                                       "iterator.")

                if num_output_rows != num_input_rows:
                    raise RuntimeError(
                        "The length of output in Scalar iterator pandas UDF should be "
                        "the same with the input's; however, the length of output was %d and the "
                        "length of input was %d." % (num_output_rows, num_input_rows))

        # profiling is not supported for UDF
        return func, None, ser, ser

    def extract_key_value_indexes(grouped_arg_offsets):
        """
        Helper function to extract the key and value indexes from arg_offsets for the grouped and
        cogrouped pandas udfs. See BasePandasGroupExec.resolveArgOffsets for equivalent scala code.

        :param grouped_arg_offsets: List containing the key and value indexes of columns of the
            DataFrames to be passed to the udf. It consists of n repeating groups where n is the
            number of DataFrames. Each group has the following format:
                group[0]: length of group
                group[1]: length of key indexes
                group[2.. group[1] +2]: key attributes
                group[group[1] +3 group[0]]: value attributes
        """
        parsed = []
        idx = 0
        while idx < len(grouped_arg_offsets):
            # Each group is [length, key-count, keys..., values...].
            offsets_len = grouped_arg_offsets[idx]
            idx += 1
            offsets = grouped_arg_offsets[idx: idx + offsets_len]
            split_index = offsets[0] + 1
            offset_keys = offsets[1: split_index]
            offset_values = offsets[split_index:]
            parsed.append([offset_keys, offset_values])
            idx += offsets_len
        return parsed

    if eval_type == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF:
        # We assume there is only one UDF here because grouped map doesn't
        # support combining multiple UDFs.
        assert num_udfs == 1

        # See FlatMapGroupsInPandasExec for how arg_offsets are used to
        # distinguish between grouping attributes and data attributes
        arg_offsets, f = read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=0)
        parsed_offsets = extract_key_value_indexes(arg_offsets)

        # Create function like this:
        #   mapper a: f([a[0]], [a[0], a[1]])
        def mapper(a):
            keys = [a[o] for o in parsed_offsets[0][0]]
            vals = [a[o] for o in parsed_offsets[0][1]]
            return f(keys, vals)
    elif eval_type == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF:
        # We assume there is only one UDF here because cogrouped map doesn't
        # support combining multiple UDFs.
        assert num_udfs == 1
        arg_offsets, f = read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=0)
        parsed_offsets = extract_key_value_indexes(arg_offsets)

        def mapper(a):
            df1_keys = [a[0][o] for o in parsed_offsets[0][0]]
            df1_vals = [a[0][o] for o in parsed_offsets[0][1]]
            df2_keys = [a[1][o] for o in parsed_offsets[1][0]]
            df2_vals = [a[1][o] for o in parsed_offsets[1][1]]
            return f(df1_keys, df1_vals, df2_keys, df2_vals)
    else:
        udfs = []
        for i in range(num_udfs):
            udfs.append(read_single_udf(pickleSer, infile, eval_type, runner_conf, udf_index=i))

        def mapper(a):
            result = tuple(f(*[a[o] for o in arg_offsets]) for (arg_offsets, f) in udfs)
            # In the special case of a single UDF this will return a single result rather
            # than a tuple of results; this is the format that the JVM side expects.
            if len(result) == 1:
                return result[0]
            else:
                return result

    func = lambda _, it: map(mapper, it)

    # profiling is not supported for UDF
    return func, None, ser, ser
def main(infile, outfile):
    """Run one task sent by the JVM over the given socket streams.

    Reads the task setup from *infile* (version check, task context,
    resources, spark files, python includes, broadcast variables), executes
    the deserialized function over the input stream, then writes results,
    timing, shuffle metrics and accumulator updates to *outfile*, finishing
    with the end-of-stream handshake that decides worker reuse.
    """
    try:
        boot_time = time.time()
        split_index = read_int(infile)
        if split_index == -1:  # for unit tests
            sys.exit(-1)

        version = utf8_deserializer.loads(infile)
        if version != "%d.%d" % sys.version_info[:2]:
            raise Exception(("Python in worker has different version %s than that in " +
                             "driver %s, PySpark cannot run with different minor versions. " +
                             "Please check environment variables PYSPARK_PYTHON and " +
                             "PYSPARK_DRIVER_PYTHON are correctly set.") %
                            ("%d.%d" % sys.version_info[:2], version))

        # read inputs only for a barrier task
        isBarrier = read_bool(infile)
        boundPort = read_int(infile)
        secret = UTF8Deserializer().loads(infile)

        # set up memory limits
        memory_limit_mb = int(os.environ.get('PYSPARK_EXECUTOR_MEMORY_MB', "-1"))
        if memory_limit_mb > 0 and has_resource_module:
            total_memory = resource.RLIMIT_AS
            try:
                (soft_limit, hard_limit) = resource.getrlimit(total_memory)
                msg = "Current mem limits: {0} of max {1}\n".format(soft_limit, hard_limit)
                print(msg, file=sys.stderr)

                # convert to bytes
                new_limit = memory_limit_mb * 1024 * 1024

                if soft_limit == resource.RLIM_INFINITY or new_limit < soft_limit:
                    msg = "Setting mem limits to {0} of max {1}\n".format(new_limit, new_limit)
                    print(msg, file=sys.stderr)
                    resource.setrlimit(total_memory, (new_limit, new_limit))

            except (resource.error, OSError, ValueError) as e:
                # not all systems support resource limits, so warn instead of failing
                print("WARN: Failed to set memory limit: {0}\n".format(e), file=sys.stderr)

        # initialize global state
        taskContext = None
        if isBarrier:
            taskContext = BarrierTaskContext._getOrCreate()
            BarrierTaskContext._initialize(boundPort, secret)
            # Set the task context instance here, so we can get it by TaskContext.get for
            # both TaskContext and BarrierTaskContext
            TaskContext._setTaskContext(taskContext)
        else:
            taskContext = TaskContext._getOrCreate()
        # read inputs for TaskContext info
        taskContext._stageId = read_int(infile)
        taskContext._partitionId = read_int(infile)
        taskContext._attemptNumber = read_int(infile)
        taskContext._taskAttemptId = read_long(infile)
        # BUG FIX: this dict used to also be re-initialized INSIDE the loop
        # below, discarding every resource read in earlier iterations so that
        # only the last one survived. Initialize it once, before the loop.
        taskContext._resources = {}
        for r in range(read_int(infile)):
            key = utf8_deserializer.loads(infile)
            name = utf8_deserializer.loads(infile)
            addresses = []
            for a in range(read_int(infile)):
                addresses.append(utf8_deserializer.loads(infile))
            taskContext._resources[key] = ResourceInformation(name, addresses)

        taskContext._localProperties = dict()
        for i in range(read_int(infile)):
            k = utf8_deserializer.loads(infile)
            v = utf8_deserializer.loads(infile)
            taskContext._localProperties[k] = v

        shuffle.MemoryBytesSpilled = 0
        shuffle.DiskBytesSpilled = 0
        _accumulatorRegistry.clear()

        # fetch name of workdir
        spark_files_dir = utf8_deserializer.loads(infile)
        SparkFiles._root_directory = spark_files_dir
        SparkFiles._is_running_on_worker = True

        # fetch names of includes (*.zip and *.egg files) and construct PYTHONPATH
        add_path(spark_files_dir)  # *.py files that were added will be copied here
        num_python_includes = read_int(infile)
        for _ in range(num_python_includes):
            filename = utf8_deserializer.loads(infile)
            add_path(os.path.join(spark_files_dir, filename))

        importlib.invalidate_caches()

        # fetch names and values of broadcast variables
        needs_broadcast_decryption_server = read_bool(infile)
        num_broadcast_variables = read_int(infile)
        if needs_broadcast_decryption_server:
            # read the decrypted data from a server in the jvm
            port = read_int(infile)
            auth_secret = utf8_deserializer.loads(infile)
            (broadcast_sock_file, _) = local_connect_and_auth(port, auth_secret)

        for _ in range(num_broadcast_variables):
            bid = read_long(infile)
            if bid >= 0:
                if needs_broadcast_decryption_server:
                    read_bid = read_long(broadcast_sock_file)
                    assert(read_bid == bid)
                    _broadcastRegistry[bid] = \
                        Broadcast(sock_file=broadcast_sock_file)
                else:
                    path = utf8_deserializer.loads(infile)
                    _broadcastRegistry[bid] = Broadcast(path=path)
            else:
                # Negative bid means "remove broadcast -bid - 1".
                bid = - bid - 1
                _broadcastRegistry.pop(bid)

        if needs_broadcast_decryption_server:
            broadcast_sock_file.write(b'1')
            broadcast_sock_file.close()

        _accumulatorRegistry.clear()
        eval_type = read_int(infile)
        if eval_type == PythonEvalType.NON_UDF:
            func, profiler, deserializer, serializer = read_command(pickleSer, infile)
        else:
            func, profiler, deserializer, serializer = read_udfs(pickleSer, infile, eval_type)

        init_time = time.time()

        def process():
            iterator = deserializer.load_stream(infile)
            out_iter = func(split_index, iterator)
            try:
                serializer.dump_stream(out_iter, outfile)
            finally:
                if hasattr(out_iter, 'close'):
                    out_iter.close()

        if profiler:
            profiler.profile(process)
        else:
            process()

        # Reset task context to None. This is a guard code to avoid residual context when worker
        # reuse.
        TaskContext._setTaskContext(None)
        BarrierTaskContext._setTaskContext(None)
    except Exception:
        try:
            exc_info = traceback.format_exc()
            if isinstance(exc_info, bytes):
                # exc_info may contains other encoding bytes, replace the invalid bytes and convert
                # it back to utf-8 again
                exc_info = exc_info.decode("utf-8", "replace").encode("utf-8")
            else:
                exc_info = exc_info.encode("utf-8")
            write_int(SpecialLengths.PYTHON_EXCEPTION_THROWN, outfile)
            write_with_length(exc_info, outfile)
        except IOError:
            # JVM close the socket
            pass
        except Exception:
            # Write the error to stderr if it happened while serializing
            print("PySpark worker failed with exception:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
        sys.exit(-1)

    finish_time = time.time()
    report_times(outfile, boot_time, init_time, finish_time)
    write_long(shuffle.MemoryBytesSpilled, outfile)
    write_long(shuffle.DiskBytesSpilled, outfile)

    # Mark the beginning of the accumulators section of the output
    write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
    write_int(len(_accumulatorRegistry), outfile)
    for (aid, accum) in _accumulatorRegistry.items():
        pickleSer._write_with_length((aid, accum._value), outfile)

    # check end of stream
    if read_int(infile) == SpecialLengths.END_OF_STREAM:
        write_int(SpecialLengths.END_OF_STREAM, outfile)
    else:
        # write a different value to tell JVM to not reuse this worker
        write_int(SpecialLengths.END_OF_DATA_SECTION, outfile)
        sys.exit(-1)
if __name__ == '__main__':
    # Read information about how to connect back to the JVM from the environment.
    java_port = int(os.environ["PYTHON_WORKER_FACTORY_PORT"])
    auth_secret = os.environ["PYTHON_WORKER_FACTORY_SECRET"]
    (sock_file, _) = local_connect_and_auth(java_port, auth_secret)
    # The same authenticated socket is used for both reading and writing.
    main(sock_file, sock_file)
| {
"content_hash": "77f7128cec88b739577751ab2292c76a",
"timestamp": "",
"source": "github",
"line_count": 634,
"max_line_length": 99,
"avg_line_length": 42.534700315457414,
"alnum_prop": 0.610894797344903,
"repo_name": "ptkool/spark",
"id": "84e5cca5d3c00db7946375368271d3bccdb5a49e",
"size": "27752",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "python/pyspark/worker.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "49912"
},
{
"name": "Batchfile",
"bytes": "31352"
},
{
"name": "C",
"bytes": "1493"
},
{
"name": "CSS",
"bytes": "26836"
},
{
"name": "Dockerfile",
"bytes": "9014"
},
{
"name": "HTML",
"bytes": "40529"
},
{
"name": "HiveQL",
"bytes": "1890736"
},
{
"name": "Java",
"bytes": "4138601"
},
{
"name": "JavaScript",
"bytes": "203741"
},
{
"name": "Makefile",
"bytes": "7776"
},
{
"name": "PLSQL",
"bytes": "9439"
},
{
"name": "PLpgSQL",
"bytes": "380488"
},
{
"name": "PowerShell",
"bytes": "3865"
},
{
"name": "Python",
"bytes": "3170742"
},
{
"name": "R",
"bytes": "1187040"
},
{
"name": "Roff",
"bytes": "36501"
},
{
"name": "SQLPL",
"bytes": "9325"
},
{
"name": "Scala",
"bytes": "32134649"
},
{
"name": "Shell",
"bytes": "204763"
},
{
"name": "TSQL",
"bytes": "474884"
},
{
"name": "Thrift",
"bytes": "67584"
},
{
"name": "q",
"bytes": "79845"
}
],
"symlink_target": ""
} |
import numpy as np
import scipy
from data_handler import gen_graphs
from pte_theano import PTE
import logging
class train_pte(object):
    '''
    Train the PTE model on the given corpus.

    NOTE: uses `xrange`, so this module targets Python 2.
    '''
    def __init__(self):
        '''
        Define model hyper-parameters and build the graphs from the corpus.
        '''
        self.graphs = gen_graphs()
        # NOTE(review): "contruct_graphs" is the helper's actual (misspelled)
        # name in data_handler; renaming must happen there first.
        self.graphs.contruct_graphs("graph")
        # Generate nnz matrices with data (i, j, w)
        self.nnz_ww = []
        self.nnz_wd = []
        self.nnz_wl = []
        self.ndims = 40        # embedding dimensionality
        self.lr = 0.05         # learning rate
        self.batch_size = 100
        self.window_size = 10
        self.k = 5             # number of negative samples per positive pair
        self.nepochs = 1
    def train(self):
        '''
        Run training (first pre-training and then fine-tuning) on the graphs
        with the parameters defined in the constructor.
        '''
        # setting up logger
        logger = logging.getLogger("graph2vec")
        logger.setLevel(logging.INFO)
        # NOTE(review): this second call overrides the INFO level set above.
        logger.setLevel(logging.DEBUG)
        fh = logging.FileHandler("word2graph2vec.log")
        formatter = logging.Formatter('%(asctime)s %(message)s')
        fh.setFormatter(formatter)
        logger.addHandler(fh)
        # p: edge sampling probabilities; v1/v2: edge endpoint vertex ids.
        p, v1, v2 = self.graphs.gen_edgeprob()
        logger.info("Setting up the model")
        E = self.graphs.nedge
        V = self.graphs.nvertex
        D = self.graphs.ndocs
        L = self.graphs.nlabels
        d = len(self.graphs.w2d)
        l = len(self.graphs.w2l)
        pte = PTE(V, self.ndims, self.graphs.ndocs, self.graphs.nlabels)
        pte.ww_model()
        pte.wd_model()
        pte.wl_model()
        logger.info("Training started")
        for epoch in xrange(0, self.nepochs):
            # Pre-training
            # NOTE(review): shuffling p alone permutes the probabilities
            # without permuting v1/v2, which appears to break the
            # probability<->edge alignment used below — confirm intent.
            np.random.shuffle(p)
            c = 0
            try:
                # Pre-training on word 2 word model.
                for i in xrange(0, E, self.batch_size):
                    # Sample edge indices proportionally to edge weight.
                    sample = np.random.choice(p.shape[0], self.batch_size, p=p)
                    c = 0
                    for j in xrange(0, sample.shape[0]):
                        indm = v1[sample[j]]
                        indc = v2[sample[j]]
                        # k random vertices as negative samples.
                        indr = np.asarray(
                            np.random.randint(V, size=self.k), dtype=np.int32)
                        cost = pte.pretraining_ww(indm, indc, indr)
                        c += cost
                    logger.info("Cost after training one sample (batch) is %f" % c)
                # Pre-training on word-doc graph
                logger.info("Pre-training on word-word graph done")
                #for i in xrange(0, d):
                #    indw = nnz_wd[i, 0]
                #    indd = nnz_wd[i, 1]
                #    indr = np.asarray(
                #        np.random.randint(V, size=self.k), dtype=np.int32)
                #    if i % 5000 == 0:
                #        logger.info("cost is %f" % c)
                #        c = 0
                #    cost = pte.pretraining_wd(indw, indd, indr, nnz_wd[i, 2])
                #    c += cost
                # Fine-tuning on word-label graph
                #logger.info("Pre-training on word-doc done")
                #for i in xrange(0, l):
                #    indw = nnz_wl[i, 0]
                #    indl = nnz_wl[i, 1]
                #    indr = np.asarray(
                #        np.random.randint(V, size=self.k), dtype=np.int32)
                #    if i % 5000 == 0:
                #        logger.info("cost is %f" % c)
                #        c = 0
                #    cost = pte.finetuning(indw, indl, indr, nnz_wl[i, 2])
                #    c += cost
            except Exception as e:
                logger.exception("Following exception occured %s" % e)
            logger.info("Pre-training on word-label done")
            logger.info("training done, saving model")
            pte.save_model()
# Script entry point: build the trainer (constructs the graphs) and train.
if __name__ == "__main__":
    pte = train_pte()
    pte.train()
| {
"content_hash": "8202043c83e8dfc9bf307adbef19ab7f",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 83,
"avg_line_length": 36.81132075471698,
"alnum_prop": 0.4805228088159918,
"repo_name": "shashankg7/word2graph2vec",
"id": "d3950cac002b5516c2be519f555fd50f4b57de4a",
"size": "3948",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "word2graph2vec/train.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23858"
},
{
"name": "Shell",
"bytes": "31"
}
],
"symlink_target": ""
} |
"""Public section, including homepage and signup."""
from flask import Blueprint, request
from flask_login import login_required, current_user
from data_wrangler.utils import flash_errors, render_extensions
from data_wrangler.models.post import Post
blueprint = Blueprint('admin', __name__, static_folder="../static")
@blueprint.route("/new_blog", methods=["GET", "POST"])
@login_required
def new_blog():
    """Create a new blog post (admin only).

    GET renders the form; POST saves a new Post owned by the current
    user.  Missing form fields default to empty strings, matching the
    previous try/except-per-field behaviour.
    """
    if not current_user.is_admin:
        return render_extensions('401.html')
    if request.method == 'POST':
        # form.get() replaces the three identical try/except KeyError blocks.
        content = request.form.get('content', '')
        slug = request.form.get('slug', '')
        title = request.form.get('title', '')
        post = Post(title=title, body=content, slug=slug)
        post.save()
        current_user.posts.append(post)
        current_user.save()
    return render_extensions('admin/new_blog.html')
@blueprint.route("/edit_blog/<blog_id>/", methods=["GET", "POST"])
@login_required
def edit_blog(blog_id):
    """Edit an existing blog post (admin only).

    BUG FIX: the previous implementation created a brand-new Post on
    POST (and appended it to the user's posts again) instead of updating
    the post identified by ``blog_id``.  It now loads that post, applies
    the submitted fields, and re-renders the edit form with the saved
    content.
    """
    if not current_user.is_admin:
        return render_extensions('401.html')
    post_obj = Post.query.filter_by(id=int(blog_id)).first()
    if request.method == 'POST':
        # Missing fields default to empty strings, as before.
        post_obj.title = request.form.get('title', '')
        post_obj.slug = request.form.get('slug', '')
        post_obj.body = request.form.get('content', '')
        post_obj.save()
    post_content = {
        'title': str(post_obj.title),
        'slug': str(post_obj.slug),
        'body': str(post_obj.body),
    }
    return render_extensions('admin/edit_blog.html', post=post_content)
| {
"content_hash": "6cfb97348aade9ef619ceaffef03dce0",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 71,
"avg_line_length": 26.68421052631579,
"alnum_prop": 0.5818540433925049,
"repo_name": "Eleonore9/data-wrangler",
"id": "9cb075c89cd6fa6586d9e38fd154d22b3cd0ff4e",
"size": "2052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "data_wrangler/views/admin.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "33281"
},
{
"name": "JavaScript",
"bytes": "65537"
},
{
"name": "Python",
"bytes": "36803"
}
],
"symlink_target": ""
} |
def application(environ, start_response):
    """Minimal WSGI sanity-check app: always answers 200 with a short body.

    Per PEP 3333 the response body must be an iterable of ``bytes``; the
    original returned a ``str``, which breaks under Python 3 WSGI servers.
    """
    status = '200 OK'
    output = b'Your wsgi is working fine!'
    response_headers = [('Content-type', 'text/plain'),
                        ('Content-Length', str(len(output)))]
    start_response(status, response_headers)
    return [output]
| {
"content_hash": "d52957c8c2ec7053c286899f59c2059a",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 61,
"avg_line_length": 32.333333333333336,
"alnum_prop": 0.6116838487972509,
"repo_name": "tiagoprn/experiments",
"id": "ccee9f3a2c11a2e977a1c0fabe2331a1824a2dab",
"size": "291",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "micro/wsgi_test_for_webservers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6285"
},
{
"name": "Dockerfile",
"bytes": "1625"
},
{
"name": "HTML",
"bytes": "66184"
},
{
"name": "JavaScript",
"bytes": "56617"
},
{
"name": "Makefile",
"bytes": "10847"
},
{
"name": "Mako",
"bytes": "824"
},
{
"name": "Python",
"bytes": "273134"
},
{
"name": "Shell",
"bytes": "9257"
}
],
"symlink_target": ""
} |
import pygame, sys, random
from pygame.locals import *
# Create the constants (go ahead and experiment with different values)
BOARDWIDTH = 4 # number of columns in the board
BOARDHEIGHT = 4 # number of rows in the board
TILESIZE = 80
WINDOWWIDTH = 640
WINDOWHEIGHT = 480
FPS = 30
BLANK = None
# R G B
BLACK = ( 0, 0, 0)
WHITE = (255, 255, 255)
BRIGHTBLUE = ( 0, 50, 255)
DARKTURQUOISE = ( 3, 54, 73)
GREEN = ( 0, 204, 0)
BGCOLOR = DARKTURQUOISE
TILECOLOR = GREEN
TEXTCOLOR = WHITE
BORDERCOLOR = BRIGHTBLUE
BASICFONTSIZE = 20
BUTTONCOLOR = WHITE
BUTTONTEXTCOLOR = BLACK
MESSAGECOLOR = WHITE
# Margins centre the board in the window (1px gaps between tiles).
XMARGIN = int((WINDOWWIDTH - (TILESIZE * BOARDWIDTH + (BOARDWIDTH - 1))) / 2)
YMARGIN = int((WINDOWHEIGHT - (TILESIZE * BOARDHEIGHT + (BOARDHEIGHT - 1))) / 2)
# Direction names used throughout: the direction a TILE slides.
UP = 'up'
DOWN = 'down'
LEFT = 'left'
RIGHT = 'right'
def main():
    """Initialise pygame, build a scrambled puzzle, and run the event loop."""
    global FPSCLOCK, DISPLAYSURF, BASICFONT, RESET_SURF, RESET_RECT, NEW_SURF, NEW_RECT, SOLVE_SURF, SOLVE_RECT
    pygame.init()
    FPSCLOCK = pygame.time.Clock()
    DISPLAYSURF = pygame.display.set_mode((WINDOWWIDTH, WINDOWHEIGHT))
    pygame.display.set_caption('Slide Puzzle')
    BASICFONT = pygame.font.Font('freesansbold.ttf', BASICFONTSIZE)
    # Store the option buttons and their rectangles in OPTIONS.
    RESET_SURF, RESET_RECT = makeText('Reset', TEXTCOLOR, TILECOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 90)
    NEW_SURF, NEW_RECT = makeText('New Game', TEXTCOLOR, TILECOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 60)
    SOLVE_SURF, SOLVE_RECT = makeText('Solve', TEXTCOLOR, TILECOLOR, WINDOWWIDTH - 120, WINDOWHEIGHT - 30)
    mainBoard, solutionSeq = generateNewPuzzle(80)
    SOLVEDBOARD = getStartingBoard() # a solved board is the same as the board in a start state.
    allMoves = [] # list of moves made from the solved configuration
    while True: # main game loop
        slideTo = None # the direction, if any, a tile should slide
        msg = 'Click tile or press arrow keys to slide.' # contains the message to show in the upper left corner.
        if mainBoard == SOLVEDBOARD:
            msg = 'Solved!'
        drawBoard(mainBoard, msg)
        checkForQuit()
        for event in pygame.event.get(): # event handling loop
            if event.type == MOUSEBUTTONUP:
                spotx, spoty = getSpotClicked(mainBoard, event.pos[0], event.pos[1])
                if (spotx, spoty) == (None, None):
                    # check if the user clicked on an option button
                    if RESET_RECT.collidepoint(event.pos):
                        resetAnimation(mainBoard, allMoves) # clicked on Reset button
                        allMoves = []
                    elif NEW_RECT.collidepoint(event.pos):
                        mainBoard, solutionSeq = generateNewPuzzle(80) # clicked on New Game button
                        allMoves = []
                    elif SOLVE_RECT.collidepoint(event.pos):
                        resetAnimation(mainBoard, solutionSeq + allMoves) # clicked on Solve button
                        allMoves = []
                else:
                    # check if the clicked tile was next to the blank spot
                    blankx, blanky = getBlankPosition(mainBoard)
                    if spotx == blankx + 1 and spoty == blanky:
                        slideTo = LEFT
                    elif spotx == blankx - 1 and spoty == blanky:
                        slideTo = RIGHT
                    elif spotx == blankx and spoty == blanky + 1:
                        slideTo = UP
                    elif spotx == blankx and spoty == blanky - 1:
                        slideTo = DOWN
            elif event.type == KEYUP:
                # check if the user pressed a key to slide a tile
                if event.key in (K_LEFT, K_a) and isValidMove(mainBoard, LEFT):
                    slideTo = LEFT
                elif event.key in (K_RIGHT, K_d) and isValidMove(mainBoard, RIGHT):
                    slideTo = RIGHT
                elif event.key in (K_UP, K_w) and isValidMove(mainBoard, UP):
                    slideTo = UP
                elif event.key in (K_DOWN, K_s) and isValidMove(mainBoard, DOWN):
                    slideTo = DOWN
        if slideTo:
            # Animate first, then apply the move to the board model.
            slideAnimation(mainBoard, slideTo, 'Click tile or press arrow keys to slide.', 8) # show slide on screen
            makeMove(mainBoard, slideTo)
            allMoves.append(slideTo) # record the slide
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def terminate():
    # Shut pygame down cleanly, then stop the interpreter.
    pygame.quit()
    sys.exit()
def checkForQuit():
    # Any pending QUIT event ends the program immediately.
    for _ in pygame.event.get(QUIT):
        terminate()
    # Esc also quits; all other KEYUP events are re-posted for the caller.
    for event in pygame.event.get(KEYUP):
        if event.key == K_ESCAPE:
            terminate()
        pygame.event.post(event)
def getStartingBoard():
    """Return a board data structure in the solved state.

    For example, with BOARDWIDTH == BOARDHEIGHT == 3 the result is
    [[1, 4, 7], [2, 5, 8], [3, 6, BLANK]] (column-major numbering).
    """
    board = [[col + 1 + row * BOARDWIDTH for row in range(BOARDHEIGHT)]
             for col in range(BOARDWIDTH)]
    # The bottom-right cell holds the blank spot.
    board[BOARDWIDTH - 1][BOARDHEIGHT - 1] = BLANK
    return board
def getBlankPosition(board):
    # Locate the blank spot and return its (x, y) board coordinates.
    for col in range(BOARDWIDTH):
        for row in range(BOARDHEIGHT):
            if board[col][row] == BLANK:
                return (col, row)
def makeMove(board, move):
    # Swap the blank spot with the adjacent tile that slides in the
    # given direction.  Validity of the move is NOT checked here.
    blankx, blanky = getBlankPosition(board)
    offsets = {UP: (0, 1), DOWN: (0, -1), LEFT: (1, 0), RIGHT: (-1, 0)}
    delta = offsets.get(move)
    if delta is None:
        return  # unknown direction: matches the original's silent no-op
    tilex, tiley = blankx + delta[0], blanky + delta[1]
    board[blankx][blanky], board[tilex][tiley] = \
        board[tilex][tiley], board[blankx][blanky]
def isValidMove(board, move):
    # A slide is legal as long as the blank spot is not already at the
    # edge the sliding tile would have to come from.
    blankx, blanky = getBlankPosition(board)
    if move == UP:
        return blanky != len(board[0]) - 1
    if move == DOWN:
        return blanky != 0
    if move == LEFT:
        return blankx != len(board) - 1
    if move == RIGHT:
        return blankx != 0
    return False
def getRandomMove(board, lastMove=None):
    # Keep the directions that are legal on this board and would not
    # simply undo the previous move, then pick one at random.
    opposites = {UP: DOWN, DOWN: UP, LEFT: RIGHT, RIGHT: LEFT}
    validMoves = [m for m in (UP, DOWN, LEFT, RIGHT)
                  if isValidMove(board, m) and opposites.get(lastMove) != m]
    return random.choice(validMoves)
def getLeftTopOfTile(tileX, tileY):
    # Convert board coordinates to the pixel position of a tile's
    # top-left corner (each tile carries a 1px gap, hence TILESIZE + 1).
    pixelLeft = XMARGIN + tileX * (TILESIZE + 1) - 1
    pixelTop = YMARGIN + tileY * (TILESIZE + 1) - 1
    return (pixelLeft, pixelTop)
def getSpotClicked(board, x, y):
    # Translate a pixel coordinate into board coordinates, or
    # (None, None) when the click landed outside every tile.
    for tileX in range(len(board)):
        for tileY in range(len(board[0])):
            left, top = getLeftTopOfTile(tileX, tileY)
            if pygame.Rect(left, top, TILESIZE, TILESIZE).collidepoint(x, y):
                return (tileX, tileY)
    return (None, None)
def drawTile(tilex, tiley, number, adjx=0, adjy=0):
    # Render a single numbered tile at the given board position,
    # optionally offset by (adjx, adjy) pixels for slide animation.
    left, top = getLeftTopOfTile(tilex, tiley)
    pygame.draw.rect(DISPLAYSURF, TILECOLOR,
                     (left + adjx, top + adjy, TILESIZE, TILESIZE))
    textSurf = BASICFONT.render(str(number), True, TEXTCOLOR)
    textRect = textSurf.get_rect()
    textRect.center = (left + int(TILESIZE / 2) + adjx,
                       top + int(TILESIZE / 2) + adjy)
    DISPLAYSURF.blit(textSurf, textRect)
def makeText(text, color, bgcolor, top, left):
    # create the Surface and Rect objects for some text.
    # NOTE(review): despite the parameter names, ``top`` is used as the x
    # coordinate and ``left`` as the y coordinate (``topleft`` takes
    # (x, y)); existing callers already pass (x, y) in that order, so
    # only the names mislead -- renaming would break keyword callers.
    textSurf = BASICFONT.render(text, True, color, bgcolor)
    textRect = textSurf.get_rect()
    textRect.topleft = (top, left)
    return (textSurf, textRect)
def drawBoard(board, message):
    """Repaint the window: status message, tiles, border, option buttons."""
    DISPLAYSURF.fill(BGCOLOR)
    if message:
        textSurf, textRect = makeText(message, MESSAGECOLOR, BGCOLOR, 5, 5)
        DISPLAYSURF.blit(textSurf, textRect)
    for tilex in range(len(board)):
        for tiley in range(len(board[0])):
            if board[tilex][tiley]:
                drawTile(tilex, tiley, board[tilex][tiley])
    # Border drawn slightly outside the tile grid.
    left, top = getLeftTopOfTile(0, 0)
    width = BOARDWIDTH * TILESIZE
    height = BOARDHEIGHT * TILESIZE
    pygame.draw.rect(DISPLAYSURF, BORDERCOLOR, (left - 5, top - 5, width + 11, height + 11), 4)
    DISPLAYSURF.blit(RESET_SURF, RESET_RECT)
    DISPLAYSURF.blit(NEW_SURF, NEW_RECT)
    DISPLAYSURF.blit(SOLVE_SURF, SOLVE_RECT)
def slideAnimation(board, direction, message, animationSpeed):
    """Animate one tile sliding into the blank spot.

    Only draws the intermediate frames; the board itself is not mutated
    (the caller applies the move with makeMove afterwards).
    """
    # Note: This function does not check if the move is valid.
    blankx, blanky = getBlankPosition(board)
    # (movex, movey) is the tile that slides into the blank position.
    if direction == UP:
        movex = blankx
        movey = blanky + 1
    elif direction == DOWN:
        movex = blankx
        movey = blanky - 1
    elif direction == LEFT:
        movex = blankx + 1
        movey = blanky
    elif direction == RIGHT:
        movex = blankx - 1
        movey = blanky
    # prepare the base surface
    drawBoard(board, message)
    baseSurf = DISPLAYSURF.copy()
    # draw a blank space over the moving tile on the baseSurf Surface.
    moveLeft, moveTop = getLeftTopOfTile(movex, movey)
    pygame.draw.rect(baseSurf, BGCOLOR, (moveLeft, moveTop, TILESIZE, TILESIZE))
    for i in range(0, TILESIZE, animationSpeed):
        # animate the tile sliding over
        checkForQuit()
        DISPLAYSURF.blit(baseSurf, (0, 0))
        if direction == UP:
            drawTile(movex, movey, board[movex][movey], 0, -i)
        if direction == DOWN:
            drawTile(movex, movey, board[movex][movey], 0, i)
        if direction == LEFT:
            drawTile(movex, movey, board[movex][movey], -i, 0)
        if direction == RIGHT:
            drawTile(movex, movey, board[movex][movey], i, 0)
        pygame.display.update()
        FPSCLOCK.tick(FPS)
def generateNewPuzzle(numSlides):
    """Scramble a solved board with ``numSlides`` random, animated moves.

    Returns the scrambled board together with the move sequence used, so
    the puzzle can later be auto-solved by reversing that sequence.
    """
    board = getStartingBoard()
    drawBoard(board, '')
    pygame.display.update()
    pygame.time.wait(500)  # pause 500 milliseconds for effect
    sequence = []
    lastMove = None
    for _ in range(numSlides):
        move = getRandomMove(board, lastMove)
        slideAnimation(board, move, 'Generating new puzzle...',
                       animationSpeed=int(TILESIZE / 3))
        makeMove(board, move)
        sequence.append(move)
        lastMove = move
    return (board, sequence)
def resetAnimation(board, allMoves):
    # Undo every recorded move by replaying the opposite moves in
    # reverse order (animated, slightly faster than a normal slide).
    opposites = {UP: DOWN, DOWN: UP, LEFT: RIGHT, RIGHT: LEFT}
    for move in reversed(allMoves):
        oppositeMove = opposites[move]
        slideAnimation(board, oppositeMove, '',
                       animationSpeed=int(TILESIZE / 2))
        makeMove(board, oppositeMove)
# Start the game only when executed as a script (not when imported).
if __name__ == '__main__':
    main()
| {
"content_hash": "d25fc9aea2e7607906247c5e7e6c3647",
"timestamp": "",
"source": "github",
"line_count": 327,
"max_line_length": 116,
"avg_line_length": 36.82262996941896,
"alnum_prop": 0.6142346981147745,
"repo_name": "ABHISHEK2F24/PyGames",
"id": "5759fad6e55d130792360e989b3927bf757db09f",
"size": "12041",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Slide/slidepuzzle.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "149913"
}
],
"symlink_target": ""
} |
import os
import sys
import time
from magnolia.utility import *
from magnolia.utility import LOG as L
from magnolia.script.kancolle import testcase_normal
class TestCase(testcase_normal.TestCase_Normal):
    """Kancolle smoke test ("ping"): log in and drain expedition results."""
    def __init__(self, *args, **kwargs):
        super(TestCase, self).__init__(*args, **kwargs)
    @classmethod
    def setUpClass(cls):
        L.info("*** Start TestCase : %s *** " % __file__)
        L.info("*** Debug Flag : %s ***" % str(cls.get("args.debug")))
    def test_1(self):
        """Start screen capture, log in, collect pending expedition results."""
        L.info("*** Ping ***")
        try:
            self.minicap_start(); time.sleep(2)
            self.assertTrue(self.initialize(self.get("leveling.composition")), "Can't Login & Check Start.")
            while self.expedition_result(): time.sleep(1)
        except Exception as e:
            L.warning(type(e).__name__ + ": " + str(e))
        finally:
            # FIX: minicap teardown was duplicated in both the success and
            # the exception path; a single ``finally`` guarantees it runs
            # exactly once either way.
            self.minicap_finish(); time.sleep(2)
    @classmethod
    def tearDownClass(cls):
        L.info("*** End TestCase : %s *** " % __file__)
| {
"content_hash": "1a7cb3a470c1b10d2cce7c3fec7a82cc",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 108,
"avg_line_length": 31.87878787878788,
"alnum_prop": 0.5769961977186312,
"repo_name": "setsulla/stir",
"id": "f6957e0ffc628b92579562fdffd65e44e3f45e6e",
"size": "1052",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/magnolia/script/kancolle/ping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "176459"
}
],
"symlink_target": ""
} |
"""
Force10 Backend.
This backend will only work with SSH version 2 capable Force10 switches.
This excludes most, if not all, of the etherscale series.
The backend has been developed for the E series.
The backend has been developed and tested on a Terascale E300 switch.
The switch (or router, depending on your level of pedanticness) is configured
by the backend logging in via ssh, requesting a cli, and firing the necessary
commands for configuring a VLAN. This approach was chosen over netconf / XML,
as a fairly reliable source said that not all the necessary functionality
needed was available via the previously mentioned interfaces.
Currently the backend does not support VLAN rewriting, and I am not sure
if/how it is supported by the platform.
Configuration:
To setup a VLAN connection:
configure
interface vlan $vlan_id
name $name
description $description
no shut
tagged $source_port
tagged $dest_port
end
Teardown:
configure
no interface vlan $vlan_id
end
Ensure that the interfaces are configure to be layer 2.
Ralph developed a backend for etherscale, where a lot of the input from this
backend comes from.
Authors: Henrik Thostrup Jensen <htj@nordu.net>
Ralph Koning <R.Koning@uva.nl>
Copyright: NORDUnet (2011-2013)
"""
import string
import random
import os
from twisted.python import log
from twisted.internet import defer
from twisted.conch.ssh import session
from opennsa import constants as cnt, config
from opennsa.backends.common import ssh, genericbackend
LOG_SYSTEM = 'Force10'
# FTOS CLI keywords and '%'-style templates used to configure and
# remove VLAN interfaces over the SSH session below.
COMMAND_ENABLE = 'enable'
COMMAND_CONFIGURE = 'configure'
COMMAND_END = 'end'
COMMAND_EXIT = 'exit'
COMMAND_WRITE = 'write' # writes config
COMMAND_INTERFACE_VLAN = 'interface vlan %(vlan)i'
COMMAND_NAME = 'name %(name)s'
COMMAND_NO_SHUTDOWN = 'no shutdown'
COMMAND_TAGGED = 'tagged %(interface)s'
COMMAND_NO_INTERFACE = 'no interface vlan %(vlan)i'
def _portToInterfaceVLAN(nrm_port):
interface, vlan = nrm_port.rsplit('.')
vlan = int(vlan)
return interface, vlan
def _createSetupCommands(source_nrm_port, dest_nrm_port):
    # Build the FTOS CLI sequence that creates the VLAN interface and
    # tags both endpoint ports onto it.
    src_if, src_vlan = _portToInterfaceVLAN(source_nrm_port)
    dst_if, dst_vlan = _portToInterfaceVLAN(dest_nrm_port)
    assert src_vlan == dst_vlan, 'Source and destination VLANs differ, unpossible!'
    return [
        COMMAND_INTERFACE_VLAN % {'vlan': src_vlan},
        COMMAND_NAME % {'name': 'opennsa-%i' % src_vlan},
        COMMAND_TAGGED % {'interface': src_if},
        COMMAND_TAGGED % {'interface': dst_if},
        COMMAND_NO_SHUTDOWN,
        COMMAND_END,
    ]
def _createTeardownCommands(source_nrm_port, dest_nrm_port):
    # Build the FTOS CLI sequence that deletes the VLAN interface again.
    _, src_vlan = _portToInterfaceVLAN(source_nrm_port)
    _, dst_vlan = _portToInterfaceVLAN(dest_nrm_port)
    assert src_vlan == dst_vlan, 'Source and destination VLANs differ, unpossible!'
    return [COMMAND_NO_INTERFACE % {'vlan': src_vlan}, COMMAND_END]
class SSHChannel(ssh.SSHChannel):
    """SSH session channel that drives the Force10 CLI over a pty.

    ``sendCommands`` opens a shell, enters enable and configure mode,
    issues the given commands, writes the configuration and closes the
    channel.  Prompt detection works by waiting for a substring
    (e.g. '#') to appear in the accumulated output.
    """
    name = 'session'
    def __init__(self, conn):
        ssh.SSHChannel.__init__(self, conn=conn)
        self.data = ''          # output accumulated since the last prompt match
        self.wait_defer = None  # deferred fired once wait_data is observed
        self.wait_data = None   # prompt substring currently being waited for
    @defer.inlineCallbacks
    def sendCommands(self, commands, enable_password):
        """Send the CLI ``commands``; ``enable_password`` unlocks enable mode."""
        LT = '\r' # line termination
        try:
            log.msg('Requesting shell for sending commands', debug=True, system=LOG_SYSTEM)
            term = os.environ.get('TERM', 'xterm')
            winSize = (25,80,0,0)
            ptyReqData = session.packRequest_pty_req(term, winSize, '')
            yield self.conn.sendRequest(self, 'pty-req', ptyReqData, wantReply=1)
            yield self.conn.sendRequest(self, 'shell', '', wantReply=1)
            log.msg('Got shell', system=LOG_SYSTEM, debug=True)
            d = self.waitForData('>')
            yield d
            log.msg('Got shell ready', system=LOG_SYSTEM, debug=True)
            # so far so good
            # The wait is registered BEFORE writing so the reply cannot
            # race past us (same pattern for every prompt below).
            d = self.waitForData(':')
            self.write(COMMAND_ENABLE + LT) # This one fails for some reason
            yield d
            log.msg('Got enable password prompt', system=LOG_SYSTEM, debug=True)
            d = self.waitForData('#')
            self.write(enable_password + LT)
            yield d
            log.msg('Entered enabled mode', debug=True, system=LOG_SYSTEM)
            d = self.waitForData('#')
            self.write(COMMAND_CONFIGURE + LT) # This one fails for some reason
            yield d
            log.msg('Entered configure mode', debug=True, system=LOG_SYSTEM)
            for cmd in commands:
                log.msg('CMD> %s' % cmd, debug=True, system=LOG_SYSTEM)
                d = self.waitForData('#')
                self.write(cmd + LT)
                yield d
            # Superfluous COMMAND_END has been removed by hopet
            log.msg('Configuration done, writing configuration.', debug=True, system=LOG_SYSTEM)
            d = self.waitForData('#')
            self.write(COMMAND_WRITE + LT)
            yield d
            log.msg('Configuration written. Exiting.', debug=True, system=LOG_SYSTEM)
            self.write(COMMAND_EXIT + LT)
            # Waiting for the prompt removed by hopet - we could wait forever here! :(
        except Exception as e:
            log.msg('Error sending commands: %s' % str(e))
            raise e
        log.msg('Commands successfully send', system=LOG_SYSTEM)
        self.sendEOF()
        self.closeIt()
    def waitForData(self, data):
        # Arm the prompt matcher: the returned deferred fires when
        # ``data`` shows up in the channel output.
        self.wait_data = data
        self.wait_defer = defer.Deferred()
        return self.wait_defer
    def dataReceived(self, data):
        # Accumulate output; fire (and disarm) the pending wait once the
        # expected prompt substring is seen.
        log.msg("DATA:" + data, system=LOG_SYSTEM, debug=True)
        if len(data) == 0:
            pass
        else:
            self.data += data
            if self.wait_data and self.wait_data in self.data:
                d = self.wait_defer
                self.data = ''
                self.wait_data = None
                self.wait_defer = None
                d.callback(self)
class Force10CommandSender:
    """Runs CLI command batches on the device, one SSH connection per batch."""
    def __init__(self, ssh_connection_creator, enable_password):
        self.ssh_connection_creator = ssh_connection_creator
        # Password written at the enable prompt by SSHChannel.sendCommands.
        self.enable_password = enable_password
    @defer.inlineCallbacks
    def sendCommands(self, commands):
        """Open a connection, run ``commands``, always close the connection."""
        # Note: FTOS does not allow multiple channels in an SSH connection,
        # so we open a connection for each request. Party like it is 1988.
        # The "correct" solution for this would be to create a connection pool,
        # but that won't happen just now.
        log.msg('Creating new SSH connection', debug=True, system=LOG_SYSTEM)
        ssh_connection = yield self.ssh_connection_creator.getSSHConnection()
        try:
            channel = SSHChannel(conn=ssh_connection)
            ssh_connection.openChannel(channel)
            log.msg("Opening channel", system=LOG_SYSTEM, debug=True)
            yield channel.channel_open
            log.msg("Channel open, sending commands", system=LOG_SYSTEM, debug=True)
            yield channel.sendCommands(commands, self.enable_password)
        finally:
            ssh_connection.transport.loseConnection()
class Force10ConnectionManager:
    """Maps generic backend operations onto Force10 CLI command sequences.

    Resolves NRM ports via ``port_map`` and pushes configuration through
    a Force10CommandSender over SSH.
    """
    def __init__(self, log_system, port_map, cfg):
        self.log_system = log_system
        self.port_map = port_map
        host = cfg[config.FORCE10_HOST]
        port = cfg.get(config.FORCE10_PORT, 22)
        host_fingerprint = cfg[config.FORCE10_HOST_FINGERPRINT]
        user = cfg[config.FORCE10_USER]
        if config.FORCE10_PASSWORD in cfg:
            password = cfg[config.FORCE10_PASSWORD]
            ssh_connection_creator = ssh.SSHConnectionCreator(host, port, [ host_fingerprint ], user, password=password)
        else:
            # FIX: ``password`` used to be unbound in this branch, so the
            # Force10CommandSender construction below raised NameError as
            # soon as ssh keys were configured (the old "this will blow up
            # when used with ssh keys" comment).
            password = None
            ssh_public_key = cfg[config.FORCE10_SSH_PUBLIC_KEY]
            ssh_private_key = cfg[config.FORCE10_SSH_PRIVATE_KEY]
            ssh_connection_creator = ssh.SSHConnectionCreator(host, port, [ host_fingerprint ], user, ssh_public_key, ssh_private_key)
        # NOTE(review): with key-based auth the enable password is now None;
        # confirm the device accepts an empty enable password in that setup.
        self.command_sender = Force10CommandSender(ssh_connection_creator, enable_password=password)
    def getResource(self, port, label):
        assert label is not None and label.type == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
        # NOTE(review): getTarget below invokes label.labelValue() as a
        # method while this accesses it as an attribute -- one of the two
        # is likely wrong; confirm against the opennsa Label API in use.
        return str(label.labelValue)
    def getTarget(self, port, label):
        assert label is not None and label.type == cnt.ETHERNET_VLAN, 'Label type must be ethernet-vlan'
        return self.port_map[port] + '.' + label.labelValue()
    def createConnectionId(self, source_target, dest_target):
        # Random 10-digit hex id, prefixed so ids are recognisable in logs.
        return 'F10-' + ''.join( [ random.choice(string.hexdigits[:16]) for _ in range(10) ] )
    def canSwapLabel(self, label_type):
        # VLAN rewriting is not supported by this backend.
        return False
    def setupLink(self, connection_id, source_target, dest_target, bandwidth):
        """Create the VLAN between the two targets; returns a deferred."""
        def linkUp(pt):
            log.msg('Link %s -> %s up' % (source_target, dest_target), system=self.log_system)
            return pt
        commands = _createSetupCommands(source_target, dest_target)
        d = self.command_sender.sendCommands(commands)
        d.addCallback(linkUp)
        return d
    def teardownLink(self, connection_id, source_target, dest_target, bandwidth):
        """Remove the VLAN between the two targets; returns a deferred."""
        def linkDown(pt):
            log.msg('Link %s -> %s down' % (source_target, dest_target), system=self.log_system)
            return pt
        commands = _createTeardownCommands(source_target, dest_target)
        d = self.command_sender.sendCommands(commands)
        d.addCallback(linkDown)
        return d
def Force10Backend(network_name, network_topology, parent_requester, port_map, configuration):
    """Factory: build a GenericBackend wired to a Force10 connection manager."""
    backend_name = 'Force10 %s' % network_name
    connection_manager = Force10ConnectionManager(backend_name, port_map, configuration)
    return genericbackend.GenericBackend(network_name, network_topology,
                                         connection_manager, parent_requester,
                                         backend_name)
| {
"content_hash": "a55bc70550b4474c85ca17063c36deb9",
"timestamp": "",
"source": "github",
"line_count": 321,
"max_line_length": 134,
"avg_line_length": 31.660436137071652,
"alnum_prop": 0.6367214405195316,
"repo_name": "NORDUnet/opennsa",
"id": "917706b8e84b8309840848e450ced5580b146c28",
"size": "10163",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "opennsa/backends/force10.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Dockerfile",
"bytes": "1165"
},
{
"name": "HTML",
"bytes": "1480"
},
{
"name": "Makefile",
"bytes": "244"
},
{
"name": "Python",
"bytes": "746817"
},
{
"name": "SCSS",
"bytes": "63"
},
{
"name": "Shell",
"bytes": "4132"
}
],
"symlink_target": ""
} |
import unittest
import numpy
import chainer
from chainer import cuda
from chainer import gradient_check
from chainer import links
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainer.utils import array
def _check_forward(e1, e2, f, y_expect):
    # Wrap the raw arrays in Variables, run the link forward, and compare
    # the produced data against the expected output.
    y = f(chainer.Variable(e1), chainer.Variable(e2))
    gradient_check.assert_allclose(y_expect, y.data)
def _check_backward(e1, e2, y_grad, link, bias):
    # Numerically validate gradients w.r.t. both inputs and the link's
    # parameters (including the bias term when requested).
    params = [link.W, link.b] if bias else [link.W]
    gradient_check.check_backward(
        link, (e1, e2), y_grad, params, eps=1e-2, rtol=1e-3)
def _batch_to_gpu(*xs):
    # Move every given array to the GPU, preserving order.
    return tuple(map(cuda.to_gpu, xs))
def _uniform(*shape):
return numpy.random.uniform(-1, 1, shape).astype(numpy.float32)
class TestBilinear(unittest.TestCase):
    """Forward/backward checks for links.Bilinear with the bias terms."""
    in_shape = (3, 4)
    out_size = 4
    batch_size = 10
    def setUp(self):
        # Randomly initialise all parameters and keep numpy copies so the
        # expected output can be computed independently below.
        self.f = links.Bilinear(
            self.in_shape[0], self.in_shape[1], self.out_size)
        self.f.W.data[...] = _uniform(*self.f.W.data.shape)
        self.f.V1.data[...] = _uniform(*self.f.V1.data.shape)
        self.f.V2.data[...] = _uniform(*self.f.V2.data.shape)
        self.f.b.data[...] = _uniform(*self.f.b.data.shape)
        self.f.zerograds()
        self.W = self.f.W.data.copy()
        self.V1 = self.f.V1.data.copy()
        self.V2 = self.f.V2.data.copy()
        self.b = self.f.b.data.copy()
        self.e1 = _uniform(self.batch_size, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, self.in_shape[1])
        self.gy = _uniform(self.batch_size, self.out_size)
        # Reference output: bilinear term + both linear terms + bias.
        self.y = (
            numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W) +
            self.e1.dot(self.V1) + self.e2.dot(self.V2) + self.b)
    @condition.retry(3)
    def test_forward_cpu(self):
        _check_forward(self.e1, self.e2, self.f, self.y)
    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu(self):
        self.f.to_gpu()
        _check_forward(cuda.to_gpu(self.e1),
                       cuda.to_gpu(self.e2),
                       self.f, self.y)
    @condition.retry(3)
    def test_backward_cpu(self):
        _check_backward(self.e1, self.e2, self.gy, self.f, True)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.f.to_gpu()
        _check_backward(cuda.to_gpu(self.e1),
                        cuda.to_gpu(self.e2),
                        cuda.to_gpu(self.gy),
                        self.f, True)
class TestBilinear2(TestBilinear):
    """Same checks with higher-rank inputs (flattened via array.as_mat)."""
    def setUp(self):
        super(TestBilinear2, self).setUp()
        assert self.in_shape[1] % 2 == 0
        self.e1 = _uniform(self.batch_size, 1, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, self.in_shape[1] // 2, 2)
        self.gy = _uniform(self.batch_size, self.out_size)
        # Expected output is computed on the 2-D (matrix) views.
        e1 = array.as_mat(self.e1)
        e2 = array.as_mat(self.e2)
        self.y = (
            numpy.einsum('ij,ik,jkl->il', e1, e2, self.W) +
            e1.dot(self.V1) + e2.dot(self.V2) + self.b)
# The subclasses below re-run the TestBilinear checks over edge-case
# configurations: scalar outputs and/or singleton input dimensions.
class TestBilinear3(TestBilinear):
    out_size = 1
class TestBilinear4(TestBilinear):
    in_shape = (1, 2)
class TestBilinear5(TestBilinear):
    in_shape = (2, 1)
class TestBilinear6(TestBilinear):
    in_shape = (1, 1)
class TestBilinear7(TestBilinear):
    in_shape = (1, 2)
    out_size = 1
class TestBilinear8(TestBilinear):
    in_shape = (2, 1)
    out_size = 1
class TestBilinear9(TestBilinear):
    in_shape = (1, 1)
    out_size = 1
class TestBilinearWOBias(TestBilinear):
    """Bilinear checks with nobias=True (only the W parameter exists)."""
    def setUp(self):
        self.f = links.Bilinear(
            self.in_shape[0], self.in_shape[1], self.out_size, True)
        W = self.f.W.data
        W[...] = numpy.random.uniform(-1, 1, W.shape)
        self.f.zerograds()
        self.W = W.copy()
        self.e1 = _uniform(self.batch_size, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, self.in_shape[1])
        self.gy = _uniform(self.batch_size, self.out_size)
        # Reference output: bilinear term only (no V1/V2/b with nobias).
        self.y = numpy.einsum('ij,ik,jkl->il', self.e1, self.e2, self.W)
    @condition.retry(3)
    def test_backward_cpu(self):
        _check_backward(self.e1, self.e2, self.gy, self.f, False)
    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu(self):
        self.f.to_gpu()
        _check_backward(cuda.to_gpu(self.e1), cuda.to_gpu(self.e2),
                        cuda.to_gpu(self.gy), self.f, False)
class TestBilinearWOBias2(TestBilinearWOBias):
    """No-bias checks with higher-rank inputs (flattened via as_mat)."""
    def setUp(self):
        super(TestBilinearWOBias2, self).setUp()
        assert self.in_shape[1] % 2 == 0
        self.e1 = _uniform(self.batch_size, 1, self.in_shape[0])
        self.e2 = _uniform(self.batch_size, 2, self.in_shape[1] // 2)
        self.gy = _uniform(self.batch_size, self.out_size)
        # Expected output computed on the 2-D (matrix) views.
        e1 = array.as_mat(self.e1)
        e2 = array.as_mat(self.e2)
        self.y = numpy.einsum('ij,ik,jkl->il', e1, e2, self.W)
# Edge-case configurations for the no-bias variant, mirroring
# TestBilinear3..9 above.
class TestBilinearWOBias3(TestBilinearWOBias):
    out_size = 1
class TestBilinearWOBias4(TestBilinearWOBias):
    in_shape = (1, 2)
class TestBilinearWOBias5(TestBilinearWOBias):
    in_shape = (2, 1)
class TestBilinearWOBias6(TestBilinearWOBias):
    in_shape = (1, 1)
class TestBilinearWOBias7(TestBilinearWOBias):
    in_shape = (1, 2)
    out_size = 1
class TestBilinearWOBias8(TestBilinearWOBias):
    in_shape = (2, 1)
    out_size = 1
class TestBilinearWOBias9(TestBilinearWOBias):
    in_shape = (1, 1)
    out_size = 1
class InitByInitialParameter(unittest.TestCase):
    """Base fixture providing correctly-shaped initial parameter arrays."""
    in_shape = (2, 3)
    out_size = 4
    batch_size = 10
    def setUp(self):
        self.W = _uniform(self.in_shape[0], self.in_shape[1], self.out_size)
        self.V1 = _uniform(self.in_shape[0], self.out_size)
        self.V2 = _uniform(self.in_shape[1], self.out_size)
        self.b = _uniform(self.out_size,)
class NormalInitialParameter(InitByInitialParameter):
    """Constructing Bilinear with well-shaped initial values must succeed."""
    def check_normal(self, initialW, initial_bias, nobias):
        # Construction alone is the assertion: no exception expected.
        links.Bilinear(
            self.in_shape[0], self.in_shape[1], self.out_size, nobias,
            initialW, initial_bias)
    def test_normal_cpu_bias(self):
        self.check_normal(self.W, (self.V1, self.V2, self.b), False)
    def test_normal_cpu_nobias(self):
        self.check_normal(self.W, None, False)
class InvalidInitialParameter(InitByInitialParameter):
    """Constructing Bilinear with mis-shaped initial values must fail."""
    def setUp(self):
        super(InvalidInitialParameter, self).setUp()
        # Each "invalid" array has exactly one dimension off by one.
        self.invalidW = _uniform(self.in_shape[0] + 1, self.in_shape[1],
                                 self.out_size)
        self.invalidV1 = _uniform(self.in_shape[0] + 1, self.out_size)
        self.invalidV2 = _uniform(self.in_shape[1] + 1, self.out_size)
        self.invalidb = _uniform(self.out_size + 1,)
    def check_invalid(self, initialW, initial_bias, nobias):
        with self.assertRaises(AssertionError):
            links.Bilinear(
                self.in_shape[0], self.in_shape[1], self.out_size, nobias,
                initialW, initial_bias)
    def test_invalidW_cpu(self):
        self.check_invalid(self.invalidW, (self.V1, self.V2, self.b), False)
        self.check_invalid(self.invalidW, None, True)
    def test_invalidV1_cpu(self):
        self.check_invalid(self.W, (self.invalidV1, self.V2, self.b), False)
    def test_invalidV2_cpu(self):
        self.check_invalid(self.W, (self.V1, self.invalidV2, self.b), False)
    def test_invalidb_cpu(self):
        self.check_invalid(self.W, (self.V1, self.V2, self.invalidb), False)
# Standard chainer test bootstrap: discovers and runs this module's tests.
testing.run_module(__name__, __file__)
| {
"content_hash": "b8bfdd5942e435167bd593d5e285dd50",
"timestamp": "",
"source": "github",
"line_count": 289,
"max_line_length": 76,
"avg_line_length": 26.110726643598618,
"alnum_prop": 0.6040286244367877,
"repo_name": "cemoody/chainer",
"id": "1de470c1d659bc0824be717fa631c3942fe7d0ba",
"size": "7546",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/chainer_tests/links_tests/connection_tests/test_bilinear.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "561717"
}
],
"symlink_target": ""
} |
from typing import List, Iterable, Type
import gzip
import numpy as np
def get_string_vector_reader(dtype: Type = np.float32, columns: int = None):
    """Get a reader for vectors encoded as whitespace-separated numbers.

    Args:
        dtype: NumPy dtype of the resulting vectors.
        columns: If not None, each non-blank line must contain exactly this
            many numbers; otherwise a ValueError is raised.

    Returns:
        A function mapping a list of file paths (plain text or ``.gz``) to
        an iterator of 1-D ``np.ndarray`` objects, one per non-blank line.
    """
    def process_line(line: str, lineno: int, path: str) -> np.ndarray:
        numbers = line.strip().split()
        if columns is not None and len(numbers) != columns:
            raise ValueError("Wrong number of columns ({}) on line {}, file {}"
                             .format(len(numbers), lineno, path))
        return np.array(numbers, dtype=dtype)

    def reader(files: List[str]) -> Iterable[List[np.ndarray]]:
        for path in files:
            current_line = 0
            # Open gzipped files in *text* mode ("rt").  The previous code
            # used binary mode and str(line), which produced the textual
            # repr "b'...'" and corrupted every parsed value.
            if path.endswith(".gz"):
                f_data = gzip.open(path, "rt")
            else:
                f_data = open(path)
            with f_data:
                for line in f_data:
                    current_line += 1
                    if line.strip():
                        yield process_line(line, current_line, path)
    return reader
# pylint: disable=invalid-name
# Ready-made readers for the two common vector dtypes.
FloatVectorReader = get_string_vector_reader(np.float32)
IntVectorReader = get_string_vector_reader(np.int32)
# pylint: enable=invalid-name
| {
"content_hash": "4fa7037a442486376d20a2f416975d03",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 79,
"avg_line_length": 37.17948717948718,
"alnum_prop": 0.5524137931034483,
"repo_name": "juliakreutzer/bandit-neuralmonkey",
"id": "d6545b2a3317e896a5bafc7e555baf7f910a8f0c",
"size": "1450",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "neuralmonkey/readers/string_vector_reader.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "13780"
},
{
"name": "HTML",
"bytes": "3116"
},
{
"name": "JavaScript",
"bytes": "2070"
},
{
"name": "Makefile",
"bytes": "2564"
},
{
"name": "Mathematica",
"bytes": "1874"
},
{
"name": "Perl",
"bytes": "45129"
},
{
"name": "Python",
"bytes": "723799"
},
{
"name": "Shell",
"bytes": "4358"
}
],
"symlink_target": ""
} |
'''
SMTP/Email publishers
@author: Michael Eddington
@version: $Id$
'''
#
# Copyright (c) 2009 Michael Eddington
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
# Authors:
# Michael Eddington (mike@phed.org)
# $Id$
import string,time,sys,os,smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
from Peach.publisher import Publisher
# Imports to make py2exe happy
import email.iterators
import email.generator
#
class EmailAttachment(Publisher):
    '''
    Send fuzzed data as email attachment.

    Builds a multipart message with a fixed body text and the fuzzed
    payload attached (base64-encoded, content type application/pdf),
    then delivers it through the configured SMTP server.
    '''
    def __init__(self, server, fileName, msgTo, msgFrom = "peach@peach.org",
            msgSubject = "Fuzzing Test",
            msgText = "Message generated by Peach Fuzzing Platform.\n\nhttp://peachfuzzer.com\n\n - Peach\n"):
        Publisher.__init__(self)
        # Stash connection and message parameters for send().
        self.msgText = msgText
        self.msgSubject = msgSubject
        self.msgTo = msgTo
        self.msgFrom = msgFrom
        self.fileName = fileName
        self.server = server
    def send(self, data):
        '''
        Publish some data

        @type	data: string
        @param	data: Data to publish
        '''
        # Message envelope and human-readable body.
        message = MIMEMultipart()
        message['From'] = self.msgFrom
        message['To'] = self.msgTo
        message['Date'] = formatdate(localtime=True)
        message['Subject'] = self.msgSubject
        message.attach(MIMEText(self.msgText))
        # Fuzzed payload as a base64-encoded attachment.
        attachment = MIMEBase('application', 'pdf')
        attachment.set_payload(data)
        Encoders.encode_base64(attachment)
        attachment.add_header('Content-Disposition', 'attachment; filename="%s"' % self.fileName)
        message.attach(attachment)
        # Deliver and close the SMTP session.
        session = smtplib.SMTP(self.server)
        session.sendmail(self.msgFrom, self.msgTo, message.as_string())
        session.close()
    def connect(self):
        '''
        Called to connect or open a connection/file.
        '''
        pass
    def close(self):
        '''
        Close current stream/connection.
        '''
        pass
class _OleStorage(object):
    '''
    This class wraps OLE Storage APIs
    '''
    # NOTE(review): placeholder only -- no OLE functionality is implemented.
    pass
# end
| {
"content_hash": "e30293d2601d925e50b49c4cc97b7006",
"timestamp": "",
"source": "github",
"line_count": 116,
"max_line_length": 103,
"avg_line_length": 25.92241379310345,
"alnum_prop": 0.7213169271699368,
"repo_name": "thecrackofdawn/Peach2.3",
"id": "8db5ee5c238c8243a02f8dd71333fe5fcdc75d8a",
"size": "3008",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Peach/Publishers/smtp.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "29972"
},
{
"name": "C++",
"bytes": "21544"
},
{
"name": "CSS",
"bytes": "18213"
},
{
"name": "JavaScript",
"bytes": "10581"
},
{
"name": "Objective-C",
"bytes": "403"
},
{
"name": "Python",
"bytes": "25902756"
},
{
"name": "Shell",
"bytes": "898"
},
{
"name": "XSLT",
"bytes": "18658"
}
],
"symlink_target": ""
} |
import argparse
import csv
import datetime
import os
import sys, getopt
from collections import defaultdict
from collections import OrderedDict, Callable
from copy import deepcopy

from dateutil import parser
#http://stackoverflow.com/questions/6190331/can-i-do-an-ordered-default-dict-in-python
class DefaultOrderedDict(OrderedDict):
    """An OrderedDict with defaultdict-style handling of missing keys.

    Recipe from the Stack Overflow link above this class.
    """
    def __init__(self, default_factory=None, *a, **kw):
        # Mirror defaultdict's contract: the factory must be callable
        # (or None, which disables default values entirely).
        if (default_factory is not None and
            not isinstance(default_factory, Callable)):
            raise TypeError('first argument must be callable')
        OrderedDict.__init__(self, *a, **kw)
        self.default_factory = default_factory
    def __getitem__(self, key):
        try:
            return OrderedDict.__getitem__(self, key)
        except KeyError:
            # Missing key: delegate to __missing__, as dict would.
            return self.__missing__(key)
    def __missing__(self, key):
        if self.default_factory is None:
            raise KeyError(key)
        # Insert and return a freshly built default value; insertion
        # order is preserved (this is the defaultdict behaviour).
        self[key] = value = self.default_factory()
        return value
    def __reduce__(self):
        # Pickle support: rebuild via (factory,) plus our items.
        if self.default_factory is None:
            args = tuple()
        else:
            args = self.default_factory,
        return type(self), args, None, None, self.items()
    def copy(self):
        return self.__copy__()
    def __copy__(self):
        # Shallow copy: same factory, same (shared) values.
        return type(self)(self.default_factory, self)
    def __deepcopy__(self, memo):
        import copy
        return type(self)(self.default_factory,
                          copy.deepcopy(self.items()))
    def __repr__(self):
        return 'OrderedDefaultDict(%s, %s)' % (self.default_factory,
                                               OrderedDict.__repr__(self))
def build_cohorts(start_date, cohort_size, oldest_subscription_date):
    """Build ordered cohort buckets walking backwards from start_date.

    Each cohort spans ``cohort_size`` days; iteration stops once the
    window moves past ``oldest_subscription_date``.  Keys are readable
    date-range labels; each value holds the window bounds and an empty
    subscription list for the caller to fill.
    """
    cohorts = DefaultOrderedDict(dict)
    window_end = start_date
    while window_end > oldest_subscription_date:
        window_start = window_end - datetime.timedelta(cohort_size)
        label = "%s - %s" % (window_start.strftime("%D"),
                             (window_end - datetime.timedelta(1)).strftime("%D"))
        cohorts[label] = {
            "cohort_end_date": window_end,
            "cohort_start_date": window_start,
            "subscriptions": [],
        }
        window_end = window_start
    return cohorts
def get_retention_by_cohort(csv_path, plan_code=None, cohort_size = 7,
retention_period_size = 7, start_date = datetime.datetime.now()):
# Assume file in same directory, fall back to full path
try:
data_file = open(os.getcwd() + "/" + csv_path)
except:
data_file = open(csv_path)
data_reader = csv.reader(data_file)
data = []
start_date = start_date.replace(hour=0, minute=0, second=0,microsecond=0)
oldest_subscription_date = start_date
for row in data_reader:
try:
if not plan_code or plan_code in row[3]:
subscription_activation_date = parser.parse(row[17]).replace(
tzinfo=None)
if subscription_activation_date < oldest_subscription_date:
oldest_subscription_date = subscription_activation_date
data.append(row)
except ValueError:
# Will choke on the first row (column headers)
pass
cohorts = build_cohorts(start_date, cohort_size, oldest_subscription_date)
for row in data:
if not plan_code or plan_code in row[3]:
subscription = {}
subscription["billing_account_code"] = row[1]
subscription["state"] = row[4]
subscription["unit_amount"] = row[8]
subscription["activated_at"] = parser.parse(row[17]).replace(
tzinfo=None)
subscription["canceled_at"] = parser.parse(row[19]).replace(
tzinfo=None)
for cohort in cohorts:
if subscription["activated_at"] >= cohorts[cohort]["cohort_start_date"]:
cohorts[cohort]["subscriptions"].append(subscription)
break
out_path = "%s_churn_by_cohort_%s.csv" % (csv_path.split(".csv")[0],
datetime.datetime.now().strftime("%m_%d_%y_%s"))
out_file = open(out_path, 'wb')
writer = csv.writer(out_file, dialect = 'excel')
periods = ["Period %d" % (x + 1) for x in xrange(
(start_date - oldest_subscription_date).days / retention_period_size)
]
columns = ["Cohort", "Total Subscriptions", "Total Churned", "Percent Retention", "Avg Subscription Length"]
columns.extend(deepcopy(periods))
writer.writerow(columns)
for cohort in cohorts:
cohorts[cohort]["churn_by_period"] = OrderedDict()
total_subscriptions = len(cohorts[cohort]["subscriptions"])
total_churned = 0
total_subscription_length = 0
period_start = cohorts[cohort]["cohort_start_date"]
for period in periods:
period_end = period_start + datetime.timedelta(retention_period_size)
if start_date > period_start:
cohorts[cohort]["churn_by_period"][period] = 0
period_start = period_end
else:
break
for sub in cohorts[cohort]["subscriptions"]:
if sub["state"] == "expired" or sub["state"] == "canceled":
total_churned += 1
subscription_length = sub["canceled_at"] - sub["activated_at"]
total_subscription_length += subscription_length.days
days_back = retention_period_size
period_start = cohorts[cohort]["cohort_start_date"]
for period in cohorts[cohort]["churn_by_period"]:
period_end = period_start + datetime.timedelta(retention_period_size)
if period_end <= start_date:
if sub["canceled_at"] <= period_end:
cohorts[cohort]["churn_by_period"][period] += 1
period_start = period_end
else:
total_subscription_length += (start_date -
sub["activated_at"]).days
if total_subscriptions > 0:
avg_subscription_len = float(total_subscription_length)/float(total_subscriptions)
percent_retention = float(total_subscriptions - total_churned)/float(total_subscriptions)
else:
avg_subscription_len = 0
percent_retention = 0
value_list = [cohort, total_subscriptions, total_churned, percent_retention, avg_subscription_len]
for period in cohorts[cohort]["churn_by_period"]:
if total_subscriptions > 0:
percent_retention = float(
total_subscriptions - cohorts[cohort]["churn_by_period"][period])/float(total_subscriptions)
else:
percent_rention = 0
value_list.append(percent_retention)
writer.writerow(value_list)
print "Your report has been generated: %s" % out_path
if __name__ == "__main__":
    # Command-line front end for get_retention_by_cohort.
    argparser = argparse.ArgumentParser(description='Calculates retention by cohort and outputs the result to a CSV file')
    argparser.add_argument("-f", "--file", dest="file",
        help="Filename of subscriptions csv from exported from recurly")
    argparser.add_argument("-p", "--plan", dest="plan",
        help="Filter down to a specific subscription plan")
    argparser.add_argument("-c", "--cohort_size", dest="cohort_size",
        help="Define size of cohorts")
    argparser.add_argument("-r", "--retention_period_size", dest="retention_period_size",
        help="Define size of retention periods")
    argparser.add_argument("-s", "--start", dest="start_date",
        help="Define the start date of the period ")
    args = argparser.parse_args()
    # Forward only the flags the user actually supplied.
    kwargs = {}
    if args.file:
        kwargs["csv_path"] = args.file
    for size_option in ("cohort_size", "retention_period_size"):
        raw_value = getattr(args, size_option)
        if raw_value:
            kwargs[size_option] = int(raw_value)
    if args.plan:
        kwargs["plan_code"] = args.plan
    if args.start_date:
        kwargs["start_date"] = parser.parse(args.start_date).replace(
            tzinfo=None)
    get_retention_by_cohort(**kwargs)
| {
"content_hash": "2419a7fde643f26e23a20d985d8b0872",
"timestamp": "",
"source": "github",
"line_count": 190,
"max_line_length": 122,
"avg_line_length": 43.689473684210526,
"alnum_prop": 0.5989639802433442,
"repo_name": "MattTheRed/recurly_retention_reports",
"id": "1851eed1e12d3f2fa2e3d4c2f8f23655f6b27624",
"size": "8301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "churn_by_cohort.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8301"
}
],
"symlink_target": ""
} |
"""
Implement and register our stream filters
"""
import base64
import codecs
import io
import zlib
from .stream_filter import StreamFilter
# The best are the ones that are already done for us
def a85decode(data, **kwargs):
    # Thin wrapper so the stdlib decoder matches the filter callback
    # signature (extra keyword arguments are accepted and ignored).
    return base64.a85decode(data)
def a85encode(data, **kwargs):
    """Encode *data* as ASCII85.

    Bug fix: this previously called ``base64.a85decode``, so "encoding"
    through this filter actually decoded (and corrupted) the stream.
    """
    return base64.a85encode(data)
def flate_decode(data, **kwargs):
    # zlib implements the deflate stream used by the FlateDecode filter.
    return zlib.decompress(data)
def flate_encode(data, **kwargs):
    return zlib.compress(data)
# Register codec pairs as (name, decoder, end-of-data marker, encoder).
# NOTE(review): marker semantics come from StreamFilter.register in
# .stream_filter, which is not visible here.
StreamFilter.register('ASCII85Decode', a85decode, b'~>', a85encode)
StreamFilter.register('FlateDecode', flate_decode, None, flate_encode)
def hex_decode(data, **kwargs):
    """Decode an ASCIIHex stream.

    Accepts **kwargs for consistency with the other registered filter
    callbacks (a85decode, flate_decode, ...), which all tolerate extra
    keyword arguments from the filter machinery.
    """
    return codecs.decode(data, 'hex')
def hex_encode(data, **kwargs):
    """Encode *data* as ASCIIHex."""
    return codecs.encode(data, 'hex')
# ASCIIHex streams end with the '>' marker.
StreamFilter.register('ASCIIHexDecode', hex_decode, b'>', hex_encode)
def lzw_decode(data, **kwargs):
    """Decode byte-wide LZW codes.

    Based on code from http://rosettacode.org/wiki/LZW_compression.

    Fixes over the previous version, which raised TypeError/ValueError on
    any input under Python 3:
    - the dictionary is keyed by int codes (iterating a bytearray yields
      ints, so the old bytes-keyed dict never matched);
    - the first code is mapped through the dictionary before writing
      (BytesIO.write needs bytes, not an int);
    - one-byte prefixes use slicing (``w[:1]``) instead of indexing
      (``w[0]``), which yields an int in Python 3;
    - empty input now decodes to b'' instead of raising IndexError.

    NOTE(review): codes taken from a bytearray are limited to 0..255, so
    the dynamically added codes (>= 256) are unreachable; real PDF LZW
    data is bit-packed with 9-12 bit codes -- confirm how callers feed
    this function.
    """
    if not data:
        return b''
    # Build the dictionary of single-byte codes.
    dict_size = 256
    dictionary = {i: bytes((i,)) for i in range(dict_size)}
    # use BytesIO, otherwise this becomes O(N^2)
    # due to bytes concatenation in a loop
    result = io.BytesIO()
    compressed = bytearray(data)
    w = dictionary[compressed.pop(0)]
    result.write(w)
    for k in compressed:
        if k in dictionary:
            entry = dictionary[k]
        elif k == dict_size:
            # LZW's "code equals next free slot" special case.
            entry = w + w[:1]
        else:
            raise ValueError('Bad compressed k: {}'.format(k))
        result.write(entry)
        dictionary[dict_size] = w + entry[:1]
        dict_size += 1
        w = entry
    return result.getvalue()
# Decode-only registration: no LZW encoder is provided.
# NOTE(review): inferred from the shorter argument list; confirm against
# StreamFilter.register's signature.
StreamFilter.register('LZWDecode', lzw_decode)
| {
"content_hash": "b0e48fa978f1aa87d0f293421764fff8",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 72,
"avg_line_length": 28.54385964912281,
"alnum_prop": 0.6631837738168408,
"repo_name": "ajmarks/gymnast",
"id": "a02bf40a2ffa44dc96ff4573aaa767afc3408010",
"size": "1627",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gymnast/filters/filters.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "937"
},
{
"name": "Python",
"bytes": "127195"
}
],
"symlink_target": ""
} |
import os
from functools import partial
from pathlib import Path
from typing import Tuple, Union
from torchdata.datapipes.iter import FileOpener, IterableWrapper
from torchtext._download_hooks import HttpReader
from torchtext._internal.module_utils import is_module_available
from torchtext.data.datasets_utils import _create_dataset_directory
from torchtext.data.datasets_utils import _wrap_split_argument
# Source tarball of the Large Movie Review (IMDB) sentiment dataset.
URL = "http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"
# Expected MD5 of the download, used to validate the on-disk cache.
MD5 = "7c2ac02c03563afcf9b574c7e56c153a"
NUM_LINES = {
    "train": 25000,
    "test": 25000,
}
# Label directory name -> integer class id.
MAP_LABELS = {"neg": 1, "pos": 2}
_PATH = "aclImdb_v1.tar.gz"
DATASET_NAME = "IMDB"
def _filepath_fn(root, _=None):
    # Cache location of the downloaded tarball; the second argument is the
    # datapipe item, unused here.
    return os.path.join(root, _PATH)
def _decompressed_filepath_fn(root, decompressed_folder, split, labels, _=None):
    # Cache directory for a decompressed split.  NOTE(review): ``labels``
    # is accepted but unused -- confirm whether it can be dropped at the
    # partial() call site in IMDB().
    return os.path.join(root, decompressed_folder, split)
def _filter_fn(filter_imdb_data, split, t):
return filter_imdb_data(split, t[0])
def _path_map_fn(t):
return Path(t[0]).parts[-2], t[1]
def _encode_map_fn(x):
return x[0], x[1].encode()
def _cache_filepath_fn(root, decompressed_folder, split, x):
    # Full cache path for one per-label file within the decompressed split.
    return os.path.join(root, decompressed_folder, split, x)
def _modify_res(t):
    # Map the label directory name at the end of the cached path to its
    # integer class id (see MAP_LABELS), keeping the text unchanged.
    return MAP_LABELS[Path(t[0]).parts[-1]], t[1]
def filter_imdb_data(key, fname):
    """Keep only labeled review files belonging to the requested split."""
    # eg. fname = "aclImdb/train/neg/12416_3.txt"
    wanted_labels = {"neg", "pos"}
    *_, split, label, _file = Path(fname).parts
    return split == key and label in wanted_labels
@_create_dataset_directory(dataset_name=DATASET_NAME)
@_wrap_split_argument(("train", "test"))
def IMDB(root: str, split: Union[Tuple[str], str]):
    """IMDB Dataset
    .. warning::
        using datapipes is still currently subject to a few caveats. if you wish
        to use this dataset with shuffling, multi-processing, or distributed
        learning, please see :ref:`this note <datapipes_warnings>` for further
        instructions.
    For additional details refer to http://ai.stanford.edu/~amaas/data/sentiment/
    Number of lines per split:
        - train: 25000
        - test: 25000
    Args:
        root: Directory where the datasets are saved. Default: os.path.expanduser('~/.torchtext/cache')
        split: split or splits to be returned. Can be a string or tuple of strings. Default: (`train`, `test`)
    :returns: DataPipe that yields tuple of label (1 to 2) and text containing the movie review
    :rtype: (int, str)
    """
    if not is_module_available("torchdata"):
        raise ModuleNotFoundError(
            "Package `torchdata` not found. Please install following instructions at https://github.com/pytorch/data"
        )
    # Stage 1: download the tarball once, validated against MD5.
    url_dp = IterableWrapper([URL])
    cache_compressed_dp = url_dp.on_disk_cache(
        filepath_fn=partial(_filepath_fn, root),
        hash_dict={_filepath_fn(root): MD5},
        hash_type="md5",
    )
    cache_compressed_dp = HttpReader(cache_compressed_dp).end_caching(mode="wb", same_filepath_fn=True)
    labels = {"neg", "pos"}
    decompressed_folder = "aclImdb_v1"
    # Stage 2: extract only this split's labeled review files and cache
    # them grouped by label.
    cache_decompressed_dp = cache_compressed_dp.on_disk_cache(
        filepath_fn=partial(_decompressed_filepath_fn, root, decompressed_folder, split, labels)
    )
    cache_decompressed_dp = FileOpener(cache_decompressed_dp, mode="b")
    cache_decompressed_dp = cache_decompressed_dp.load_from_tar()
    cache_decompressed_dp = cache_decompressed_dp.filter(partial(_filter_fn, filter_imdb_data, split))
    # eg. "aclImdb/train/neg/12416_3.txt" -> "neg"
    cache_decompressed_dp = cache_decompressed_dp.map(_path_map_fn)
    cache_decompressed_dp = cache_decompressed_dp.readlines(decode=True)
    cache_decompressed_dp = cache_decompressed_dp.lines_to_paragraphs()  # group by label in cache file
    cache_decompressed_dp = cache_decompressed_dp.map(_encode_map_fn)
    cache_decompressed_dp = cache_decompressed_dp.end_caching(
        mode="wb", filepath_fn=partial(_cache_filepath_fn, root, decompressed_folder, split), skip_read=True
    )
    # Stage 3: read the cached per-label files back as (int label, text).
    data_dp = FileOpener(cache_decompressed_dp, encoding="utf-8")
    # get label from cache file, eg. "aclImdb_v1/train/neg" -> "neg"
    return data_dp.readlines().map(_modify_res).shuffle().set_shuffle(False).sharding_filter()
| {
"content_hash": "045106942e8b8df3f3a7960d4c09194d",
"timestamp": "",
"source": "github",
"line_count": 122,
"max_line_length": 117,
"avg_line_length": 34.5655737704918,
"alnum_prop": 0.690538297367797,
"repo_name": "pytorch/text",
"id": "09fba57b04f1e33fcb635fd5d887cc07519f5d06",
"size": "4217",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "torchtext/datasets/imdb.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "5989"
},
{
"name": "C",
"bytes": "1165"
},
{
"name": "C++",
"bytes": "103773"
},
{
"name": "CMake",
"bytes": "6607"
},
{
"name": "Dockerfile",
"bytes": "1632"
},
{
"name": "Python",
"bytes": "761434"
},
{
"name": "Shell",
"bytes": "19559"
}
],
"symlink_target": ""
} |
import json
import logging
import os
import time
from osc_lib.command import command
from osc_lib import utils
from tackerclient.common import exceptions
from tackerclient.i18n import _
from tackerclient.osc import sdk_utils
from tackerclient.osc import utils as tacker_osc_utils
# Column map for the list command: (attribute, heading, visibility).
_attr_map = (
    ('id', 'ID', tacker_osc_utils.LIST_BOTH),
    ('vnfInstanceName', 'VNF Instance Name', tacker_osc_utils.LIST_BOTH),
    ('instantiationState', 'Instantiation State', tacker_osc_utils.LIST_BOTH),
    ('vnfProvider', 'VNF Provider', tacker_osc_utils.LIST_BOTH),
    ('vnfSoftwareVersion', 'VNF Software Version', tacker_osc_utils.LIST_BOTH),
    ('vnfProductName', 'VNF Product Name', tacker_osc_utils.LIST_BOTH),
    ('vnfdId', 'VNFD ID', tacker_osc_utils.LIST_BOTH)
)
LOG = logging.getLogger(__name__)
# Attributes whose camelCase spelling must survive output formatting.
_mixed_case_fields = ('vnfInstanceName', 'vnfInstanceDescription', 'vnfdId',
                      'vnfProvider', 'vnfProductName', 'vnfSoftwareVersion',
                      'vnfdVersion', 'instantiationState',
                      'vimConnectionInfo', 'instantiatedVnfInfo',
                      'vnfConfigurableProperties')
_VNF_INSTANCE = 'vnf_instance'
# Timeouts/poll interval (seconds) used while waiting for termination.
VNF_INSTANCE_TERMINATION_TIMEOUT = 300
EXTRA_WAITING_TIME = 10
SLEEP_TIME = 1
# Complex attributes rendered with the multi-line formatter.
formatters = {'vimConnectionInfo': tacker_osc_utils.FormatComplexDataColumn,
              'instantiatedVnfInfo': tacker_osc_utils.FormatComplexDataColumn,
              '_links': tacker_osc_utils.FormatComplexDataColumn}
def _get_columns(vnflcm_obj, action=None):
    """Build the attribute -> display-heading map for a VNF instance.

    For ``action='show'`` the VIM connection info column is added, plus
    the instantiated-VNF-info column when the instance is INSTANTIATED.
    """
    column_map = {
        'id': 'ID',
        'vnfInstanceName': 'VNF Instance Name',
        'vnfInstanceDescription': 'VNF Instance Description',
        'vnfdId': 'VNFD ID',
        'vnfProvider': 'VNF Provider',
        'vnfProductName': 'VNF Product Name',
        'vnfSoftwareVersion': 'VNF Software Version',
        'vnfdVersion': 'VNFD Version',
        'instantiationState': 'Instantiation State',
        '_links': 'Links',
        'vnfConfigurableProperties': 'VNF Configurable Properties',
    }
    if action == 'show':
        if vnflcm_obj['instantiationState'] == 'INSTANTIATED':
            column_map['instantiatedVnfInfo'] = 'Instantiated Vnf Info'
        column_map['vimConnectionInfo'] = 'VIM Connection Info'
        column_map['_links'] = 'Links'
    return sdk_utils.get_osc_show_columns_for_sdk_resource(
        vnflcm_obj, column_map)
class CreateVnfLcm(command.ShowOne):
    # Creates a VNF instance via the tacker client; with --I it also fires
    # an instantiate request immediately after creation.
    _description = _("Create a new VNF Instance")
    def get_parser(self, prog_name):
        """Add vnfd-id plus optional name/description/--I arguments."""
        parser = super(CreateVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            'vnfd_id',
            metavar="<vnfd-id>",
            help=_('Identifier that identifies the VNFD which defines the '
                   'VNF instance to be created.'))
        parser.add_argument(
            '--name',
            metavar="<vnf-instance-name>",
            help=_('Name of the VNF instance to be created.'))
        parser.add_argument(
            '--description',
            metavar="<vnf-instance-description>",
            help=_('Description of the VNF instance to be created.'))
        parser.add_argument(
            '--I',
            metavar="<param-file>",
            help=_("Instantiate VNF subsequently after it's creation. "
                   "Specify instantiate request parameters in a json file."))
        return parser
    def args2body(self, parsed_args, file_path=None):
        """Build the create body, or the instantiate body from file_path."""
        body = {}
        # A parameter file, when given, fully defines the request body.
        if file_path:
            return jsonfile2body(file_path)
        body['vnfdId'] = parsed_args.vnfd_id
        if parsed_args.description:
            body['vnfInstanceDescription'] = parsed_args.description
        if parsed_args.name:
            body['vnfInstanceName'] = parsed_args.name
        return body
    def take_action(self, parsed_args):
        """Create the instance, optionally instantiate it, then show it."""
        client = self.app.client_manager.tackerclient
        vnf = client.create_vnf_instance(self.args2body(parsed_args))
        if parsed_args.I:
            # Instantiate VNF instance.
            result = client.instantiate_vnf_instance(
                vnf['id'],
                self.args2body(parsed_args, file_path=parsed_args.I))
            # An empty result means the async request was accepted.
            if not result:
                print((_('VNF Instance %(id)s is created and instantiation'
                         ' request has been accepted.') % {'id': vnf['id']}))
        display_columns, columns = _get_columns(vnf)
        data = utils.get_item_properties(sdk_utils.DictModel(vnf),
                                         columns, formatters=formatters,
                                         mixed_case_fields=_mixed_case_fields)
        return (display_columns, data)
class ShowVnfLcm(command.ShowOne):
    # Fetches one VNF instance and renders it via _get_columns/formatters.
    _description = _("Display VNF instance details")
    def get_parser(self, prog_name):
        parser = super(ShowVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_("VNF instance ID to display"))
        return parser
    def take_action(self, parsed_args):
        client = self.app.client_manager.tackerclient
        obj = client.show_vnf_instance(parsed_args.vnf_instance)
        # 'show' adds the VIM connection / instantiated-info columns.
        display_columns, columns = _get_columns(obj, action='show')
        data = utils.get_item_properties(
            sdk_utils.DictModel(obj),
            columns, mixed_case_fields=_mixed_case_fields,
            formatters=formatters)
        return (display_columns, data)
class ListVnfLcm(command.Lister):
    # Lists all VNF instances in the columns defined by _attr_map.
    _description = _("List VNF Instance")
    def get_parser(self, prog_name):
        # No options beyond the base lister's.
        return super(ListVnfLcm, self).get_parser(prog_name)
    def take_action(self, parsed_args):
        """Fetch all VNF instances and shape them for tabular output."""
        client = self.app.client_manager.tackerclient
        instances = client.list_vnf_instances()
        headers, columns = tacker_osc_utils.get_column_definitions(
            _attr_map, long_listing=True)
        rows = (utils.get_dict_properties(
            instance, columns, mixed_case_fields=_mixed_case_fields,
        ) for instance in instances)
        return (headers, rows)
def jsonfile2body(file_path):
    """Load a JSON parameter file into a request-body dict.

    :param file_path: path to the JSON file
    :returns: the parsed (non-empty) document
    :raises exceptions.InvalidInput: when the path is unreadable, the
        content is not valid JSON, or the document is empty
    """
    # Up-front readability check keeps the original, more specific error
    # message for missing/unreadable files (idiom fix: `not ...` instead
    # of the old `... is False`).
    if file_path is not None and not os.access(file_path, os.R_OK):
        msg = _("File %s does not exist or user does not have read "
                "privileges to it")
        reason = msg % file_path
        raise exceptions.InvalidInput(reason=reason)
    try:
        with open(file_path) as f:
            body = json.load(f)
    except (IOError, ValueError) as ex:
        msg = _("Failed to load parameter file. Error: %s")
        reason = msg % ex
        raise exceptions.InvalidInput(reason=reason)
    if not body:
        reason = _('The parameter file is empty')
        raise exceptions.InvalidInput(reason=reason)
    return body
class InstantiateVnfLcm(command.Command):
    # Sends an instantiate request whose body comes from a JSON file.
    _description = _("Instantiate a VNF Instance")
    def get_parser(self, prog_name):
        parser = super(InstantiateVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_("VNF instance ID to instantiate"))
        parser.add_argument(
            'instantiation_request_file',
            metavar="<param-file>",
            help=_('Specify instantiate request parameters in a json file.'))
        return parser
    def take_action(self, parsed_args):
        client = self.app.client_manager.tackerclient
        result = client.instantiate_vnf_instance(
            parsed_args.vnf_instance, jsonfile2body(
                parsed_args.instantiation_request_file))
        # An empty result indicates the async request was accepted.
        if not result:
            print((_('Instantiate request for VNF Instance %(id)s has been'
                     ' accepted.') % {'id': parsed_args.vnf_instance}))
class HealVnfLcm(command.Command):
    # Sends a heal request; --vnfc-instance narrows the scope to specific
    # VNFC instances, --additional-param-file merges extra JSON keys in.
    _description = _("Heal VNF Instance")
    def get_parser(self, prog_name):
        parser = super(HealVnfLcm, self).get_parser(prog_name)
        # Custom usage string showing the "--" separator required before
        # the positional when the list-valued option precedes it.
        usage_message = ('''%(prog)s [-h] [--cause CAUSE]
                [--vnfc-instance <vnfc-instance-id> '''
                         '''[<vnfc-instance-id> ...]]
                [--additional-param-file <additional-param-file>]
                -- <vnf-instance>''')
        parser.usage = usage_message
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_("VNF instance ID to heal"))
        parser.add_argument(
            '--cause',
            help=_('Specify the reason why a healing procedure is required.'))
        parser.add_argument(
            '--vnfc-instance',
            metavar="<vnfc-instance-id>",
            nargs="+",
            help=_("List of VNFC instances requiring a healing action.")
        )
        parser.add_argument(
            '--additional-param-file',
            metavar="<additional-param-file>",
            help=_("Additional parameters passed by the NFVO as input "
                   "to the healing process."))
        return parser
    def args2body(self, parsed_args):
        """Build the heal request body from the parsed CLI options."""
        body = {}
        if parsed_args.cause:
            body['cause'] = parsed_args.cause
        if parsed_args.vnfc_instance:
            body['vnfcInstanceId'] = parsed_args.vnfc_instance
        if parsed_args.additional_param_file:
            # Merge the JSON file's keys directly into the request body.
            body.update(jsonfile2body(parsed_args.additional_param_file))
        return body
    def take_action(self, parsed_args):
        client = self.app.client_manager.tackerclient
        result = client.heal_vnf_instance(
            parsed_args.vnf_instance, self.args2body(parsed_args))
        # An empty result indicates the async request was accepted.
        if not result:
            print((_('Heal request for VNF Instance %(id)s has been'
                     ' accepted.') % {'id': parsed_args.vnf_instance}))
class TerminateVnfLcm(command.Command):
    # Sends a terminate request; with --D it also waits for the instance
    # to reach NOT_INSTANTIATED and then deletes it.
    _description = _("Terminate a VNF instance")
    def get_parser(self, prog_name):
        parser = super(TerminateVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_("VNF instance ID to terminate"))
        parser.add_argument(
            "--termination-type",
            default='GRACEFUL',
            metavar="<termination-type>",
            choices=['GRACEFUL', 'FORCEFUL'],
            help=_("Termination type can be 'GRACEFUL' or 'FORCEFUL'. "
                   "Default is 'GRACEFUL'"))
        parser.add_argument(
            '--graceful-termination-timeout',
            metavar="<graceful-termination-timeout>",
            type=int,
            help=_('This attribute is only applicable in case of graceful '
                   'termination. It defines the time to wait for the VNF to be'
                   ' taken out of service before shutting down the VNF and '
                   'releasing the resources. The unit is seconds.'))
        parser.add_argument(
            '--D',
            action='store_true',
            default=False,
            help=_("Delete VNF Instance subsequently after it's termination"),
        )
        return parser
    def args2body(self, parsed_args):
        """Build the terminate request body.

        :raises exceptions.InvalidInput: when
            --graceful-termination-timeout is combined with FORCEFUL
            termination.
        """
        body = {}
        body['terminationType'] = parsed_args.termination_type
        if parsed_args.graceful_termination_timeout:
            if parsed_args.termination_type == 'FORCEFUL':
                # Bug fix: this exception was previously constructed but
                # never raised, so the invalid combination was silently
                # sent to the server.
                raise exceptions.InvalidInput(
                    reason='--graceful-termination-timeout'
                           ' argument is invalid for "FORCEFUL"'
                           ' termination')
            body['gracefulTerminationTimeout'] = parsed_args.\
                graceful_termination_timeout
        return body
    def take_action(self, parsed_args):
        """Send the terminate request; with --D also wait and delete."""
        client = self.app.client_manager.tackerclient
        result = client.terminate_vnf_instance(parsed_args.vnf_instance,
                                               self.args2body(parsed_args))
        # An empty result indicates the async request was accepted.
        if not result:
            print(_("Terminate request for VNF Instance '%(id)s' has been"
                    " accepted.") % {'id': parsed_args.vnf_instance})
            if parsed_args.D:
                print(_("Waiting for vnf instance to be terminated before "
                        "deleting"))
                self._wait_until_vnf_is_terminated(
                    client, parsed_args.vnf_instance,
                    graceful_timeout=parsed_args.graceful_termination_timeout)
                result = client.delete_vnf_instance(parsed_args.vnf_instance)
                if not result:
                    print(_("VNF Instance '%(id)s' is deleted successfully") %
                          {'id': parsed_args.vnf_instance})
    def _wait_until_vnf_is_terminated(self, client, vnf_instance_id,
                                      graceful_timeout=None):
        """Poll until 'instantiationState' becomes 'NOT_INSTANTIATED'.

        :raises exceptions.CommandError: if the state is not reached
            within the timeout window.
        """
        # wait until vnf instance 'instantiationState' is set to
        # 'NOT_INSTANTIATED'
        if graceful_timeout:
            # If graceful_termination_timeout is provided,
            # terminate vnf will start after this timeout period.
            # Hence, it should wait for extra time of 10 seconds
            # after this graceful_termination_timeout period.
            timeout = graceful_timeout + EXTRA_WAITING_TIME
        else:
            timeout = VNF_INSTANCE_TERMINATION_TIMEOUT
        start_time = int(time.time())
        while True:
            vnf_instance = client.show_vnf_instance(vnf_instance_id)
            if vnf_instance['instantiationState'] == 'NOT_INSTANTIATED':
                break
            if ((int(time.time()) - start_time) > timeout):
                msg = _("Couldn't verify vnf instance is terminated within "
                        "'%(timeout)s' seconds. Unable to delete vnf instance "
                        "%(id)s")
                raise exceptions.CommandError(
                    message=msg % {'timeout': timeout, 'id': vnf_instance_id})
            time.sleep(SLEEP_TIME)
class DeleteVnfLcm(command.Command):
    """Vnf lcm delete

    DeleteVnfLcm class supports bulk deletion of vnf instances, and error
    handling: every requested instance is attempted, and failures are
    reported together at the end.
    """
    _description = _("Delete VNF Instance(s)")
    def get_parser(self, prog_name):
        parser = super(DeleteVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            'vnf_instances',
            metavar="<vnf-instance>",
            nargs="+",
            help=_("VNF instance ID(s) to delete"))
        return parser
    def take_action(self, parsed_args):
        """Delete each instance in turn, then summarise any failures."""
        error_count = 0
        client = self.app.client_manager.tackerclient
        vnf_instances = parsed_args.vnf_instances
        for vnf_instance in vnf_instances:
            try:
                client.delete_vnf_instance(vnf_instance)
            except Exception as e:
                # Keep going: all failures are reported together below.
                error_count += 1
                LOG.error(_("Failed to delete vnf instance with "
                            "ID '%(vnf)s': %(e)s"),
                          {'vnf': vnf_instance, 'e': e})
        total = len(vnf_instances)
        if (error_count > 0):
            msg = (_("Failed to delete %(error_count)s of %(total)s "
                     "vnf instances.") % {'error_count': error_count,
                                          'total': total})
            raise exceptions.CommandError(message=msg)
        else:
            if total > 1:
                print(_('All specified vnf instances are deleted '
                        'successfully'))
            else:
                print(_("Vnf instance '%s' is deleted "
                        "successfully") % vnf_instances[0])
class UpdateVnfLcm(command.Command):
    """Update a VNF instance from parameters supplied in a JSON file."""

    _description = _("Update VNF Instance")

    def get_parser(self, prog_name):
        """Build the argument parser for the update command."""
        parser = super(UpdateVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_('VNF instance ID to update.'))
        parser.add_argument(
            '--I',
            metavar="<param-file>",
            help=_("Specify update request parameters in a json file."))
        return parser

    def args2body(self, file_path=None):
        """Return the update request body.

        :param file_path: optional path of a JSON parameter file; when
            given, its parsed content becomes the request body.
        :returns: dict request body (empty when no file is supplied)
        """
        if file_path:
            return jsonfile2body(file_path)
        return {}

    def take_action(self, parsed_args):
        """Issue the update request and report acceptance."""
        client = self.app.client_manager.tackerclient
        # Nothing to send unless a parameter file was provided.
        if not parsed_args.I:
            return
        result = client.update_vnf_instance(
            parsed_args.vnf_instance,
            self.args2body(file_path=parsed_args.I))
        if not result:
            print((_('Update vnf:%(id)s ') %
                   {'id': parsed_args.vnf_instance}))
class ScaleVnfLcm(command.Command):
    """Scale a VNF instance in or out along a given scaling aspect."""

    _description = _("Scale a VNF Instance")

    def get_parser(self, prog_name):
        """Build the argument parser for the scale command."""
        parser = super(ScaleVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_('VNF instance ID to scale'))
        parser.add_argument(
            '--number-of-steps',
            metavar="<number-of-steps>",
            type=int,
            help=_("Number of scaling steps to be executed as part of "
                   "this Scale VNF operation."))
        parser.add_argument(
            '--additional-param-file',
            metavar="<additional-param-file>",
            help=_("Additional parameters passed by the NFVO as input "
                   "to the scaling process."))
        scale_require_parameters = parser.add_argument_group(
            "require arguments"
        )
        scale_require_parameters.add_argument(
            '--type',
            metavar="<type>",
            required=True,
            choices=['SCALE_OUT', 'SCALE_IN'],
            help=_("SCALE_OUT or SCALE_IN for type of scale operation."))
        scale_require_parameters.add_argument(
            '--aspect-id',
            required=True,
            metavar="<aspect-id>",
            help=_("Identifier of the scaling aspect."))
        return parser

    def args2body(self, parsed_args):
        """Build the scale request body from CLI arguments.

        :param parsed_args: parsed CLI namespace
        :returns: dict request body
        """
        body = {
            'type': parsed_args.type,
            'aspectId': parsed_args.aspect_id,
        }
        if parsed_args.number_of_steps:
            body['numberOfSteps'] = parsed_args.number_of_steps
        if parsed_args.additional_param_file:
            # Merge NFVO-supplied additional parameters into the body.
            body.update(jsonfile2body(parsed_args.additional_param_file))
        return body

    def take_action(self, parsed_args):
        """Issue the scale request and report acceptance."""
        client = self.app.client_manager.tackerclient
        request_body = self.args2body(parsed_args)
        result = client.scale_vnf_instance(
            parsed_args.vnf_instance, request_body)
        if not result:
            print((_('Scale request for VNF Instance %s has been accepted.')
                   % parsed_args.vnf_instance))
class ChangeExtConnVnfLcm(command.Command):
    """Change external connectivity of a VNF instance from a JSON file."""

    _description = _("Change External VNF Connectivity")

    def get_parser(self, prog_name):
        """Build the argument parser for the change-ext-conn command."""
        parser = super(ChangeExtConnVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_("VNF instance ID to Change External VNF Connectivity"))
        parser.add_argument(
            'request_file',
            metavar="<param-file>",
            help=_("Specify change-ext-conn request parameters "
                   "in a json file."))
        return parser

    def take_action(self, parsed_args):
        """Issue the change-ext-conn request and report acceptance."""
        client = self.app.client_manager.tackerclient
        request_body = jsonfile2body(parsed_args.request_file)
        result = client.change_ext_conn_vnf_instance(
            parsed_args.vnf_instance, request_body)
        if not result:
            print((_('Change External VNF Connectivity for VNF Instance %s '
                     'has been accepted.') % parsed_args.vnf_instance))
class ChangeVnfPkgVnfLcm(command.Command):
    """Change the current VNF package of an instance from a JSON file."""

    _description = _("Change Current VNF Package")

    def get_parser(self, prog_name):
        """Build the argument parser for the change-vnfpkg command."""
        parser = super(ChangeVnfPkgVnfLcm, self).get_parser(prog_name)
        parser.add_argument(
            _VNF_INSTANCE,
            metavar="<vnf-instance>",
            help=_("VNF instance ID to Change Current VNF Package"))
        parser.add_argument(
            'request_file',
            metavar="<param-file>",
            help=_("Specify change-vnfpkg request parameters "
                   "in a json file."))
        return parser

    def take_action(self, parsed_args):
        """Issue the change-vnfpkg request and report acceptance."""
        client = self.app.client_manager.tackerclient
        request_body = jsonfile2body(parsed_args.request_file)
        result = client.change_vnfpkg_vnf_instance(
            parsed_args.vnf_instance, request_body)
        if not result:
            print((_('Change Current VNF Package for VNF Instance %s '
                     'has been accepted.') % parsed_args.vnf_instance))
| {
"content_hash": "1342dc1a08dce753315de43620593db6",
"timestamp": "",
"source": "github",
"line_count": 593,
"max_line_length": 79,
"avg_line_length": 36.90725126475548,
"alnum_prop": 0.5681257424837796,
"repo_name": "openstack/python-tackerclient",
"id": "2517860e22b0eb3f3d25fcd6e3609b1d59cf6141",
"size": "22514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tackerclient/osc/v1/vnflcm/vnflcm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "798637"
},
{
"name": "Shell",
"bytes": "328"
}
],
"symlink_target": ""
} |
import cv2
from drivers.firmata import *
# Vars
stArea = 20  # half-width (pixels) of the central "stable" dead zone around centerX
reqMes = 3   # number of consecutive measurements required before acting
# Pins
diPin = 2  # direction pin on the firmata board -- presumably motor direction; confirm wiring
moPin = 3  # motor enable pin on the firmata board
class Application:
    """Face-tracking loop: detect faces with a Haar cascade and drive a
    motor (via firmata pins) to keep the average face position centered
    horizontally in the camera frame.
    """
    def setup(self):
        """Open the camera, load the cascade, and initialise state."""
        # OpenCV
        self.cam = cv2.VideoCapture(0)
        # NOTE(review): cascade file is loaded from the working directory
        self.face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
        cv2.namedWindow("track-tv", cv2.WINDOW_AUTOSIZE)
        # Read one frame just to learn the frame width for centering.
        height, width, depth = self.cam.read()[1].shape
        self.centerX = width/2
        # Lists for measured data
        # mes1 accumulates "motor on/off" votes, mes2 "direction" votes;
        # each list is acted on (and cleared) once it reaches reqMes entries.
        self.mes1,self.mes2 = [],[]
    def loop(self):
        """Process one frame: detect faces, vote, and drive the pins."""
        # Capture image
        img = self.cam.read()[1]
        # Convert image to gray to improve cascade analysis
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Fix color
        eq = cv2.equalizeHist(gray)
        # Detect faces with cascade
        faces = self.face_cascade.detectMultiScale(eq, 1.3, 5)
        # Calculate average position
        avX = 0
        avY = 0
        for (x,y,w,h) in faces:
            cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)
            avX += x+w/2
            avY += y+h/2
        if len(faces)>0:
            avX = avX/len(faces)
            avY = avY/len(faces)
            # Vote HIGH on mes1 (run motor) whenever the average face is
            # outside the dead zone; mes2 votes encode the direction.
            if avX>self.centerX+stArea:
                self.mes1.append(HIGH)
                self.mes2.append(HIGH)
            elif avX<self.centerX-stArea:
                self.mes1.append(HIGH)
                self.mes2.append(LOW)
            else:
                # Face centered: vote to stop; no direction vote is added.
                self.mes1.append(LOW)
        else:
            self.mes1.append(LOW)
        # Act only on a unanimous run of reqMes motor votes, then reset.
        if len(self.mes1) == reqMes:
            if self.mes1.count(HIGH) == len(self.mes1):
                serialPrintln("Start motor")
                #self.board.digital[moPin].write(HIGH)
                digitalWrite(moPin,HIGH)
            else:
                serialPrintln("Stop motor")
                #self.board.digital[moPin].write(LOW)
                digitalWrite(moPin,LOW)
            self.mes1=[]
        # Direction changes also require a unanimous run of reqMes votes.
        if len(self.mes2) == reqMes:
            if self.mes2.count(HIGH)==len(self.mes2):
                serialPrintln("Go right")
                #self.board.digital[diPin].write(LOW)
                digitalWrite(diPin,LOW)
            elif self.mes2.count(LOW)==len(self.mes2):
                serialPrintln("Go left")
                #self.board.digital[diPin].write(HIGH)
                digitalWrite(diPin,HIGH)
            self.mes2=[]
        cv2.imshow("track-tv", img)
if __name__ == '__main__':
    # Connect to the firmata board and expose its pin helpers as globals
    # used by Application.loop.
    driver = Driver("/dev/ttyACM0")
    digitalWrite = driver.digitalWrite
    serialPrintln = driver.serialPrintln
    app = Application()
    app.setup()
    # Run until any key is pressed in the OpenCV window
    # (waitKey returns -1 while no key is pressed).
    while 0>cv2.waitKey(1):
        app.loop()
    cv2.destroyWindow("track-tv")
    driver.exit()
| {
"content_hash": "70e7e1d345cc543bcb488a34c7da3dc9",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 88,
"avg_line_length": 27.90909090909091,
"alnum_prop": 0.5175533840028954,
"repo_name": "prozum/track-tv",
"id": "f49f4202d975c42e8ee0d8c13d22e5e9d37cd0da",
"size": "2781",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "25157"
},
{
"name": "C++",
"bytes": "4762"
},
{
"name": "Meson",
"bytes": "398"
},
{
"name": "Python",
"bytes": "2781"
}
],
"symlink_target": ""
} |
import copy
import uuid
from glanceclient import exc as glance_exceptions
import mock
import mox
from neutronclient.v2_0 import client as neutronclient
import six
from heat.common import exception
from heat.common import template_format
from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance
from heat.engine.clients.os import neutron
from heat.engine.clients.os import nova
from heat.engine import environment
from heat.engine import resource
from heat.engine.resources.aws.ec2 import instance as instances
from heat.engine import scheduler
from heat.engine import stack as parser
from heat.engine import template
from heat.tests import common
from heat.tests.nova import fakes as fakes_nova
from heat.tests import utils
wp_template = '''
{
"AWSTemplateFormatVersion" : "2010-09-09",
"Description" : "WordPress",
"Parameters" : {
"KeyName" : {
"Description" : "KeyName",
"Type" : "String",
"Default" : "test"
}
},
"Resources" : {
"WebServer": {
"Type": "AWS::EC2::Instance",
"Properties": {
"ImageId" : "F17-x86_64-gold",
"InstanceType" : "m1.large",
"KeyName" : "test",
"NovaSchedulerHints" : [{"Key": "foo", "Value": "spam"},
{"Key": "bar", "Value": "eggs"},
{"Key": "foo", "Value": "ham"},
{"Key": "foo", "Value": "baz"}],
"UserData" : "wordpress",
"BlockDeviceMappings": [
{
"DeviceName": "vdb",
"Ebs": {"SnapshotId": "9ef5496e-7426-446a-bbc8-01f84d9c9972",
"DeleteOnTermination": "True"}
}]
}
}
}
}
'''
class InstancesTest(common.HeatTestCase):
    def setUp(self):
        """Create the fake nova client shared by all tests."""
        super(InstancesTest, self).setUp()
        self.fc = fakes_nova.FakeClient()
    def _setup_test_stack(self, stack_name):
        """Parse wp_template into a new Stack with a random UUID.

        Returns a (template, stack) tuple.
        """
        t = template_format.parse(wp_template)
        tmpl = template.Template(
            t, env=environment.Environment({'KeyName': 'test'}))
        stack = parser.Stack(utils.dummy_context(), stack_name, tmpl,
                             stack_id=str(uuid.uuid4()))
        return (tmpl, stack)
    def _mock_get_image_id_success(self, imageId_input, imageId):
        """Stub glance image lookup to resolve imageId_input -> imageId."""
        self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
        glance.GlanceClientPlugin.get_image_id(
            imageId_input).MultipleTimes().AndReturn(imageId)
    def _mock_get_image_id_fail(self, image_id, exp):
        """Stub glance image lookup to raise the given exception."""
        self.m.StubOutWithMock(glance.GlanceClientPlugin, 'get_image_id')
        glance.GlanceClientPlugin.get_image_id(image_id).AndRaise(exp)
    def _get_test_template(self, stack_name, image_id=None):
        """Return (template, stack) with image/flavor overridden."""
        (tmpl, stack) = self._setup_test_stack(stack_name)
        tmpl.t['Resources']['WebServer']['Properties'][
            'ImageId'] = image_id or 'CentOS 5.2'
        tmpl.t['Resources']['WebServer']['Properties'][
            'InstanceType'] = '256 MB Server'
        return tmpl, stack
    def _setup_test_instance(self, return_server, name, image_id=None,
                             stub_create=True):
        """Build an Instance resource with nova/glance mocks recorded.

        When stub_create is True, servers.create is also stubbed to
        return return_server (mox expectations, replayed by callers).
        """
        stack_name = '%s_s' % name
        tmpl, self.stack = self._get_test_template(stack_name, image_id)
        resource_defns = tmpl.resource_definitions(self.stack)
        instance = instances.Instance(name, resource_defns['WebServer'],
                                      self.stack)
        bdm = {"vdb": "9ef5496e-7426-446a-bbc8-01f84d9c9972:snap::True"}
        self._mock_get_image_id_success(image_id or 'CentOS 5.2', 1)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)
        self.stub_SnapshotConstraint_validate()
        if stub_create:
            self.m.StubOutWithMock(self.fc.servers, 'create')
            # Duplicate "foo" hints in the template collapse into a list.
            self.fc.servers.create(
                image=1, flavor=1, key_name='test',
                name=utils.PhysName(
                    stack_name,
                    instance.name,
                    limit=instance.physical_resource_name_limit),
                security_groups=None,
                userdata=mox.IgnoreArg(),
                scheduler_hints={'foo': ['spam', 'ham', 'baz'], 'bar': 'eggs'},
                meta=None, nics=None, availability_zone=None,
                block_device_mapping=bdm).AndReturn(
                    return_server)
        return instance
    def _create_test_instance(self, return_server, name, stub_create=True):
        """Set up mocks, replay them, and run the create task."""
        instance = self._setup_test_instance(return_server, name,
                                             stub_create=stub_create)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        return instance
    def test_instance_create(self):
        """Create succeeds and resolves IP/DNS/AZ attributes."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'in_create')
        # this makes sure the auto increment worked on instance creation
        self.assertTrue(instance.id > 0)
        expected_ip = return_server.networks['public'][0]
        expected_az = getattr(return_server, 'OS-EXT-AZ:availability_zone')
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicDnsName'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateDnsName'))
        self.assertEqual(expected_az, instance.FnGetAtt('AvailabilityZone'))
        self.m.VerifyAll()
    def test_instance_create_with_BlockDeviceMappings(self):
        """Create with a BlockDeviceMappings property succeeds."""
        return_server = self.fc.servers.list()[4]
        instance = self._create_test_instance(return_server,
                                              'create_with_bdm')
        # this makes sure the auto increment worked on instance creation
        self.assertTrue(instance.id > 0)
        expected_ip = return_server.networks['public'][0]
        expected_az = getattr(return_server, 'OS-EXT-AZ:availability_zone')
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicDnsName'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateDnsName'))
        self.assertEqual(expected_az, instance.FnGetAtt('AvailabilityZone'))
        self.m.VerifyAll()
    def test_build_block_device_mapping(self):
        """BDM dicts map to nova's 'id:snap:size:delete' string form."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'test_build_bdm')
        self.assertIsNone(instance._build_block_device_mapping([]))
        self.assertIsNone(instance._build_block_device_mapping(None))
        self.assertEqual({
            'vdb': '1234:snap:',
            'vdc': '5678:snap::False',
        }, instance._build_block_device_mapping([
            {'DeviceName': 'vdb', 'Ebs': {'SnapshotId': '1234'}},
            {'DeviceName': 'vdc', 'Ebs': {'SnapshotId': '5678',
                                          'DeleteOnTermination': False}},
        ]))
        self.assertEqual({
            'vdb': '1234:snap:1',
            'vdc': '5678:snap:2:True',
        }, instance._build_block_device_mapping([
            {'DeviceName': 'vdb', 'Ebs': {'SnapshotId': '1234',
                                          'VolumeSize': '1'}},
            {'DeviceName': 'vdc', 'Ebs': {'SnapshotId': '5678',
                                          'VolumeSize': '2',
                                          'DeleteOnTermination': True}},
        ]))
    def test_validate_Volumes_property(self):
        """validate() fails when a referenced VolumeId does not exist."""
        stack_name = 'validate_volumes'
        tmpl, stack = self._setup_test_stack(stack_name)
        volumes = [{'Device': 'vdb', 'VolumeId': '1234'}]
        wsp = tmpl.t['Resources']['WebServer']['Properties']
        wsp['Volumes'] = volumes
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('validate_volumes',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_success('F17-x86_64-gold', 1)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().MultipleTimes().AndReturn(self.fc)
        self.stub_SnapshotConstraint_validate()
        self.m.StubOutWithMock(cinder.CinderClientPlugin, 'get_volume')
        ex = exception.EntityNotFound(entity='Volume', name='1234')
        cinder.CinderClientPlugin.get_volume('1234').AndRaise(ex)
        self.m.ReplayAll()
        exc = self.assertRaises(exception.StackValidationFailed,
                                instance.validate)
        self.assertIn("WebServer.Properties.Volumes[0].VolumeId: "
                      "Error validating value '1234': The Volume "
                      "(1234) could not be found.",
                      six.text_type(exc))
        self.m.VerifyAll()
    def test_validate_BlockDeviceMappings_VolumeSize_valid_str(self):
        """validate() accepts a string VolumeSize in a BDM entry."""
        stack_name = 'val_VolumeSize_valid'
        tmpl, stack = self._setup_test_stack(stack_name)
        bdm = [{'DeviceName': 'vdb',
                'Ebs': {'SnapshotId': '1234',
                        'VolumeSize': '1'}}]
        wsp = tmpl.t['Resources']['WebServer']['Properties']
        wsp['BlockDeviceMappings'] = bdm
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('validate_volume_size',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_success('F17-x86_64-gold', 1)
        self.stub_SnapshotConstraint_validate()
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().MultipleTimes().AndReturn(self.fc)
        self.m.ReplayAll()
        self.assertIsNone(instance.validate())
        self.m.VerifyAll()
    def test_validate_BlockDeviceMappings_without_Ebs_property(self):
        """validate() rejects a BDM entry that lacks the Ebs sub-property."""
        stack_name = 'without_Ebs'
        tmpl, stack = self._setup_test_stack(stack_name)
        bdm = [{'DeviceName': 'vdb'}]
        wsp = tmpl.t['Resources']['WebServer']['Properties']
        wsp['BlockDeviceMappings'] = bdm
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('validate_without_Ebs',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_success('F17-x86_64-gold', 1)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().MultipleTimes().AndReturn(self.fc)
        self.m.ReplayAll()
        exc = self.assertRaises(exception.StackValidationFailed,
                                instance.validate)
        self.assertIn("Ebs is missing, this is required",
                      six.text_type(exc))
        self.m.VerifyAll()
    def test_validate_BlockDeviceMappings_without_SnapshotId_property(self):
        """validate() rejects a BDM Ebs entry that lacks SnapshotId."""
        stack_name = 'without_SnapshotId'
        tmpl, stack = self._setup_test_stack(stack_name)
        bdm = [{'DeviceName': 'vdb',
                'Ebs': {'VolumeSize': '1'}}]
        wsp = tmpl.t['Resources']['WebServer']['Properties']
        wsp['BlockDeviceMappings'] = bdm
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('validate_without_SnapshotId',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_success('F17-x86_64-gold', 1)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().MultipleTimes().AndReturn(self.fc)
        self.m.ReplayAll()
        exc = self.assertRaises(exception.StackValidationFailed,
                                instance.validate)
        self.assertIn("SnapshotId is missing, this is required",
                      six.text_type(exc))
        self.m.VerifyAll()
    def test_validate_BlockDeviceMappings_without_DeviceName_property(self):
        """validate() rejects a BDM entry that lacks DeviceName."""
        stack_name = 'without_DeviceName'
        tmpl, stack = self._setup_test_stack(stack_name)
        bdm = [{'Ebs': {'SnapshotId': '1234',
                        'VolumeSize': '1'}}]
        wsp = tmpl.t['Resources']['WebServer']['Properties']
        wsp['BlockDeviceMappings'] = bdm
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('validate_without_DeviceName',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_success('F17-x86_64-gold', 1)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().MultipleTimes().AndReturn(self.fc)
        self.m.ReplayAll()
        exc = self.assertRaises(exception.StackValidationFailed,
                                instance.validate)
        excepted_error = (
            'Property error: '
            'Resources.WebServer.Properties.BlockDeviceMappings[0]: '
            'Property DeviceName not assigned')
        self.assertIn(excepted_error, six.text_type(exc))
        self.m.VerifyAll()
    def test_instance_create_with_image_id(self):
        """Create with an explicit image id resolves attributes."""
        return_server = self.fc.servers.list()[1]
        instance = self._setup_test_instance(return_server,
                                             'in_create_imgid',
                                             image_id='1')
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        # this makes sure the auto increment worked on instance creation
        self.assertTrue(instance.id > 0)
        expected_ip = return_server.networks['public'][0]
        expected_az = getattr(return_server, 'OS-EXT-AZ:availability_zone')
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateIp'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PublicDnsName'))
        self.assertEqual(expected_ip, instance.FnGetAtt('PrivateDnsName'))
        self.assertEqual(expected_az, instance.FnGetAtt('AvailabilityZone'))
        self.m.VerifyAll()
    def test_instance_create_resolve_az_attribute(self):
        """_availability_zone() returns the server's OS-EXT-AZ value."""
        return_server = self.fc.servers.list()[1]
        instance = self._setup_test_instance(return_server,
                                             'create_resolve_az_attribute')
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        expected_az = getattr(return_server, 'OS-EXT-AZ:availability_zone')
        actual_az = instance._availability_zone()
        self.assertEqual(expected_az, actual_az)
        self.m.VerifyAll()
    def test_instance_create_image_name_err(self):
        """Create fails with a clear message for an unknown image name."""
        stack_name = 'test_instance_create_image_name_err_stack'
        (tmpl, stack) = self._setup_test_stack(stack_name)
        # create an instance with non exist image name
        tmpl.t['Resources']['WebServer']['Properties']['ImageId'] = 'Slackware'
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('instance_create_image_err',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_fail('Slackware',
                                     exception.EntityNotFound(
                                         entity='Image', name='Slackware'))
        self.m.ReplayAll()
        create = scheduler.TaskRunner(instance.create)
        error = self.assertRaises(exception.ResourceFailure, create)
        self.assertEqual(
            "StackValidationFailed: resources.instance_create_image_err: "
            "Property error: "
            "WebServer.Properties.ImageId: Error validating value "
            "'Slackware': The Image (Slackware) could not be found.",
            six.text_type(error))
        self.m.VerifyAll()
    def test_instance_create_duplicate_image_name_err(self):
        """Create fails when the image name matches multiple images."""
        stack_name = 'test_instance_create_image_name_err_stack'
        (tmpl, stack) = self._setup_test_stack(stack_name)
        # create an instance with a non unique image name
        wsp = tmpl.t['Resources']['WebServer']['Properties']
        wsp['ImageId'] = 'CentOS 5.2'
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('instance_create_image_err',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_fail('CentOS 5.2',
                                     exception.PhysicalResourceNameAmbiguity(
                                         name='CentOS 5.2'))
        self.m.ReplayAll()
        create = scheduler.TaskRunner(instance.create)
        error = self.assertRaises(exception.ResourceFailure, create)
        self.assertEqual(
            'StackValidationFailed: resources.instance_create_image_err: '
            'Property error: '
            'WebServer.Properties.ImageId: Multiple physical '
            'resources were found with name (CentOS 5.2).',
            six.text_type(error))
        self.m.VerifyAll()
    def test_instance_create_image_id_err(self):
        """Create fails when glance raises NotFound for the image id."""
        stack_name = 'test_instance_create_image_id_err_stack'
        (tmpl, stack) = self._setup_test_stack(stack_name)
        # create an instance with non exist image Id
        tmpl.t['Resources']['WebServer']['Properties']['ImageId'] = '1'
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('instance_create_image_err',
                                      resource_defns['WebServer'], stack)
        self._mock_get_image_id_fail('1', glance_exceptions.NotFound(404))
        self.m.ReplayAll()
        create = scheduler.TaskRunner(instance.create)
        error = self.assertRaises(exception.ResourceFailure, create)
        self.assertEqual(
            'StackValidationFailed: resources.instance_create_image_err: '
            'Property error: WebServer.Properties.ImageId: 404 (HTTP 404)',
            six.text_type(error))
        self.m.VerifyAll()
    def test_handle_check(self):
        """handle_check() passes when the server is reported active."""
        (tmpl, stack) = self._setup_test_stack('test_instance_check_active')
        res_definitions = tmpl.resource_definitions(stack)
        instance = instances.Instance('instance_create_image',
                                      res_definitions['WebServer'], stack)
        instance.nova = mock.Mock()
        self.patchobject(nova.NovaClientPlugin, '_check_active',
                         return_value=True)
        self.assertIsNone(instance.handle_check())
    def test_handle_check_raises_exception_if_instance_not_active(self):
        """handle_check() raises Error (with status) when not active."""
        (tmpl, stack) = self._setup_test_stack('test_instance_check_inactive')
        res_definitions = tmpl.resource_definitions(stack)
        instance = instances.Instance('instance_create_image',
                                      res_definitions['WebServer'], stack)
        instance.nova = mock.Mock()
        instance.nova.return_value.servers.get.return_value.status = 'foo'
        self.patchobject(nova.NovaClientPlugin, '_check_active',
                         return_value=False)
        exc = self.assertRaises(exception.Error, instance.handle_check)
        self.assertIn('foo', six.text_type(exc))
    class FakeVolumeAttach(object):
        """Stand-in volume-attach task whose started() is always False."""
        def started(self):
            return False
    def test_instance_create_unexpected_status(self):
        """check_create_complete raises ResourceUnknownStatus for BOGUS."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'test_instance_create')
        return_server.get = lambda: None
        return_server.status = 'BOGUS'
        e = self.assertRaises(resource.ResourceUnknownStatus,
                              instance.check_create_complete,
                              (return_server, self.FakeVolumeAttach()))
        self.assertEqual('Instance is not active - Unknown status BOGUS '
                         'due to "Unknown"', six.text_type(e))
    def test_instance_create_error_status(self):
        """ERROR status with a fault dict yields message/code in the error."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'test_instance_create')
        return_server.status = 'ERROR'
        return_server.fault = {
            'message': 'NoValidHost',
            'code': 500,
            'created': '2013-08-14T03:12:10Z'
        }
        self.m.StubOutWithMock(return_server, 'get')
        return_server.get()
        self.m.ReplayAll()
        e = self.assertRaises(resource.ResourceInError,
                              instance.check_create_complete,
                              (return_server, self.FakeVolumeAttach()))
        self.assertEqual(
            'Went to status ERROR due to "Message: NoValidHost, Code: 500"',
            six.text_type(e))
        self.m.VerifyAll()
    def test_instance_create_error_no_fault(self):
        """ERROR status without a fault dict yields Unknown message/code."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'in_create')
        return_server.status = 'ERROR'
        self.m.StubOutWithMock(return_server, 'get')
        return_server.get()
        self.m.ReplayAll()
        e = self.assertRaises(
            resource.ResourceInError, instance.check_create_complete,
            (return_server, self.FakeVolumeAttach()))
        self.assertEqual(
            'Went to status ERROR due to "Message: Unknown, Code: Unknown"',
            six.text_type(e))
        self.m.VerifyAll()
    def test_instance_create_with_stack_scheduler_hints(self):
        """stack_scheduler_hints adds heat_* hints to servers.create."""
        return_server = self.fc.servers.list()[1]
        instances.cfg.CONF.set_override('stack_scheduler_hints', True)
        # Unroll _create_test_instance, to enable check
        # for addition of heat ids (stack id, resource name)
        stack_name = 'test_instance_create_with_stack_scheduler_hints'
        (t, stack) = self._get_test_template(stack_name)
        resource_defns = t.resource_definitions(stack)
        instance = instances.Instance('in_create_with_sched_hints',
                                      resource_defns['WebServer'], stack)
        bdm = {"vdb": "9ef5496e-7426-446a-bbc8-01f84d9c9972:snap::True"}
        self._mock_get_image_id_success('CentOS 5.2', 1)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)
        self.stub_SnapshotConstraint_validate()
        self.m.StubOutWithMock(self.fc.servers, 'create')
        self.fc.servers.create(
            image=1, flavor=1, key_name='test',
            name=utils.PhysName(
                stack_name,
                instance.name,
                limit=instance.physical_resource_name_limit),
            security_groups=None,
            userdata=mox.IgnoreArg(),
            scheduler_hints={'heat_root_stack_id': stack.root_stack_id(),
                             'heat_stack_id': stack.id,
                             'heat_stack_name': stack.name,
                             'heat_path_in_stack': [(None, stack.name)],
                             'heat_resource_name': instance.name,
                             'foo': ['spam', 'ham', 'baz'], 'bar': 'eggs'},
            meta=None, nics=None, availability_zone=None,
            block_device_mapping=bdm).AndReturn(
                return_server)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.assertTrue(instance.id > 0)
        self.m.VerifyAll()
    def test_instance_validate(self):
        """validate() succeeds on a well-formed template."""
        stack_name = 'test_instance_validate_stack'
        (tmpl, stack) = self._setup_test_stack(stack_name)
        tmpl.t['Resources']['WebServer']['Properties']['ImageId'] = '1'
        resource_defns = tmpl.resource_definitions(stack)
        instance = instances.Instance('instance_create_image',
                                      resource_defns['WebServer'], stack)
        self.m.StubOutWithMock(nova.NovaClientPlugin, '_create')
        nova.NovaClientPlugin._create().AndReturn(self.fc)
        self._mock_get_image_id_success('1', 1)
        self.stub_SnapshotConstraint_validate()
        self.m.ReplayAll()
        self.assertIsNone(instance.validate())
        self.m.VerifyAll()
    def _test_instance_create_delete(self, vm_status='ACTIVE',
                                     vm_delete_status='NotFound'):
        """Create then delete; server may report DELETED or vanish (404)."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'in_cr_del')
        instance.resource_id = '1234'
        instance.status = vm_status
        # this makes sure the auto increment worked on instance creation
        self.assertTrue(instance.id > 0)
        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
        d1['server']['status'] = vm_status
        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
        get = self.fc.client.get_servers_1234
        get().AndReturn((200, d1))
        d2 = copy.deepcopy(d1)
        if vm_delete_status == 'DELETED':
            d2['server']['status'] = vm_delete_status
            get().AndReturn((200, d2))
        else:
            get().AndRaise(fakes_nova.fake_exception())
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.delete)()
        self.assertEqual((instance.DELETE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_instance_create_delete_notfound(self):
        """Delete completes when the server disappears (404)."""
        self._test_instance_create_delete()
    def test_instance_create_delete(self):
        """Delete completes when the server reports DELETED."""
        self._test_instance_create_delete(vm_delete_status='DELETED')
    def test_instance_create_notfound_on_delete(self):
        """Delete completes even if the delete call itself returns 404."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'in_cr_del')
        instance.resource_id = '1234'
        # this makes sure the auto increment worked on instance creation
        self.assertTrue(instance.id > 0)
        self.m.StubOutWithMock(self.fc.client, 'delete_servers_1234')
        self.fc.client.delete_servers_1234().AndRaise(
            fakes_nova.fake_exception())
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.delete)()
        self.assertEqual((instance.DELETE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_instance_update_metadata(self):
        """Updating the Metadata section is applied to the resource."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'ud_md')
        ud_tmpl = self._get_test_template('update_stack')[0]
        ud_tmpl.t['Resources']['WebServer']['Metadata'] = {'test': 123}
        resource_defns = ud_tmpl.resource_definitions(instance.stack)
        scheduler.TaskRunner(instance.update, resource_defns['WebServer'])()
        self.assertEqual({'test': 123}, instance.metadata_get())
    def test_instance_update_instance_type(self):
        """Changing InstanceType triggers a nova resize + confirmResize."""
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_type')
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['InstanceType'] = 'm1.small'
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        def activate_status(server):
            # Simulate nova reporting the resize is ready to confirm.
            server.status = 'VERIFY_RESIZE'
        return_server.get = activate_status.__get__(return_server)
        self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
        self.fc.client.post_servers_1234_action(
            body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
        self.fc.client.post_servers_1234_action(
            body={'confirmResize': None}).AndReturn((202, None))
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_instance_update_instance_type_failed(self):
        """A resize that does not reach VERIFY_RESIZE fails the update."""
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_type_f')
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['InstanceType'] = 'm1.small'
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        def fail_status(server):
            # Simulate nova reporting the resize went to ERROR.
            server.status = 'ERROR'
        return_server.get = fail_status.__get__(return_server)
        self.m.StubOutWithMock(self.fc.client, 'post_servers_1234_action')
        self.fc.client.post_servers_1234_action(
            body={'resize': {'flavorRef': 2}}).AndReturn((202, None))
        self.m.ReplayAll()
        updater = scheduler.TaskRunner(instance.update, update_template)
        error = self.assertRaises(exception.ResourceFailure, updater)
        self.assertEqual(
            "Error: resources.ud_type_f: "
            "Resizing to 'm1.small' failed, status 'ERROR'",
            six.text_type(error))
        self.assertEqual((instance.UPDATE, instance.FAILED), instance.state)
        self.m.VerifyAll()
def create_fake_iface(self, port, net, ip):
class fake_interface(object):
def __init__(self, port_id, net_id, fixed_ip):
self.port_id = port_id
self.net_id = net_id
self.fixed_ips = [{'ip_address': fixed_ip}]
return fake_interface(port, net, ip)
def test_instance_update_network_interfaces(self):
"""
Instance.handle_update supports changing the NetworkInterfaces,
and makes the change making a resize API call against Nova.
"""
return_server = self.fc.servers.list()[1]
return_server.id = '1234'
instance = self._create_test_instance(return_server,
'ud_network_interfaces')
# if new overlaps with old, detach the different ones in old, and
# attach the different ones in new
old_interfaces = [
{'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
'DeviceIndex': '2'},
{'NetworkInterfaceId': 'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
'DeviceIndex': '1'}]
new_interfaces = [
{'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
'DeviceIndex': '2'},
{'NetworkInterfaceId': '34b752ec-14de-416a-8722-9531015e04a5',
'DeviceIndex': '3'}]
instance.t['Properties']['NetworkInterfaces'] = old_interfaces
update_template = copy.deepcopy(instance.t)
update_template['Properties']['NetworkInterfaces'] = new_interfaces
self.m.StubOutWithMock(self.fc.servers, 'get')
self.fc.servers.get('1234').AndReturn(return_server)
self.m.StubOutWithMock(return_server, 'interface_detach')
return_server.interface_detach(
'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46').AndReturn(None)
self.m.StubOutWithMock(return_server, 'interface_attach')
return_server.interface_attach('34b752ec-14de-416a-8722-9531015e04a5',
None, None).AndReturn(None)
self.m.ReplayAll()
scheduler.TaskRunner(instance.update, update_template)()
self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
self.m.VerifyAll()
    def test_instance_update_network_interfaces_old_include_new(self):
        """When old interfaces are a superset of new, only detach happens.

        handle_update detaches the interfaces that were dropped from the
        list; no attach call is expected.
        """
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_network_interfaces')
        # if old include new, just detach the different ones in old
        old_interfaces = [
            {'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
             'DeviceIndex': '2'},
            {'NetworkInterfaceId': 'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
             'DeviceIndex': '1'}]
        new_interfaces = [
            {'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
             'DeviceIndex': '2'}]
        instance.t['Properties']['NetworkInterfaces'] = old_interfaces
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['NetworkInterfaces'] = new_interfaces
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        self.m.StubOutWithMock(return_server, 'interface_detach')
        return_server.interface_detach(
            'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46').AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        # NOTE(review): sibling tests finish with self.m.VerifyAll();
        # confirm its omission here is intentional.
    def test_instance_update_network_interfaces_new_include_old(self):
        """When new interfaces are a superset of old, only attach happens.

        handle_update attaches the interfaces that were added to the
        list; no detach call is expected.
        """
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_network_interfaces')
        # if new include old, just attach the different ones in new
        old_interfaces = [
            {'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
             'DeviceIndex': '2'}]
        new_interfaces = [
            {'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
             'DeviceIndex': '2'},
            {'NetworkInterfaceId': 'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
             'DeviceIndex': '1'}]
        instance.t['Properties']['NetworkInterfaces'] = old_interfaces
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['NetworkInterfaces'] = new_interfaces
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        self.m.StubOutWithMock(return_server, 'interface_attach')
        return_server.interface_attach('d1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
                                       None, None).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        # NOTE(review): sibling tests finish with self.m.VerifyAll();
        # confirm its omission here is intentional.
    def test_instance_update_network_interfaces_new_old_all_different(self):
        """Disjoint interface lists: detach all old, attach all new."""
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_network_interfaces')
        # if different, detach the old ones and attach the new ones
        old_interfaces = [
            {'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
             'DeviceIndex': '2'}]
        new_interfaces = [
            {'NetworkInterfaceId': '34b752ec-14de-416a-8722-9531015e04a5',
             'DeviceIndex': '3'},
            {'NetworkInterfaceId': 'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
             'DeviceIndex': '1'}]
        instance.t['Properties']['NetworkInterfaces'] = old_interfaces
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['NetworkInterfaces'] = new_interfaces
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        self.m.StubOutWithMock(return_server, 'interface_detach')
        return_server.interface_detach(
            'ea29f957-cd35-4364-98fb-57ce9732c10d').AndReturn(None)
        # New interfaces are attached in DeviceIndex order ('1' then '3').
        self.m.StubOutWithMock(return_server, 'interface_attach')
        return_server.interface_attach('d1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
                                       None, None).AndReturn(None)
        return_server.interface_attach('34b752ec-14de-416a-8722-9531015e04a5',
                                       None, None).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        # NOTE(review): sibling tests finish with self.m.VerifyAll();
        # confirm its omission here is intentional.
    def test_instance_update_network_interfaces_no_old(self):
        """Update with no old NetworkInterfaces in the template.

        The server's currently attached port (reported by
        interface_list) is detached and every interface from the new
        list is attached.
        """
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_network_interfaces')
        new_interfaces = [
            {'NetworkInterfaceId': 'ea29f957-cd35-4364-98fb-57ce9732c10d',
             'DeviceIndex': '2'},
            {'NetworkInterfaceId': '34b752ec-14de-416a-8722-9531015e04a5',
             'DeviceIndex': '3'}]
        iface = self.create_fake_iface('d1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
                                       'c4485ba1-283a-4f5f-8868-0cd46cdda52f',
                                       '10.0.0.4')
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['NetworkInterfaces'] = new_interfaces
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        # With no old interfaces in the template, the code falls back to
        # interface_list() to discover what is currently attached.
        self.m.StubOutWithMock(return_server, 'interface_list')
        return_server.interface_list().AndReturn([iface])
        self.m.StubOutWithMock(return_server, 'interface_detach')
        return_server.interface_detach(
            'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46').AndReturn(None)
        self.m.StubOutWithMock(return_server, 'interface_attach')
        return_server.interface_attach('ea29f957-cd35-4364-98fb-57ce9732c10d',
                                       None, None).AndReturn(None)
        return_server.interface_attach('34b752ec-14de-416a-8722-9531015e04a5',
                                       None, None).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_instance_update_network_interfaces_no_old_no_new(self):
        """Update to an empty NetworkInterfaces list.

        The currently attached port (from interface_list) is detached
        and a single attach with all-None arguments is expected.
        """
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_network_interfaces')
        iface = self.create_fake_iface('d1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
                                       'c4485ba1-283a-4f5f-8868-0cd46cdda52f',
                                       '10.0.0.4')
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['NetworkInterfaces'] = []
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        self.m.StubOutWithMock(return_server, 'interface_list')
        return_server.interface_list().AndReturn([iface])
        self.m.StubOutWithMock(return_server, 'interface_detach')
        return_server.interface_detach(
            'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46').AndReturn(None)
        # presumably the all-None attach lets Nova pick defaults for the
        # replacement port — confirm against the resource implementation.
        self.m.StubOutWithMock(return_server, 'interface_attach')
        return_server.interface_attach(None, None, None).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_instance_update_network_interfaces_no_old_new_with_subnet(self):
        """Empty NetworkInterfaces plus a SubnetId builds nics via subnet.

        The old port is detached, _build_nics is consulted with the
        subnet id, and the port it returns gets attached.
        """
        return_server = self.fc.servers.list()[1]
        return_server.id = '1234'
        instance = self._create_test_instance(return_server,
                                              'ud_network_interfaces')
        iface = self.create_fake_iface('d1e9c73c-04fe-4e9e-983c-d5ef94cd1a46',
                                       'c4485ba1-283a-4f5f-8868-0cd46cdda52f',
                                       '10.0.0.4')
        subnet_id = '8c1aaddf-e49e-4f28-93ea-ca9f0b3c6240'
        nics = [{'port-id': 'ea29f957-cd35-4364-98fb-57ce9732c10d'}]
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['NetworkInterfaces'] = []
        update_template['Properties']['SubnetId'] = subnet_id
        self.m.StubOutWithMock(self.fc.servers, 'get')
        self.fc.servers.get('1234').AndReturn(return_server)
        self.m.StubOutWithMock(return_server, 'interface_list')
        return_server.interface_list().AndReturn([iface])
        self.m.StubOutWithMock(return_server, 'interface_detach')
        return_server.interface_detach(
            'd1e9c73c-04fe-4e9e-983c-d5ef94cd1a46').AndReturn(None)
        # _build_nics is stubbed: the test only checks it is called with
        # the subnet id and that its result is attached.
        self.m.StubOutWithMock(instance, '_build_nics')
        instance._build_nics([], security_groups=None,
                             subnet_id=subnet_id).AndReturn(nics)
        self.m.StubOutWithMock(return_server, 'interface_attach')
        return_server.interface_attach('ea29f957-cd35-4364-98fb-57ce9732c10d',
                                       None, None).AndReturn(None)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.update, update_template)()
        self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_instance_update_properties(self):
        """Changing ImageId cannot be done in place: expect UpdateReplace."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'in_update2')
        self.stub_ImageConstraint_validate()
        self.m.ReplayAll()
        update_template = copy.deepcopy(instance.t)
        update_template['Properties']['ImageId'] = 'mustreplace'
        updater = scheduler.TaskRunner(instance.update, update_template)
        self.assertRaises(resource.UpdateReplace, updater)
        self.m.VerifyAll()
    def test_instance_status_build(self):
        """Create completes once the server's status becomes ACTIVE."""
        return_server = self.fc.servers.list()[0]
        instance = self._setup_test_instance(return_server,
                                             'in_sts_build')
        instance.resource_id = '1234'
        # Bind fake get method which Instance.check_create_complete will call
        def activate_status(server):
            server.status = 'ACTIVE'
        return_server.get = activate_status.__get__(return_server)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
    def _test_instance_status_suspend(self, name,
                                      state=('CREATE', 'COMPLETE')):
        """Suspend an instance that starts in *state* and expect COMPLETE.

        The stubbed server GET reports ACTIVE twice and then SUSPENDED,
        so the suspend task has to poll until the final status appears.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server, name)
        instance.resource_id = '1234'
        instance.state_set(state[0], state[1])
        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
        d2 = copy.deepcopy(d1)
        d1['server']['status'] = 'ACTIVE'
        d2['server']['status'] = 'SUSPENDED'
        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
        get = self.fc.client.get_servers_1234
        get().AndReturn((200, d1))
        get().AndReturn((200, d1))
        get().AndReturn((200, d2))
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.suspend)()
        self.assertEqual((instance.SUSPEND, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    # Suspend must work from several starting states: a freshly created
    # instance, and a previously failed or completed suspend.
    def test_instance_suspend_in_create_complete(self):
        self._test_instance_status_suspend(
            name='test_suspend_in_create_complete')
    def test_instance_suspend_in_suspend_failed(self):
        self._test_instance_status_suspend(
            name='test_suspend_in_suspend_failed',
            state=('SUSPEND', 'FAILED'))
    def test_server_suspend_in_suspend_complete(self):
        self._test_instance_status_suspend(
            name='test_suspend_in_suspend_complete',
            state=('SUSPEND', 'COMPLETE'))
    def _test_instance_status_resume(self, name,
                                     state=('SUSPEND', 'COMPLETE')):
        """Resume an instance that starts in *state* and expect COMPLETE.

        The stubbed server GET reports SUSPENDED twice and then ACTIVE,
        so the resume task has to poll until the final status appears.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server, name)
        instance.resource_id = '1234'
        instance.state_set(state[0], state[1])
        d1 = {'server': self.fc.client.get_servers_detail()[1]['servers'][0]}
        d2 = copy.deepcopy(d1)
        d1['server']['status'] = 'SUSPENDED'
        d2['server']['status'] = 'ACTIVE'
        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
        get = self.fc.client.get_servers_1234
        get().AndReturn((200, d1))
        get().AndReturn((200, d1))
        get().AndReturn((200, d2))
        self.m.ReplayAll()
        # NOTE(review): this overrides the *state* argument set above with
        # SUSPEND/COMPLETE right before resuming — looks redundant; confirm.
        instance.state_set(instance.SUSPEND, instance.COMPLETE)
        scheduler.TaskRunner(instance.resume)()
        self.assertEqual((instance.RESUME, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    # Resume must work from suspend-complete and from a previously
    # failed or completed resume.
    def test_instance_resume_in_suspend_complete(self):
        self._test_instance_status_resume(
            name='test_resume_in_suspend_complete')
    def test_instance_resume_in_resume_failed(self):
        self._test_instance_status_resume(
            name='test_resume_in_resume_failed',
            state=('RESUME', 'FAILED'))
    def test_instance_resume_in_resume_complete(self):
        self._test_instance_status_resume(
            name='test_resume_in_resume_complete',
            state=('RESUME', 'COMPLETE'))
    def test_server_resume_other_exception(self):
        """A non-404 Nova error during resume surfaces as ResourceFailure."""
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'in_resume_wait')
        instance.resource_id = '1234'
        # NOTE(review): ReplayAll is called twice (here and below); this
        # first call, before any expectations are recorded, looks like a
        # leftover — confirm.
        self.m.ReplayAll()
        self.m.StubOutWithMock(self.fc.client, 'get_servers_1234')
        get = self.fc.client.get_servers_1234
        get().AndRaise(fakes_nova.fake_exception(status_code=500,
                                                 message='VIKINGS!'))
        self.m.ReplayAll()
        instance.state_set(instance.SUSPEND, instance.COMPLETE)
        resumer = scheduler.TaskRunner(instance.resume)
        ex = self.assertRaises(exception.ResourceFailure, resumer)
        self.assertIn('VIKINGS!', ex.message)
        self.m.VerifyAll()
    # Each of the transient/uncommon Nova statuses below must not fail
    # the create; the instance keeps polling until it becomes ACTIVE.
    def test_instance_status_build_spawning(self):
        self._test_instance_status_not_build_active('BUILD(SPAWNING)')
    def test_instance_status_hard_reboot(self):
        self._test_instance_status_not_build_active('HARD_REBOOT')
    def test_instance_status_password(self):
        self._test_instance_status_not_build_active('PASSWORD')
    def test_instance_status_reboot(self):
        self._test_instance_status_not_build_active('REBOOT')
    def test_instance_status_rescue(self):
        self._test_instance_status_not_build_active('RESCUE')
    def test_instance_status_resize(self):
        self._test_instance_status_not_build_active('RESIZE')
    def test_instance_status_revert_resize(self):
        self._test_instance_status_not_build_active('REVERT_RESIZE')
    def test_instance_status_shutoff(self):
        self._test_instance_status_not_build_active('SHUTOFF')
    def test_instance_status_suspended(self):
        self._test_instance_status_not_build_active('SUSPENDED')
    def test_instance_status_verify_resize(self):
        self._test_instance_status_not_build_active('VERIFY_RESIZE')
    def _test_instance_status_not_build_active(self, uncommon_status):
        """Create succeeds despite an initial non-ACTIVE server status.

        The fake get reports *uncommon_status* on the first poll and
        switches to ACTIVE from the third poll on; the create task must
        keep polling and finish with CREATE/COMPLETE.
        """
        return_server = self.fc.servers.list()[0]
        instance = self._setup_test_instance(return_server,
                                             'in_sts_bld')
        instance.resource_id = '1234'
        # Bind fake get method which Instance.check_create_complete will call
        def activate_status(server):
            if hasattr(server, '_test_check_iterations'):
                server._test_check_iterations += 1
            else:
                server._test_check_iterations = 1
            if server._test_check_iterations == 1:
                server.status = uncommon_status
            if server._test_check_iterations > 2:
                server.status = 'ACTIVE'
        return_server.get = activate_status.__get__(return_server)
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)
        self.m.VerifyAll()
    def test_build_nics(self):
        """_build_nics port-id handling.

        Empty or None input yields None; dict entries are ordered by
        their DeviceIndex; plain port-id strings keep their given order
        and, when mixed with dicts, are appended after them.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'build_nics')
        # No interfaces -> no nics.
        self.assertIsNone(instance._build_nics([]))
        self.assertIsNone(instance._build_nics(None))
        # Plain strings keep their input order.
        self.assertEqual([
            {'port-id': 'id3'}, {'port-id': 'id1'}, {'port-id': 'id2'}],
            instance._build_nics([
                'id3', 'id1', 'id2']))
        # Dicts are sorted by DeviceIndex (string or int).
        self.assertEqual(
            [{'port-id': 'id1'},
             {'port-id': 'id2'},
             {'port-id': 'id3'}],
            instance._build_nics([
                {'NetworkInterfaceId': 'id3', 'DeviceIndex': '3'},
                {'NetworkInterfaceId': 'id1', 'DeviceIndex': '1'},
                {'NetworkInterfaceId': 'id2', 'DeviceIndex': 2},
            ]))
        # Mixed input: sorted dicts first, then the bare strings.
        self.assertEqual(
            [{'port-id': 'id1'},
             {'port-id': 'id2'},
             {'port-id': 'id3'},
             {'port-id': 'id4'},
             {'port-id': 'id5'}],
            instance._build_nics([
                {'NetworkInterfaceId': 'id3', 'DeviceIndex': '3'},
                {'NetworkInterfaceId': 'id1', 'DeviceIndex': '1'},
                {'NetworkInterfaceId': 'id2', 'DeviceIndex': 2},
                'id4',
                'id5']
            ))
    def test_build_nics_with_security_groups(self):
        """
        Test the security groups defined in heat template can be associated
        to a new created port.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._create_test_instance(return_server,
                                              'build_nics2')
        # One group referenced by name.
        security_groups = ['security_group_1']
        self._test_security_groups(instance, security_groups)
        # One group referenced by UUID.
        security_groups = ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
        self._test_security_groups(instance, security_groups, all_uuids=True)
        # Two groups, both by UUID.
        security_groups = ['0389f747-7785-4757-b7bb-2ab07e4b09c3',
                           '384ccd91-447c-4d83-832c-06974a7d3d05']
        self._test_security_groups(instance, security_groups,
                                   sg='two', all_uuids=True)
        # Mixed name and UUID references.
        security_groups = ['security_group_1',
                           '384ccd91-447c-4d83-832c-06974a7d3d05']
        self._test_security_groups(instance, security_groups, sg='two')
        # Unknown group name -> PhysicalResourceNotFound.
        security_groups = ['wrong_group_name']
        self._test_security_groups(
            instance,
            security_groups,
            sg='zero',
            get_secgroup_raises=exception.PhysicalResourceNotFound)
        security_groups = ['wrong_group_name',
                           '0389f747-7785-4757-b7bb-2ab07e4b09c3']
        self._test_security_groups(
            instance,
            security_groups,
            get_secgroup_raises=exception.PhysicalResourceNotFound)
        security_groups = ['wrong_group_name', 'security_group_1']
        self._test_security_groups(
            instance,
            security_groups,
            get_secgroup_raises=exception.PhysicalResourceNotFound)
        # A name that matches two groups -> PhysicalResourceNameAmbiguity.
        security_groups = ['duplicate_group_name', 'security_group_1']
        self._test_security_groups(
            instance,
            security_groups,
            get_secgroup_raises=exception.PhysicalResourceNameAmbiguity)
    def _test_security_groups(self, instance, security_groups, sg='one',
                              all_uuids=False, get_secgroup_raises=None):
        """Drive one _build_nics security-group scenario.

        sg selects the expected create_port security-group list (see
        _get_fake_properties); all_uuids skips the name-lookup stubbing;
        get_secgroup_raises, when set, is the exception type expected
        from _build_nics instead of a successful port creation.
        """
        fake_groups_list, props = self._get_fake_properties(sg)
        nclient = neutronclient.Client()
        self.m.StubOutWithMock(instance, 'neutron')
        instance.neutron().MultipleTimes().AndReturn(nclient)
        if not all_uuids:
            # list_security_groups only gets called when none of the requested
            # groups look like UUIDs.
            self.m.StubOutWithMock(
                neutronclient.Client, 'list_security_groups')
            neutronclient.Client.list_security_groups().AndReturn(
                fake_groups_list)
        self.m.StubOutWithMock(neutron.NeutronClientPlugin,
                               'network_id_from_subnet_id')
        neutron.NeutronClientPlugin.network_id_from_subnet_id(
            'fake_subnet_id').MultipleTimes().AndReturn('fake_network_id')
        if not get_secgroup_raises:
            self.m.StubOutWithMock(neutronclient.Client, 'create_port')
            neutronclient.Client.create_port(
                {'port': props}).MultipleTimes().AndReturn(
                    {'port': {'id': 'fake_port_id'}})
        self.stub_keystoneclient()
        self.m.ReplayAll()
        if get_secgroup_raises:
            self.assertRaises(get_secgroup_raises, instance._build_nics, None,
                              security_groups=security_groups,
                              subnet_id='fake_subnet_id')
        else:
            self.assertEqual(
                [{'port-id': 'fake_port_id'}],
                instance._build_nics(None,
                                     security_groups=security_groups,
                                     subnet_id='fake_subnet_id'))
        self.m.VerifyAll()
        # Stubs are reset so the caller can run several scenarios on the
        # same instance.
        self.m.UnsetStubs()
def _get_fake_properties(self, sg='one'):
fake_groups_list = {
'security_groups': [
{
'tenant_id': 'test_tenant_id',
'id': '0389f747-7785-4757-b7bb-2ab07e4b09c3',
'name': 'security_group_1',
'security_group_rules': [],
'description': 'no protocol'
},
{
'tenant_id': 'test_tenant_id',
'id': '384ccd91-447c-4d83-832c-06974a7d3d05',
'name': 'security_group_2',
'security_group_rules': [],
'description': 'no protocol'
},
{
'tenant_id': 'test_tenant_id',
'id': 'e91a0007-06a6-4a4a-8edb-1d91315eb0ef',
'name': 'duplicate_group_name',
'security_group_rules': [],
'description': 'no protocol'
},
{
'tenant_id': 'test_tenant_id',
'id': '8be37f3c-176d-4826-aa17-77d1d9df7b2e',
'name': 'duplicate_group_name',
'security_group_rules': [],
'description': 'no protocol'
}
]
}
fixed_ip = {'subnet_id': 'fake_subnet_id'}
props = {
'admin_state_up': True,
'network_id': 'fake_network_id',
'fixed_ips': [fixed_ip],
'security_groups': ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
}
if sg == 'zero':
props['security_groups'] = []
elif sg == 'one':
props['security_groups'] = ['0389f747-7785-4757-b7bb-2ab07e4b09c3']
elif sg == 'two':
props['security_groups'] = ['0389f747-7785-4757-b7bb-2ab07e4b09c3',
'384ccd91-447c-4d83-832c-06974a7d3d05']
return fake_groups_list, props
    def test_instance_without_ip_address(self):
        """A server without address entries reports PrivateIp as 0.0.0.0."""
        return_server = self.fc.servers.list()[3]
        instance = self._create_test_instance(return_server,
                                              'wo_ipaddr')
        self.assertEqual('0.0.0.0', instance.FnGetAtt('PrivateIp'))
    def test_default_instance_user(self):
        """The default value for instance_user in heat.conf is ec2-user."""
        return_server = self.fc.servers.list()[1]
        instance = self._setup_test_instance(return_server, 'default_user')
        metadata = instance.metadata_get()
        # build_userdata must be invoked with the default 'ec2-user'.
        self.m.StubOutWithMock(nova.NovaClientPlugin, 'build_userdata')
        nova.NovaClientPlugin.build_userdata(
            metadata, 'wordpress', 'ec2-user')
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.m.VerifyAll()
    def test_custom_instance_user(self):
        """Test instance_user in heat.conf being set to a custom value.

        Launching the instance should call build_userdata with the custom user
        name.

        This option is deprecated and will be removed in Juno.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._setup_test_instance(return_server, 'custom_user')
        self.m.StubOutWithMock(instances.cfg.CONF, 'instance_user')
        instances.cfg.CONF.instance_user = 'custom_user'
        metadata = instance.metadata_get()
        self.m.StubOutWithMock(nova.NovaClientPlugin, 'build_userdata')
        nova.NovaClientPlugin.build_userdata(
            metadata, 'wordpress', 'custom_user')
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.m.VerifyAll()
    def test_empty_instance_user(self):
        """Test instance_user in heat.conf being empty.

        Launching the instance should call build_userdata with
        "ec2-user".

        This behaviour is compatible with CloudFormation and will be
        the default in Juno once the instance_user option gets removed.
        """
        return_server = self.fc.servers.list()[1]
        instance = self._setup_test_instance(return_server, 'empty_user')
        self.m.StubOutWithMock(instances.cfg.CONF, 'instance_user')
        instances.cfg.CONF.instance_user = ''
        metadata = instance.metadata_get()
        self.m.StubOutWithMock(nova.NovaClientPlugin, 'build_userdata')
        nova.NovaClientPlugin.build_userdata(
            metadata, 'wordpress', 'ec2-user')
        self.m.ReplayAll()
        scheduler.TaskRunner(instance.create)()
        self.m.VerifyAll()
| {
"content_hash": "0f70eb1a7fe0379e2380b7ad6ad328ed",
"timestamp": "",
"source": "github",
"line_count": 1393,
"max_line_length": 79,
"avg_line_length": 42.74012921751615,
"alnum_prop": 0.5895661521406856,
"repo_name": "rh-s/heat",
"id": "bdf7a84ebd7b4c63c127453175f49060b8d2fa8e",
"size": "60112",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/tests/aws/test_instance.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "6288599"
},
{
"name": "Shell",
"bytes": "32845"
}
],
"symlink_target": ""
} |
import sqlite3
import urllib
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from phyllo.phyllo_logger import logger
# Case 1: Sections split by numbers (Roman or not) followed by a period, or bracketed. Subsections split by <p> tags
def parsecase1(ptags, c, colltitle, title, author, date, URL):
    """Parse paragraphs whose sections start with a numeral and insert rows.

    Sections are introduced by a Roman or Arabic numeral followed by a
    period ("IV. ", "12. ") or wrapped in brackets ("[IV] ", "[12] ");
    paragraphs without such a prefix continue the current chapter as the
    next verse.

    ptags: iterable of <p> tag objects (BeautifulSoup-like).
    c: database cursor used for the INSERT statements.
    The remaining arguments are stored verbatim on every row.
    """
    chapter = '-1'
    verse = 1
    for p in ptags:
        # Skip boilerplate paragraphs (navigation, borders, ...) that are
        # not part of the main text.  Tags without a class attribute raise
        # KeyError; non-tag nodes raise TypeError.
        try:
            if p['class'][0].lower() in ['border', 'pagehead', 'shortborder', 'smallboarder', 'margin',
                                         'internal_navigation']:  # these are not part of the main t
                continue
        except (KeyError, TypeError):
            pass
        text = p.get_text().strip()
        # Skip empty paragraphs. and skip the last part with the collection link.
        if len(text) <= 0 or text.startswith('The Miscellany\n'):
            continue
        # re.split leaves '' and None entries for the unmatched capture
        # groups, so drop every empty element in one pass.  (The original
        # removed items from the list while iterating it, which skips
        # neighbouring elements and only worked by accident.)
        parts = re.split(r'^([IVX]+)\.\s|^([0-9]+)\.\s|^\[([IVXL]+)\]\s|^\[([0-9]+)\]\s', text)
        parts = [e for e in parts if e is not None and e != '' and not e.isspace()]
        # The split should not alter sections with no prefixed roman numeral.
        if len(parts) > 1:
            # A numeral was found: it becomes the chapter and the rest of
            # the paragraph is the first verse of that chapter.
            chapter = parts[0]
            passage = parts[1].strip()
            verse = 1
        else:
            passage = parts[0]
            verse += 1
        # check for that last line with the author name that doesn't need to be here
        if passage.startswith('The Miscellany'):
            continue
        c.execute("INSERT INTO texts VALUES (?,?,?,?,?,?,?, ?, ?, ?, ?)",
                  (None, colltitle, title, 'Latin', author, date, chapter,
                   verse, passage.strip(), URL, 'prose'))
# main code
def main():
    """Scrape the Balbus collection from The Latin Library into texts.db."""
    # The collection URL below. In this example, we have a link to Cicero.
    collURL = 'http://www.thelatinlibrary.com/balbus.html'
    collOpen = urllib.request.urlopen(collURL)
    collSOUP = BeautifulSoup(collOpen, 'html5lib')
    author = collSOUP.title.string.strip()
    colltitle = 'BALBI EXPOSITIO ET RATIO OMNIUM FORMARUM'
    date = '-'
    textsURL = [collURL]
    with sqlite3.connect('texts.db') as db:
        c = db.cursor()
        c.execute(
            'CREATE TABLE IF NOT EXISTS texts (id INTEGER PRIMARY KEY, title TEXT, book TEXT,'
            ' language TEXT, author TEXT, date TEXT, chapter TEXT, verse TEXT, passage TEXT,'
            ' link TEXT, documentType TEXT)')
        # Re-running the scraper replaces rows from any previous run.
        c.execute("DELETE FROM texts WHERE author='Balbus'")
        for url in textsURL:
            openurl = urllib.request.urlopen(url)
            textsoup = BeautifulSoup(openurl, 'html5lib')
            # Page titles usually look like "Author: Work"; fall back to the
            # whole title when there is no colon (IndexError) or no title
            # string (AttributeError).  The original bare `except:` also
            # swallowed KeyboardInterrupt/SystemExit.
            try:
                title = textsoup.title.string.split(':')[1].strip()
            except (IndexError, AttributeError):
                title = textsoup.title.string.strip()
            getp = textsoup.find_all('p')
            # finally, pick ONE case to parse with.
            parsecase1(getp, c, colltitle, title, author, date, url)
# Run the scraper when executed as a script.
if __name__ == '__main__':
    main()
| {
"content_hash": "73326136c075c852a8b9e9769a4c9a09",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 116,
"avg_line_length": 38.50588235294118,
"alnum_prop": 0.5633974946532233,
"repo_name": "oudalab/phyllo",
"id": "be243d9265597f7636a903f49e99917d9d64348e",
"size": "3273",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "phyllo/extractors/balbus.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "919"
},
{
"name": "HTML",
"bytes": "3428"
},
{
"name": "Python",
"bytes": "1253920"
},
{
"name": "Shell",
"bytes": "1077"
}
],
"symlink_target": ""
} |
import argparse
import datetime
import logging
import os
import statistics
import sys
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
from functools import partial
from multiprocessing.pool import ThreadPool
import math
from eviltransform import gcj2wgs_exact
from tqdm import tqdm
from .api.booking_api import BookingApi, BookingListApi, LIMIT_REQUESTS_PER_MINUTE
from .api.exceptions import GettingMinPriceError
# Languages for which hotel name/address translations are downloaded.
SUPPORTED_LANGUAGES = ("en", "ru", "ar", "cs", "da", "nl", "fi", "fr", "de",
                       "hu", "id", "it", "ja", "ko", "pl", "pt", "ro", "es",
                       "sv", "th", "tr", "uk", "vi", "zh", "he", "sk", "el")
class BookingGen:
    """Downloads the hotels of one country from the Booking API and
    renders them as TSV lines (one hotel per line)."""
    def __init__(self, api, country):
        # api: BookingListApi-like client; country: dict with 'country'
        # (the country code) and 'name' keys.
        self.api = api
        self.country_code = country["country"]
        self.country_name = country["name"]
        logging.info(f"Download[{self.country_code}]: {self.country_name}")
        extras = ["hotel_info", "room_info"]
        self.hotels = self._download_hotels(extras=extras)
        self.translations = self._download_translations()
        self.currency_medians = self._currency_medians_by_cities()
    def generate_tsv_rows(self, sep="\t"):
        """Yield one *sep*-separated line per downloaded hotel."""
        self._fix_hotels()
        return (self._create_tsv_hotel_line(hotel, sep) for hotel in self.hotels)
    @staticmethod
    def _get_hotel_min_price(hotel):
        """Return the smallest non-zero room price of *hotel*.

        Raises GettingMinPriceError when there are no non-zero prices.
        """
        prices = (float(x["room_info"]["min_price"]) for x in hotel["room_data"])
        flt = filter(lambda x: not math.isclose(x, 0.0), prices)
        try:
            return min(flt)
        except ValueError:
            # NOTE(review): *prices* is an exhausted generator at this
            # point, so the message shows its repr rather than any
            # values — confirm whether that is intended.
            raise GettingMinPriceError(f"Getting min price error: {prices}.")
    @staticmethod
    def _format_string(s):
        """Strip *s* and collapse tabs/newlines so it is TSV-safe."""
        s = s.strip()
        for x in (("\t", " "), ("\n", " "), ("\r", "")):
            s = s.replace(*x)
        return s
    def _download_hotels(self, **params):
        # Thin wrapper around the API call for this generator's country.
        return self.api.hotels(country_ids=self.country_code, **params)
    def _download_translations(self):
        """Fetch hotel name/address in every supported language.

        Returns {hotel_id: {lang: {"name": ..., "address": ...}}}; the
        per-language downloads run concurrently, one thread each.
        """
        extras = ["hotel_info", ]
        translations = defaultdict(dict)
        with ThreadPoolExecutor(max_workers=len(SUPPORTED_LANGUAGES)) as executor:
            m = {executor.submit(self._download_hotels, extras=extras, language=lang): lang
                 for lang in SUPPORTED_LANGUAGES}
            for future in as_completed(m):
                lang = m[future]
                hotels = future.result()
                for hotel in hotels:
                    hotel_id = hotel["hotel_id"]
                    hotel_data = hotel["hotel_data"]
                    translations[hotel_id][lang] = {
                        "name": BookingGen._format_string(hotel_data["name"]),
                        "address": BookingGen._format_string(hotel_data["address"])
                    }
        return translations
    def _fix_hotels(self):
        """Apply per-country corrections to the downloaded hotels."""
        if self.country_code == "cn":
            # Fix chinese coordinates.
            # https://en.wikipedia.org/wiki/Restrictions_on_geographic_data_in_China
            for hotel in self.hotels:
                hotel_data = hotel["hotel_data"]
                location = hotel_data["location"]
                try:
                    location["latitude"], location["longitude"] = gcj2wgs_exact(
                        float(location["latitude"]), float(location["longitude"])
                    )
                except ValueError:
                    logging.exception(f"Converting error {location}")
    def _currency_medians_by_cities(self):
        """Return {city_id: {currency: median minimum room price}}."""
        cities = defaultdict(lambda: defaultdict(list))
        for hotel in self.hotels:
            hotel_data = hotel["hotel_data"]
            city_id = hotel_data["city_id"]
            currency = hotel_data["currency"]
            try:
                price = BookingGen._get_hotel_min_price(hotel)
            except GettingMinPriceError:
                logging.exception("Getting min price error.")
                continue
            cities[city_id][currency].append(price)
        for city in cities:
            for currency in cities[city]:
                cities[city][currency] = statistics.median(cities[city][currency])
        return cities
    def _get_rate(self, hotel):
        """Return a price-rate bucket for *hotel*: 0 when the price is
        unknown, otherwise 1..len(rates)+1 relative to the city median.
        """
        # Price rate ranges, relative to the median price for a city
        rates = (0.7, 1.3)
        rate = 0
        hotel_data = hotel["hotel_data"]
        city_id = hotel_data["city_id"]
        currency = hotel_data["currency"]
        price = None
        try:
            price = BookingGen._get_hotel_min_price(hotel)
        except GettingMinPriceError:
            logging.exception("Getting min price error.")
            return rate
        avg = self.currency_medians[city_id][currency]
        rate = 1
        # Find a range that contains the price
        while rate <= len(rates) and price > avg * rates[rate - 1]:
            rate += 1
        return rate
    def _get_translations(self, hotel):
        """Serialize the non-redundant translations of *hotel* as a
        '|'-separated lang/name/address list ('|' in values becomes ';').
        """
        try:
            tr = self.translations[hotel["hotel_id"]]
        except KeyError:
            return ""
        hotel_data = hotel["hotel_data"]
        name = hotel_data["name"]
        address = hotel_data["address"]
        # Keep only translations that differ from the default name/address.
        tr_ = defaultdict(dict)
        for k, v in tr.items():
            n = v["name"] if v["name"] != name else ""
            a = v["address"] if v["address"] != address else ""
            if a or n:
                tr_[k]["name"] = n
                tr_[k]["address"] = a
        tr_list = []
        for tr_lang, tr_values in tr_.items():
            tr_list.append(tr_lang)
            tr_list.extend([tr_values[e] for e in ("name", "address")])
        return "|".join(s.replace("|", ";") for s in tr_list)
    def _create_tsv_hotel_line(self, hotel, sep="\t"):
        """Render one hotel as a single *sep*-separated line."""
        hotel_data = hotel["hotel_data"]
        location = hotel_data["location"]
        row = (
            hotel["hotel_id"],
            f"{location['latitude']:.6f}",
            f"{location['longitude']:.6f}",
            hotel_data["name"],
            hotel_data["address"],
            hotel_data["class"],
            self._get_rate(hotel),
            hotel_data["ranking"],
            hotel_data["review_score"],
            hotel_data["url"],
            hotel_data["hotel_type_id"],
            self._get_translations(hotel)
        )
        return sep.join(BookingGen._format_string(str(x)) for x in row)
def download_hotels_by_country(api, country):
    """Download all hotels of one *country* and return their TSV lines."""
    generator = BookingGen(api, country)
    rows = list(generator.generate_tsv_rows())
    logging.info(f"For {country['name']} {len(rows)} lines were generated.")
    return rows
def download(country_code, user, password, path, threads_count,
             progress_bar=None):
    """Download hotels and write them as TSV lines to *path*.

    country_code: list of country codes to keep, or None for all countries.
    user/password: Booking.com API credentials.
    threads_count: number of worker threads, one country per task.
    progress_bar: optional tqdm instance; a disabled one is created when
        omitted.  (The previous default of ``tqdm(disable=True)`` was
        evaluated once at import time, so every caller shared — and
        mutated — the same progress-bar instance.)
    """
    if progress_bar is None:
        progress_bar = tqdm(disable=True)
    api = BookingApi(user, password, "2.4")
    list_api = BookingListApi(api)
    countries = list_api.countries(languages="en")
    if country_code is not None:
        countries = list(filter(lambda x: x["country"] in country_code, countries))
    logging.info(f"There is {len(countries)} countries.")
    progress_bar.desc = "Countries"
    progress_bar.total = len(countries)
    with open(path, "w") as f:
        with ThreadPool(threads_count) as pool:
            # Countries are processed in parallel; lines are written in
            # whatever order the workers finish.
            for lines in pool.imap_unordered(partial(download_hotels_by_country, list_api),
                                             countries):
                f.writelines([f"{x}\n" for x in lines])
                progress_bar.update()
    logging.info(f"Hotels were saved to {path}.")
def process_options():
    """Parse and return the command-line options of the hotel downloader."""
    arg_parser = argparse.ArgumentParser(
        description="Download and process booking hotels.")
    arg_parser.add_argument("-v", "--verbose", action="store_true")
    arg_parser.add_argument("--logfile", default="",
                            help="Name and destination for log file")
    arg_parser.add_argument("--password", required=True, dest="password",
                            help="Booking.com account password")
    arg_parser.add_argument("--user", required=True, dest="user",
                            help="Booking.com account user name")
    arg_parser.add_argument("--threads_count", default=1, type=int,
                            help="The number of threads for processing countries.")
    arg_parser.add_argument("--output", required=True, dest="output",
                            help="Name and destination for output file")
    # May be given several times; None means "all countries".
    arg_parser.add_argument("--country_code", default=None, action="append",
                            help="Download hotels of this country.")
    return arg_parser.parse_args()
def main():
    """Entry point: pick a log file, configure logging, run the download."""
    options = process_options()
    if options.logfile:
        logfile = options.logfile
    else:
        # Default: timestamped log file next to this script.
        stamp = datetime.datetime.now().strftime('%d_%m_%Y-%H_%M_%S')
        logfile = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               f"{stamp}_booking_hotels.log")
    print(f"Logs saved to {logfile}.", file=sys.stdout)
    if options.threads_count > 1:
        print(f"Limit requests per minute is {LIMIT_REQUESTS_PER_MINUTE}.", file=sys.stdout)
    logging.basicConfig(level=logging.DEBUG, filename=logfile,
                        format="%(thread)d [%(asctime)s] %(levelname)s: %(message)s")
    # The progress bar is a no-op unless --verbose was given.
    with tqdm(disable=not options.verbose) as progress_bar:
        download(options.country_code, options.user, options.password,
                 options.output, options.threads_count, progress_bar)


if __name__ == "__main__":
    main()
| {
"content_hash": "824a1a9715484f489131d904d4519291",
"timestamp": "",
"source": "github",
"line_count": 237,
"max_line_length": 92,
"avg_line_length": 39.48945147679325,
"alnum_prop": 0.572283363607223,
"repo_name": "alexzatsepin/omim",
"id": "1374459a6168240d1c4be039724ed2e48659e124",
"size": "9381",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/python/booking/download_hotels.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Awk",
"bytes": "3962"
},
{
"name": "Batchfile",
"bytes": "5586"
},
{
"name": "C",
"bytes": "13984459"
},
{
"name": "C++",
"bytes": "148411082"
},
{
"name": "CMake",
"bytes": "249320"
},
{
"name": "CSS",
"bytes": "26798"
},
{
"name": "Common Lisp",
"bytes": "17587"
},
{
"name": "DIGITAL Command Language",
"bytes": "36710"
},
{
"name": "GLSL",
"bytes": "58384"
},
{
"name": "Gherkin",
"bytes": "305230"
},
{
"name": "Go",
"bytes": "12771"
},
{
"name": "HTML",
"bytes": "9503594"
},
{
"name": "Inno Setup",
"bytes": "4337"
},
{
"name": "Java",
"bytes": "2486120"
},
{
"name": "JavaScript",
"bytes": "29076"
},
{
"name": "Lua",
"bytes": "57672"
},
{
"name": "M4",
"bytes": "53992"
},
{
"name": "Makefile",
"bytes": "429637"
},
{
"name": "Metal",
"bytes": "77540"
},
{
"name": "Module Management System",
"bytes": "2080"
},
{
"name": "Objective-C",
"bytes": "2046640"
},
{
"name": "Objective-C++",
"bytes": "1300948"
},
{
"name": "PHP",
"bytes": "2841"
},
{
"name": "Perl",
"bytes": "57807"
},
{
"name": "PowerShell",
"bytes": "1885"
},
{
"name": "Python",
"bytes": "584274"
},
{
"name": "Roff",
"bytes": "13545"
},
{
"name": "Ruby",
"bytes": "66800"
},
{
"name": "Shell",
"bytes": "1317925"
},
{
"name": "Swift",
"bytes": "511409"
},
{
"name": "sed",
"bytes": "236"
}
],
"symlink_target": ""
} |
from unittest import mock
import unittest
from parameterized import parameterized
from airflow.exceptions import AirflowException
from airflow.sensors.s3_key_sensor import S3KeySensor
class S3KeySensorTests(unittest.TestCase):
    """Unit tests for S3KeySensor: bucket/key argument parsing and poke()."""
    def test_bucket_name_None_and_bucket_key_as_relative_path(self):
        """
        Test if exception is raised when bucket_name is None
        and bucket_key is provided as relative path rather than s3:// url.
        :return:
        """
        with self.assertRaises(AirflowException):
            S3KeySensor(
                task_id='s3_key_sensor',
                bucket_key="file_in_bucket")
    def test_bucket_name_provided_and_bucket_key_is_s3_url(self):
        """
        Test if exception is raised when bucket_name is provided
        while bucket_key is provided as a full s3:// url.
        :return:
        """
        with self.assertRaises(AirflowException):
            S3KeySensor(
                task_id='s3_key_sensor',
                bucket_key="s3://test_bucket/file",
                bucket_name='test_bucket')
    @parameterized.expand([
        # [given key, given bucket, expected parsed key, expected parsed bucket]
        ['s3://bucket/key', None, 'key', 'bucket'],
        ['key', 'bucket', 'key', 'bucket'],
    ])
    def test_parse_bucket_key(self, key, bucket, parsed_key, parsed_bucket):
        """An s3:// URL is split into bucket and key; a plain pair passes through."""
        s = S3KeySensor(
            task_id='s3_key_sensor',
            bucket_key=key,
            bucket_name=bucket,
        )
        self.assertEqual(s.bucket_key, parsed_key)
        self.assertEqual(s.bucket_name, parsed_bucket)
    @mock.patch('airflow.hooks.S3_hook.S3Hook')
    def test_poke(self, mock_hook):
        """poke() returns whatever S3Hook.check_for_key reports for the key."""
        s = S3KeySensor(
            task_id='s3_key_sensor',
            bucket_key='s3://test_bucket/file')
        mock_check_for_key = mock_hook.return_value.check_for_key
        mock_check_for_key.return_value = False
        self.assertFalse(s.poke(None))
        # The hook must be queried with the parsed key and bucket.
        mock_check_for_key.assert_called_with(s.bucket_key, s.bucket_name)
        mock_hook.return_value.check_for_key.return_value = True
        self.assertTrue(s.poke(None))
    @mock.patch('airflow.hooks.S3_hook.S3Hook')
    def test_poke_wildcard(self, mock_hook):
        """With wildcard_match=True, poke() delegates to check_for_wildcard_key."""
        s = S3KeySensor(
            task_id='s3_key_sensor',
            bucket_key='s3://test_bucket/file',
            wildcard_match=True)
        mock_check_for_wildcard_key = mock_hook.return_value.check_for_wildcard_key
        mock_check_for_wildcard_key.return_value = False
        self.assertFalse(s.poke(None))
        mock_check_for_wildcard_key.assert_called_with(s.bucket_key, s.bucket_name)
        mock_check_for_wildcard_key.return_value = True
        self.assertTrue(s.poke(None))
| {
"content_hash": "45f129aff1e49a255b1f7b52372aef96",
"timestamp": "",
"source": "github",
"line_count": 74,
"max_line_length": 83,
"avg_line_length": 35.83783783783784,
"alnum_prop": 0.6165158371040724,
"repo_name": "r39132/airflow",
"id": "320fff3a1f56460b80597b44989c41d09c2e16ad",
"size": "3464",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/sensors/test_s3_key_sensor.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "12126"
},
{
"name": "Dockerfile",
"bytes": "4111"
},
{
"name": "HTML",
"bytes": "128531"
},
{
"name": "JavaScript",
"bytes": "22118"
},
{
"name": "Mako",
"bytes": "1284"
},
{
"name": "Python",
"bytes": "5928206"
},
{
"name": "Shell",
"bytes": "41869"
}
],
"symlink_target": ""
} |
import lz4.stream
import pytest
import sys
import os
if sys.version_info < (3, ):
    # Python 2: emulate int.to_bytes/int.from_bytes on top of struct.
    from struct import pack, unpack

    def _get_format(length, byteorder, signed):
        """Build a struct format string for the given width/endianness/sign."""
        prefix = {'l': '<', 'b': '>'}[byteorder[0].lower()]
        code = {1: 'b', 2: 'h', 4: 'i', 8: 'q'}[length]
        fmt = prefix + code
        # Lowercase struct codes are signed, uppercase unsigned.
        return fmt.lower() if signed else fmt.upper()

    def int_to_bytes(value, length=4, byteorder='little', signed=False):
        """Serialize *value* into *length* bytes (bytearray)."""
        return bytearray(pack(_get_format(length, byteorder, signed), value))

    def int_from_bytes(bytes, byteorder='little', signed=False):
        """Deserialize an integer from a byte string."""
        return unpack(_get_format(len(bytes), byteorder, signed), bytes)[0]
else:
    # Python 3: thin wrappers over the builtin int conversions.
    def int_to_bytes(value, length=4, byteorder='little', signed=False):
        """Serialize *value* into *length* bytes."""
        return value.to_bytes(length, byteorder, signed=signed)

    def int_from_bytes(bytes, byteorder='little', signed=False):
        """Deserialize an integer from a byte string."""
        return int.from_bytes(bytes, byteorder, signed=signed)
# This test requires allocating a big lump of memory. In order to
# avoid a massive memory allocation during byte compilation, we have
# to declare a variable for the size of the buffer we're going to
# create outside the scope of the function below. See:
# https://bugs.python.org/issue21074
_4GB = 0x100000000  # 4GB, i.e. 2**32 bytes
def compress(x, c_kwargs, return_block_offset=False, check_block_type=False):
    """Compress *x* block by block with LZ4StreamCompressor.

    :param x: input buffer, split into c_kwargs['buffer_size'] sized chunks.
    :param c_kwargs: keyword arguments for LZ4StreamCompressor; the
        'return_bytearray' flag selects the accumulator/output type.
    :param return_block_offset: also return the end offset of every block.
    :param check_block_type: assert each block matches the output type.
    :return: compressed data, or (compressed data, offsets list).
    """
    offsets = [0, ]
    compressed = bytearray() if c_kwargs.get('return_bytearray', False) else bytes()
    step = c_kwargs['buffer_size']
    with lz4.stream.LZ4StreamCompressor(**c_kwargs) as proc:
        for start in range(0, len(x), step):
            block = proc.compress(x[start:start + step])
            compressed += block
            if return_block_offset:
                offsets.append(len(compressed))
            if check_block_type:
                assert isinstance(block, compressed.__class__)
    return (compressed, offsets) if return_block_offset else compressed
def decompress(x, d_kwargs, check_chunk_type=False):
    """Decompress a block stream produced by compress().

    :param x: compressed stream; each block is preceded by its size stored in
        d_kwargs['store_comp_size'] bytes.
    :param d_kwargs: keyword arguments for LZ4StreamDecompressor; the
        'return_bytearray' flag selects the accumulator/output type.
    :param check_chunk_type: assert each chunk matches the output type.
    :return: the decompressed data.
    """
    decompressed = bytearray() if d_kwargs.get('return_bytearray', False) else bytes()
    with lz4.stream.LZ4StreamDecompressor(**d_kwargs) as proc:
        pos = 0
        while pos < len(x):
            block = proc.get_block(x[pos:])
            chunk = proc.decompress(block)
            decompressed += chunk
            # Advance past the stored block-size prefix plus the block itself.
            pos += d_kwargs['store_comp_size'] + len(block)
            if check_chunk_type:
                assert isinstance(chunk, decompressed.__class__)
    return decompressed
def test_invalid_config_c_1():
    """The "ring_buffer" strategy is recognised but not implemented (compressor)."""
    c_kwargs = {}
    c_kwargs['strategy'] = "ring_buffer"
    c_kwargs['buffer_size'] = 1024
    with pytest.raises(NotImplementedError):
        lz4.stream.LZ4StreamCompressor(**c_kwargs)
def test_invalid_config_d_1():
    """The "ring_buffer" strategy is recognised but not implemented (decompressor)."""
    d_kwargs = {}
    d_kwargs['strategy'] = "ring_buffer"
    d_kwargs['buffer_size'] = 1024
    with pytest.raises(NotImplementedError):
        lz4.stream.LZ4StreamDecompressor(**d_kwargs)
def test_invalid_config_c_2():
    """An unknown strategy name is rejected with ValueError (compressor)."""
    c_kwargs = {}
    c_kwargs['strategy'] = "foo"
    c_kwargs['buffer_size'] = 1024
    with pytest.raises(ValueError):
        lz4.stream.LZ4StreamCompressor(**c_kwargs)
def test_invalid_config_d_2():
    """An unknown strategy name is rejected with ValueError (decompressor)."""
    d_kwargs = {}
    d_kwargs['strategy'] = "foo"
    d_kwargs['buffer_size'] = 1024
    with pytest.raises(ValueError):
        lz4.stream.LZ4StreamDecompressor(**d_kwargs)
def test_invalid_config_c_3(store_comp_size):
    """An out-of-range store_comp_size is rejected with ValueError (compressor).

    store_comp_size is a pytest fixture — presumably a dict holding a valid
    'store_comp_size' value (defined in conftest; TODO confirm).
    """
    c_kwargs = {}
    c_kwargs['strategy'] = "double_buffer"
    c_kwargs['buffer_size'] = 1024
    # +5 pushes the value past every supported width.
    c_kwargs['store_comp_size'] = store_comp_size['store_comp_size'] + 5
    with pytest.raises(ValueError):
        lz4.stream.LZ4StreamCompressor(**c_kwargs)
def test_invalid_config_d_3(store_comp_size):
    """An out-of-range store_comp_size is rejected with ValueError (decompressor)."""
    d_kwargs = {}
    d_kwargs['strategy'] = "double_buffer"
    d_kwargs['buffer_size'] = 1024
    # +5 pushes the value past every supported width.
    d_kwargs['store_comp_size'] = store_comp_size['store_comp_size'] + 5
    with pytest.raises(ValueError):
        lz4.stream.LZ4StreamDecompressor(**d_kwargs)
def test_invalid_config_c_4(store_comp_size):
    """A buffer_size whose worst-case compressed length cannot be stored in
    store_comp_size bytes is rejected — either as a plain ValueError (buffer
    larger than LZ4's input limit) or as an LZ4StreamError (inconsistent pair).
    """
    c_kwargs = {}
    c_kwargs['strategy'] = "double_buffer"
    c_kwargs['buffer_size'] = 1 << (8 * store_comp_size['store_comp_size'])
    c_kwargs.update(store_comp_size)
    if store_comp_size['store_comp_size'] >= 4:
        # No need for skipping this test case, since arguments check is
        # expecting to raise an error.
        # Make sure the page size is larger than what the input bound will be,
        # but still fit in 4 bytes
        c_kwargs['buffer_size'] -= 1
    if c_kwargs['buffer_size'] > lz4.stream.LZ4_MAX_INPUT_SIZE:
        message = r"^Invalid buffer_size argument: \d+. Cannot define output buffer size. Must be lesser or equal to 2113929216$" # noqa
        err_class = ValueError
    else:
        message = r"^Inconsistent buffer_size/store_comp_size values. Maximal compressed length \(\d+\) cannot fit in a \d+ byte-long integer$" # noqa
        err_class = lz4.stream.LZ4StreamError
    with pytest.raises(err_class, match=message):
        lz4.stream.LZ4StreamCompressor(**c_kwargs)
def test_invalid_config_d_4(store_comp_size):
    """Unlike the compressor, the decompressor accepts a buffer_size at the
    limit of what store_comp_size can express — construction must not raise.
    """
    d_kwargs = {}
    d_kwargs['strategy'] = "double_buffer"
    d_kwargs['buffer_size'] = 1 << (8 * store_comp_size['store_comp_size'])
    d_kwargs.update(store_comp_size)
    if store_comp_size['store_comp_size'] >= 4:
        if sys.maxsize < 0xffffffff:
            pytest.skip('Py_ssize_t too small for this test')
        # Make sure the page size is larger than what the input bound will be,
        # but still fit in 4 bytes
        d_kwargs['buffer_size'] -= 1
    # No failure expected during instantiation/initialization
    lz4.stream.LZ4StreamDecompressor(**d_kwargs)
def test_invalid_config_c_5():
    """LZ4_MAX_INPUT_SIZE is the exact compressor buffer_size limit: the limit
    itself is accepted, anything above it raises ValueError.
    """
    c_kwargs = {}
    c_kwargs['strategy'] = "double_buffer"
    c_kwargs['buffer_size'] = lz4.stream.LZ4_MAX_INPUT_SIZE
    if sys.maxsize < 0xffffffff:
        pytest.skip('Py_ssize_t too small for this test')
    # No failure expected
    lz4.stream.LZ4StreamCompressor(**c_kwargs)
    c_kwargs['buffer_size'] = lz4.stream.LZ4_MAX_INPUT_SIZE + 1
    with pytest.raises(ValueError):
        lz4.stream.LZ4StreamCompressor(**c_kwargs)
    # Make sure the page size is larger than what the input bound will be,
    # but still fit in 4 bytes
    c_kwargs['buffer_size'] = _4GB - 1  # 4GB - 1 (to fit in 4 bytes)
    with pytest.raises(ValueError):
        lz4.stream.LZ4StreamCompressor(**c_kwargs)
def test_invalid_config_d_5():
    """The decompressor has no input-size limit: buffer sizes at, above the
    compressor limit, and up to 4GB-1 must all construct cleanly.
    """
    d_kwargs = {}
    d_kwargs['strategy'] = "double_buffer"
    # No failure expected during instantiation/initialization
    d_kwargs['buffer_size'] = lz4.stream.LZ4_MAX_INPUT_SIZE
    if sys.maxsize < 0xffffffff:
        pytest.skip('Py_ssize_t too small for this test')
    lz4.stream.LZ4StreamDecompressor(**d_kwargs)
    # No failure expected during instantiation/initialization
    d_kwargs['buffer_size'] = lz4.stream.LZ4_MAX_INPUT_SIZE + 1
    if sys.maxsize < 0xffffffff:
        pytest.skip('Py_ssize_t too small for this test')
    lz4.stream.LZ4StreamDecompressor(**d_kwargs)
    # No failure expected during instantiation/initialization
    d_kwargs['buffer_size'] = _4GB - 1  # 4GB - 1 (to fit in 4 bytes)
    if sys.maxsize < 0xffffffff:
        pytest.skip('Py_ssize_t too small for this test')
    lz4.stream.LZ4StreamDecompressor(**d_kwargs)
def test_decompress_corrupted_input_1():
    """Dropping the first block-size prefix desynchronises the stream and must
    raise an "input size larger than source" LZ4StreamError.
    """
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = compress(b'A' * 512, c_kwargs)
    # Sanity check: the intact stream round-trips.
    decompress(data, d_kwargs)
    message = r"^Requested input size \(\d+\) larger than source size \(\d+\)$"
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data[4:], d_kwargs)
def test_decompress_corrupted_input_2():
    """Corrupting the stored size of the FIRST block (either direction) makes
    decompression fail with a generic LZ4StreamError.
    """
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = compress(b'A' * 512, c_kwargs)
    # Sanity check: the intact stream round-trips.
    decompress(data, d_kwargs)
    message = r"^Decompression failed. error: \d+$"
    # Block size corruption in the first block
    # Block size longer than actual:
    data = int_to_bytes(int_from_bytes(data[:4], 'little') + 1, 4, 'little') + data[4:]
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data, d_kwargs)
    # Block size shorter than actual (-2 to undo the +1 above and go 1 below):
    data = int_to_bytes(int_from_bytes(data[:4], 'little') - 2, 4, 'little') + data[4:]
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data, d_kwargs)
def test_decompress_corrupted_input_3():
    """Corrupting the stored size of a MIDDLE block (either direction) makes
    decompression fail with a generic LZ4StreamError.
    """
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = compress(b'A' * 512, c_kwargs)
    # Sanity check: the intact stream round-trips.
    decompress(data, d_kwargs)
    message = r"^Decompression failed. error: \d+$"
    # Block size corruption in a block in the middle of the stream
    offset = 4 + int_from_bytes(data[:4], 'little')
    # Block size longer than actual:
    block_len = int_from_bytes(data[offset:offset + 4], 'little') + 1
    data = data[:offset] + int_to_bytes(block_len, 4, 'little') + data[offset + 4:]
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data, d_kwargs)
    # Block size shorter than actual (-2 to undo the +1 above and go 1 below):
    block_len = int_from_bytes(data[offset:offset + 4], 'little') - 2
    data = data[:offset] + int_to_bytes(block_len, 4, 'little') + data[offset + 4:]
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data, d_kwargs)
def test_decompress_corrupted_input_4():
    """Corrupting the stored size of the LAST block fails differently per
    direction: too long overruns the source, too short corrupts the block.
    """
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = compress(b'A' * 256, c_kwargs)
    # Sanity check: the intact stream round-trips.
    decompress(data, d_kwargs)
    # Block size corruption in the last block of the stream
    offset = 4 + int_from_bytes(data[:4], 'little')
    # Block size longer than actual:
    block_len = int_from_bytes(data[offset:offset + 4], 'little') + 1
    data = data[:offset] + int_to_bytes(block_len, 4, 'little') + data[offset + 4:]
    message = r"^Requested input size \(\d+\) larger than source size \(\d+\)$"
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data, d_kwargs)
    # Block size shorter than actual (-2 to undo the +1 above and go 1 below):
    block_len = int_from_bytes(data[offset:offset + 4], 'little') - 2
    data = data[:offset] + int_to_bytes(block_len, 4, 'little') + data[offset + 4:]
    message = r"^Decompression failed. error: \d+$"
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(data, d_kwargs)
def test_decompress_truncated():
    """Truncating the stream at every possible byte position: cuts on a block
    boundary decompress fine, anything else raises the appropriate error.
    """
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123" * 24
    compressed, block_offsets = compress(input_data, c_kwargs, return_block_offset=True)
    last_block_offset = 0
    for n in range(len(compressed)):
        if n in block_offsets:
            # end of input matches end of block, so decompression must succeed
            last_block_offset = n
            decompress(compressed[:n], d_kwargs)
        else:
            # end of input does not match end of block, so decompression failure is expected
            if n - last_block_offset < c_kwargs['store_comp_size']:
                # Not even a full size prefix left after the last whole block.
                message = "^Invalid source, too small for holding any block$"
            else:
                message = r"^Requested input size \(\d+\) larger than source size \(\d+\)$"
            with pytest.raises(lz4.stream.LZ4StreamError, match=message):
                decompress(compressed[:n], d_kwargs)
# This next test is probably redundant given test_decompress_truncated above
# since the trailing bytes will be considered as the truncated last block, but
# we will keep them for now
def test_decompress_with_trailer():
    """Garbage bytes appended after a valid stream must raise: the trailer is
    interpreted as a (truncated or corrupt) extra block.
    """
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = b'A' * 64
    comp = compress(data, c_kwargs)
    # One trailing byte: not enough for a size prefix.
    message = "^Invalid source, too small for holding any block$"
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(comp + b'A', d_kwargs)
    # Ten trailing bytes: prefix b'AAAA' claims far more data than remains.
    message = r"^Requested input size \(\d+\) larger than source size \(\d+\)$"
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(comp + b'A' * 10, d_kwargs)
    for n in range(1, 10):
        if n < d_kwargs['store_comp_size']:
            message = "^Invalid source, too small for holding any block$"
        else:
            # A zero-length "block" follows; decompressing it fails.
            message = r"^Decompression failed. error: \d+$"
        with pytest.raises(lz4.stream.LZ4StreamError, match=message):
            decompress(comp + b'\x00' * n, d_kwargs)
def test_unicode():
    """On Python 3, str input is rejected with TypeError for both directions."""
    if sys.version_info < (3,):
        return  # skip: bytes/str are not distinct types on Python 2
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    DATA = b'x'
    with pytest.raises(TypeError):
        compress(DATA.decode('latin1'), c_kwargs)
        decompress(compress(DATA, c_kwargs).decode('latin1'), d_kwargs)
# These next two are probably redundant given the round-trip tests above, but
# we'll keep them for now
def test_return_bytearray():
    """With return_bytearray=True both directions yield bytearray results that
    are byte-for-byte equal to the default bytes results.
    """
    if sys.version_info < (3,):
        return  # skip: bytearray/bytes distinction is a Python 3 concern here
    c_kwargs_r = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    c_kwargs = {'return_bytearray': True}
    c_kwargs.update(c_kwargs_r)
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = os.urandom(128 * 1024)  # Read 128kb
    compressed = compress(data, c_kwargs_r, check_block_type=True)
    b = compress(data, c_kwargs, check_block_type=True)
    assert isinstance(b, bytearray)
    assert bytes(b) == compressed
    b = decompress(compressed, d_kwargs, check_chunk_type=True)
    assert isinstance(b, bytearray)
    assert bytes(b) == data
def test_memoryview():
    """memoryview input is accepted and behaves identically to bytes input."""
    if sys.version_info < (2, 7):
        return  # skip: memoryview requires Python >= 2.7
    c_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    d_kwargs = {}
    d_kwargs.update(c_kwargs)
    data = os.urandom(128 * 1024)  # Read 128kb
    compressed = compress(data, c_kwargs)
    assert compress(memoryview(data), c_kwargs) == compressed
    assert decompress(memoryview(compressed), d_kwargs) == data
def test_with_dict_none():
    """A dictionary of None, b'' or '' on either side is equivalent to no
    dictionary at all: every combination must round-trip.
    """
    kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123" * 24
    for mode in ['default', 'high_compression']:
        c_kwargs = {'mode': mode, 'dictionary': None}
        c_kwargs.update(kwargs)
        d_kwargs = {}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
        c_kwargs = {'mode': mode}
        c_kwargs.update(kwargs)
        d_kwargs = {'dictionary': None}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
        c_kwargs = {'mode': mode, 'dictionary': b''}
        c_kwargs.update(kwargs)
        d_kwargs = {}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
        c_kwargs = {'mode': mode}
        c_kwargs.update(kwargs)
        d_kwargs = {'dictionary': b''}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
        c_kwargs = {'mode': mode, 'dictionary': ''}
        c_kwargs.update(kwargs)
        d_kwargs = {}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
        c_kwargs = {'mode': mode}
        c_kwargs.update(kwargs)
        d_kwargs = {'dictionary': ''}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
def test_with_dict():
    """Decompression needs the SAME dictionary used for compression: a missing,
    truncated or different dictionary fails or yields wrong data, while an
    unused decompression dictionary is harmless.
    """
    kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    input_data = b"2099023098234882923049823094823094898239230982349081231290381209380981203981209381238901283098908123109238098123" * 24
    dict1 = input_data[10:30]
    dict2 = input_data[20:40]
    message = r"^Decompression failed. error: \d+$"
    for mode in ['default', 'high_compression']:
        c_kwargs = {'mode': mode, 'dictionary': dict1}
        c_kwargs.update(kwargs)
        compressed = compress(input_data, c_kwargs)
        # No dictionary on the decompression side: hard failure.
        d_kwargs = {}
        d_kwargs.update(kwargs)
        with pytest.raises(lz4.stream.LZ4StreamError, match=message):
            decompress(compressed, d_kwargs)
        # Truncated dictionary: hard failure.
        d_kwargs = {'dictionary': dict1[:2]}
        d_kwargs.update(kwargs)
        with pytest.raises(lz4.stream.LZ4StreamError, match=message):
            decompress(compressed, d_kwargs)
        # Wrong dictionary: decompresses, but to the wrong data.
        d_kwargs = {'dictionary': dict2}
        d_kwargs.update(kwargs)
        assert decompress(compressed, d_kwargs) != input_data
        # Matching dictionary: correct round-trip.
        d_kwargs = {'dictionary': dict1}
        d_kwargs.update(kwargs)
        assert decompress(compressed, d_kwargs) == input_data
        # Dictionary only on the decompression side is ignored.
        c_kwargs = {}
        c_kwargs.update(kwargs)
        d_kwargs = {'dictionary': dict1}
        d_kwargs.update(kwargs)
        assert decompress(compress(input_data, c_kwargs), d_kwargs) == input_data
def test_known_decompress_1():
    """Known-answer test: a declared-empty block fails, a one-byte block with
    an empty literal run decompresses to b''.
    """
    d_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    output = b''
    # Size prefix says 0 bytes: invalid block.
    input = b'\x00\x00\x00\x00'
    message = "^Decompression failed. error: 1$"
    with pytest.raises(lz4.stream.LZ4StreamError, match=message):
        decompress(input, d_kwargs)
    input = b'\x01\x00\x00\x00\x00'
    assert decompress(input, d_kwargs) == output
def test_known_decompress_2():
    """Known-answer test: minimal single-literal block decompresses to b' '."""
    d_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    input = b'\x02\x00\x00\x00\x10 '
    output = b' '
    assert decompress(input, d_kwargs) == output
def test_known_decompress_3():
    """Known-answer test: a single block with back-references expands to four
    repetitions while staying under buffer_size.
    """
    d_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    # uncompressed data size smaller than buffer_size
    input = b'%\x00\x00\x00\xff\x0bLorem ipsum dolor sit amet\x1a\x006P amet'
    output = b'Lorem ipsum dolor sit amet' * 4
    assert decompress(input, d_kwargs) == output
def test_known_decompress_4():
    """Known-answer test: a multi-block stream decompresses to ten repetitions."""
    d_kwargs = {'strategy': "double_buffer", 'buffer_size': 128, 'store_comp_size': 4}
    input = b'%\x00\x00\x00\xff\x0bLorem ipsum dolor sit amet\x1a\x00NPit am\n\x00\x00\x00\x0fh\x00hP sit \x05\x00\x00\x00@amet'
    output = b'Lorem ipsum dolor sit amet' * 10
    assert decompress(input, d_kwargs) == output
| {
"content_hash": "bb054705bc4128c8724bdd7923850897",
"timestamp": "",
"source": "github",
"line_count": 555,
"max_line_length": 151,
"avg_line_length": 34.02162162162162,
"alnum_prop": 0.6379091197966317,
"repo_name": "python-lz4/python-lz4",
"id": "6b49267e262b01f282ed6da27bc3a479278ff026",
"size": "18882",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/stream/test_stream_1.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "524582"
},
{
"name": "Python",
"bytes": "126306"
}
],
"symlink_target": ""
} |
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class cmppolicy_cmppolicylabel_binding(base_resource) :
    """ Binding class showing the cmppolicylabel that can be bound to cmppolicy.

    Auto-generated NITRO SDK binding resource: instances mirror one row of the
    cmppolicy->cmppolicylabel binding table returned by the NetScaler API.
    """
    def __init__(self) :
        # Attributes populated from the NITRO response payload.
        self._boundto = ""
        self._priority = 0
        self._activepolicy = 0
        self._gotopriorityexpression = ""
        self._labeltype = ""
        self._labelname = ""
        self._name = ""
        # Resource count; filled in when a count request is issued.
        self.___count = 0
    @property
    def name(self) :
        """Name of the HTTP compression policy for which to display details.<br/>Minimum length = 1.
        """
        try :
            return self._name
        except Exception as e:
            raise e
    @name.setter
    def name(self, name) :
        """Name of the HTTP compression policy for which to display details.<br/>Minimum length = 1
        """
        try :
            self._name = name
        except Exception as e:
            raise e
    @property
    def boundto(self) :
        """The name of the entity to which the policy is bound.
        """
        try :
            return self._boundto
        except Exception as e:
            raise e
    @boundto.setter
    def boundto(self, boundto) :
        """The name of the entity to which the policy is bound.
        """
        try :
            self._boundto = boundto
        except Exception as e:
            raise e
    @property
    def priority(self) :
        """Priority of the bound policy (read-only; no setter is generated).
        """
        try :
            return self._priority
        except Exception as e:
            raise e
    @property
    def labelname(self) :
        """Name of the label to invoke if the current policy rule evaluates to TRUE.
        """
        try :
            return self._labelname
        except Exception as e:
            raise e
    @property
    def gotopriorityexpression(self) :
        """Expression specifying the priority of the next policy which will get evaluated if the current policy rule evaluates to TRUE.
        """
        try :
            return self._gotopriorityexpression
        except Exception as e:
            raise e
    @property
    def labeltype(self) :
        """Type of policy label invocation.<br/>Possible values = reqvserver, resvserver, policylabel.
        """
        try :
            return self._labeltype
        except Exception as e:
            raise e
    @property
    def activepolicy(self) :
        """Active-policy indicator reported by NetScaler (read-only).
        """
        try :
            return self._activepolicy
        except Exception as e:
            raise e
    def _get_nitro_response(self, service, response) :
        """ converts nitro response into object and returns the object array in case of get request.
        """
        try :
            result = service.payload_formatter.string_to_resource(cmppolicy_cmppolicylabel_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0) :
                # Error code 444 means the session expired; drop it.
                if (result.errorcode == 444) :
                    service.clear_session(self)
                if result.severity :
                    if (result.severity == "ERROR") :
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else :
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.cmppolicy_cmppolicylabel_binding
        except Exception as e :
            raise e
    def _get_object_name(self) :
        """ Returns the value of object identifier argument
        """
        try :
            if (self.name) :
                return str(self.name)
            return None
        except Exception as e :
            raise e
    @classmethod
    def get(cls, service, name) :
        """ Use this API to fetch cmppolicy_cmppolicylabel_binding resources.
        """
        try :
            obj = cmppolicy_cmppolicylabel_binding()
            obj.name = name
            response = obj.get_resources(service)
            return response
        except Exception as e:
            raise e
    @classmethod
    def get_filtered(cls, service, name, filter_) :
        """ Use this API to fetch filtered set of cmppolicy_cmppolicylabel_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = cmppolicy_cmppolicylabel_binding()
            obj.name = name
            option_ = options()
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            return response
        except Exception as e:
            raise e
    @classmethod
    def count(cls, service, name) :
        """ Use this API to count cmppolicy_cmppolicylabel_binding resources configured on NetScaler.
        """
        try :
            obj = cmppolicy_cmppolicylabel_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            response = obj.get_resources(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
    @classmethod
    def count_filtered(cls, service, name, filter_) :
        """ Use this API to count the filtered set of cmppolicy_cmppolicylabel_binding resources.
        Filter string should be in JSON format.eg: "port:80,servicetype:HTTP".
        """
        try :
            obj = cmppolicy_cmppolicylabel_binding()
            obj.name = name
            option_ = options()
            option_.count = True
            option_.filter = filter_
            response = obj.getfiltered(service, option_)
            if response :
                return response[0].__dict__['___count']
            return 0
        except Exception as e:
            raise e
    class Labeltype:
        # Enumeration of the allowed labeltype attribute values.
        reqvserver = "reqvserver"
        resvserver = "resvserver"
        policylabel = "policylabel"
class cmppolicy_cmppolicylabel_binding_response(base_response) :
    """ NITRO response wrapper: carries the standard error/severity/session
    fields plus a pre-sized list of cmppolicy_cmppolicylabel_binding objects
    for the payload formatter to populate.
    """
    def __init__(self, length=1) :
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        # Fix: the original assigned an empty list here and immediately
        # overwrote it below; the dead assignment is removed.
        self.cmppolicy_cmppolicylabel_binding = [cmppolicy_cmppolicylabel_binding() for _ in range(length)]
| {
"content_hash": "dacf0fa3f4297971f67bba6406bc1454",
"timestamp": "",
"source": "github",
"line_count": 201,
"max_line_length": 134,
"avg_line_length": 27.17910447761194,
"alnum_prop": 0.6977850997620355,
"repo_name": "mahabs/nitro",
"id": "3f5174ed83d969f1abd64e59aa753f086767d43c",
"size": "6077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "nssrc/com/citrix/netscaler/nitro/resource/config/cmp/cmppolicy_cmppolicylabel_binding.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "498"
},
{
"name": "Python",
"bytes": "10647176"
}
],
"symlink_target": ""
} |
from django.conf import settings
from django.contrib import messages, auth
from django.contrib.auth import views as django_views
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect
from django.utils.translation import ugettext_lazy as _
from django.template.response import TemplateResponse
from saleor.cart.utils import find_and_assign_anonymous_cart
from .forms import LoginForm, SignupForm, SetPasswordForm
@find_and_assign_anonymous_cart()
def login(request):
    """Render and process the login form using the account templates.

    The decorator attaches any anonymous cart to the user on success.
    """
    return django_views.login(
        request, template_name='account/login.html',
        authentication_form=LoginForm)
@login_required
def logout(request):
    """Log the current user out, flash a confirmation message and redirect
    to the configured LOGIN_REDIRECT_URL."""
    auth.logout(request)
    messages.success(request, _('You have been successfully logged out.'))
    return redirect(settings.LOGIN_REDIRECT_URL)
def signup(request):
    """Create a new account; on success authenticate, log in and redirect."""
    form = SignupForm(request.POST or None)
    if form.is_valid():
        form.save()
        # Authenticate with the just-submitted credentials so the session
        # can be established immediately.
        user = auth.authenticate(
            email=form.cleaned_data.get('email'),
            password=form.cleaned_data.get('password'))
        if user:
            auth.login(request, user)
        messages.success(request, _('User has been created'))
        return redirect(settings.LOGIN_REDIRECT_URL)
    return TemplateResponse(request, 'account/signup.html', {'form': form})
def password_reset(request):
    """Start the password-reset flow with the account templates and emails."""
    return django_views.password_reset(
        request,
        template_name='account/password_reset.html',
        post_reset_redirect='account_reset_password_done',
        email_template_name='account/email/password_reset_message.txt',
        subject_template_name='account/email/password_reset_subject.txt')
def password_reset_confirm(request, uidb64=None, token=None):
    """Handle the emailed reset link: validate the token and set a new password."""
    return django_views.password_reset_confirm(
        request, uidb64=uidb64, token=token,
        template_name='account/password_reset_from_key.html',
        post_reset_redirect='account_reset_password_complete',
        set_password_form=SetPasswordForm)
| {
"content_hash": "c53196e40c8951d2c7b2ae841c99d25c",
"timestamp": "",
"source": "github",
"line_count": 61,
"max_line_length": 80,
"avg_line_length": 38.24590163934426,
"alnum_prop": 0.7261037291041578,
"repo_name": "tfroehlich82/saleor",
"id": "265681ccde629b6d1d2d2356efaced5854db4984",
"size": "2333",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "saleor/registration/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "65271"
},
{
"name": "HTML",
"bytes": "389308"
},
{
"name": "JavaScript",
"bytes": "61318"
},
{
"name": "Python",
"bytes": "658328"
}
],
"symlink_target": ""
} |
"""\
========================
Session Example
========================
A simple persistent request handler component.
Each time a URL that is handled by this component is requested, the page's
'hit counter' is incremented and shown to the user as text.
"""
from Axon.Component import component
from Axon.Ipc import producerFinished, shutdown
import Kamaelia.Protocol.HTTP.ErrorPages
Sessions = {}
def SessionExampleWrapper(request):
    """Return the handler component for the session named by the URI suffix.

    A new SessionExample is created (and registered as busy) the first time a
    session id is seen; subsequent requests reuse it. While a handler is
    marked busy an HTTP 500 error page is returned instead.
    """
    sessionid = request["uri-suffix"]
    if sessionid in Sessions:  # fix: dict.has_key() no longer exists on Python 3
        session = Sessions[sessionid]
        if session["busy"]:
            # Fix: the module is imported under its full dotted path only, so
            # the bare name 'ErrorPages' raised NameError here.
            return Kamaelia.Protocol.HTTP.ErrorPages.websiteErrorPage(
                500, "Session handler busy")
        else:
            return session["handler"]
    else:
        session = { "busy" : True, "handler" : SessionExample(sessionid) }
        Sessions[sessionid] = session
        return session["handler"]
class SessionExample(component):
    """Axon component serving a per-session hit-counter page.

    One instance exists per session id. Each activation emits the next
    counter value as an HTML resource, signals completion, marks its
    Sessions entry as free again and pauses until re-activated.
    """
    def __init__(self, sessionid):
        super(SessionExample, self).__init__()
        # Key of this handler's entry in the module-level Sessions dict.
        self.sessionid = sessionid
    def main(self):
        counter = 0
        while 1:
            counter += 1
            # Response resource in the dict shape the HTTP handler chain expects.
            resource = {
                "statuscode" : "200",
                "data" : u"<html><body>%d</body></html>" % counter,
                "incomplete" : False,
                "content-type" : "text/html"
            }
            self.send(resource, "outbox")
            self.send(producerFinished(self), "signal")
            # Release the session so the wrapper can hand this handler out again.
            Sessions[self.sessionid]["busy"] = False
            self.pause()
            yield 1
__kamaelia_components__ = ( SessionExample, )
| {
"content_hash": "6c9c3b34cf2053182b3fd861a9b346b6",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 75,
"avg_line_length": 30.28846153846154,
"alnum_prop": 0.5784126984126984,
"repo_name": "sparkslabs/kamaelia_",
"id": "7e2487f211ed21c283b8388482e5d44f2e8c82f9",
"size": "2536",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Sketches/MPS/BugReports/FixTests/Kamaelia/Kamaelia/Protocol/HTTP/Handlers/SessionExample.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "3814"
},
{
"name": "C",
"bytes": "212854"
},
{
"name": "C++",
"bytes": "327546"
},
{
"name": "CSS",
"bytes": "114434"
},
{
"name": "ChucK",
"bytes": "422"
},
{
"name": "HTML",
"bytes": "1288960"
},
{
"name": "Java",
"bytes": "31832"
},
{
"name": "JavaScript",
"bytes": "829491"
},
{
"name": "Makefile",
"bytes": "5768"
},
{
"name": "NSIS",
"bytes": "18867"
},
{
"name": "PHP",
"bytes": "49059"
},
{
"name": "Perl",
"bytes": "504"
},
{
"name": "Processing",
"bytes": "2885"
},
{
"name": "Pure Data",
"bytes": "7485482"
},
{
"name": "Python",
"bytes": "18896248"
},
{
"name": "Ruby",
"bytes": "4165"
},
{
"name": "Shell",
"bytes": "707430"
}
],
"symlink_target": ""
} |
from . import dao, validator, webserver, updater, api
import yaml
class PrefixListMain:
    """Application entry point: wires up the DAO, validator, updater worker
    and web server from a YAML config file, then runs both threads.

    Note: the constructor blocks (joins both threads) until they exit.
    """

    updaterThread = None
    webServerThread = None

    def __init__(self, configfile):
        with open(configfile, "r") as stream:
            # safe_load: plain yaml.load without an explicit Loader is
            # deprecated and allows arbitrary object construction from the
            # config file.  (If the config relies on custom YAML tags this
            # would need a dedicated Loader -- none are visible here.)
            config = yaml.safe_load(stream)

        if "dao" in config:
            # Start database abstraction layer
            dao.DAO.setup(config["dao"]["class"], config["dao"])
        else:
            raise ValueError("Please configure a DAO")

        # Validator settings are optional; fall back to an empty mapping.
        self.validator = validator.Validator(config.get("validation", {}))

        # BUG FIX: previously this attribute was only assigned when the key
        # existed, so configs without "rpsl_objects" crashed with
        # AttributeError on the UpdaterWorker call below.  None now signals
        # "not configured" -- verify UpdaterWorker accepts that.
        self.rpsl_objects = config.get("rpsl_objects")

        self.config = config

        self.updaterThread = updater.UpdaterWorker(self.validator, self.rpsl_objects)
        self.webServerThread = webserver.WebServer(5000, api.app)
        self.updaterThread.start()
        self.webServerThread.start()
        self.updaterThread.join()
        self.webServerThread.join()
| {
"content_hash": "c2504cf1cb339c7748710cddecdaf59f",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 85,
"avg_line_length": 28.342105263157894,
"alnum_prop": 0.6128133704735376,
"repo_name": "emjemj/pre-fixlist",
"id": "e7fa24037ade7db296379150bd0a95cc3a243fe3",
"size": "1077",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "prefixlist/main.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "PLSQL",
"bytes": "3413"
},
{
"name": "Python",
"bytes": "15124"
}
],
"symlink_target": ""
} |
import urllib2
from bs4 import BeautifulSoup
import pickle
# Kentucky Derby years to scrape (first race was 1875).
years = range(1875, 1980)
# Accumulates (horse name, article URL) tuples across every scraped year.
horse_list = []
def horse_pair(td, year):
    """Return a ``(horse name, article URL)`` pair for one results-table cell.

    td   -- BeautifulSoup ``<td>`` Tag containing the horse's name, possibly
            wrapped in an ``<a>`` link to its Wikipedia article.
    year -- Derby year; used to build a fallback URL (the race's own article)
            when the cell has no usable link or the link is a redlink
            (non-existent article).
    """
    name = str(td.string)
    a = td.find("a")
    href = a.get("href") if a is not None else None
    # BUG FIX: guard against <a> tags without an href attribute -- the old
    # check `"redlink" not in a.get("href")` raised TypeError on None.
    if href and "redlink" not in href:
        url = str("https://en.wikipedia.org" + href)
    else:
        url = "https://en.wikipedia.org/wiki/{}_Kentucky_Derby".format(year)
    return (name, url)
# Scrape every Derby year's Wikipedia results table for horse names/links.
# (Python 2 script: `print` statements and urllib2.)
for year in years:
    # NOTE(review): these skip years lie outside range(1875, 1980), so this
    # branch is currently dead -- presumably kept for when `years` is
    # extended to modern races; confirm before removing.
    if year in [2005, 2010, 2011, 2012, 2013]:
        continue
    print year
    page_url = "https://en.wikipedia.org/wiki/{}_Kentucky_Derby".format(year)
    page = urllib2.urlopen(page_url)
    soup = BeautifulSoup(page)
    # The results table is located via its "Horse" column header; `.parent`
    # is the header row itself.
    result_head = soup.find("th", string="Horse").parent
    heads = [th.string for th in result_head.find_all("th")]
    horse_index = heads.index("Horse")
    # Skip the header row; remaining <tr>s are one horse each.
    horse_rows = result_head.parent.find_all("tr")[1:]
    horses = [horse_pair(row.find_all("td")[horse_index],
                         year)
              for row in horse_rows]
    print horses
    horse_list += horses
# De-duplicate by name (later years win on collision).
horse_dict = {}
for name, url in horse_list:
    horse_dict[name] = url
# Persist both the raw list and the name->URL mapping as pickles.
with open("horselist", 'w') as fi:
    pickle.dump(horse_list, fi)
with open("horsedict", 'w') as fi:
    pickle.dump(horse_dict, fi)
| {
"content_hash": "cc2ae6d68622ffd8ccccb30f3e06ae22",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 77,
"avg_line_length": 24.25,
"alnum_prop": 0.6050753370340999,
"repo_name": "tjweisman/horseorfris",
"id": "f7bb6a5d27efb95cd6a9a0b78060d63732e102e1",
"size": "1261",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scrape_horse.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3693"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import AbstractUser
class UserProfile(AbstractUser):
    """Custom user model extending Django's AbstractUser with a Chinese
    display name and a mobile phone number."""
    # verbose name '中文名' = "Chinese name"
    name_cn = models.CharField('中文名', max_length=30)
    # verbose name '手机' = "mobile phone"; optional, 11 chars fits CN numbers
    phone = models.CharField('手机', max_length=11, null=True, blank=True)
    class Meta:
        # Admin display name '用户信息' = "user information"
        verbose_name = '用户信息'
        verbose_name_plural = verbose_name
    def __unicode__(self):
        # Python 2 string representation (this codebase targets Py2 Django).
        return self.username
| {
"content_hash": "e601010edaef674148934947766e07af",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 72,
"avg_line_length": 26.875,
"alnum_prop": 0.6906976744186046,
"repo_name": "1032231418/python",
"id": "f21f7c91ea6bccd946fb2d8a61e7cc8d66812bff",
"size": "472",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lesson10/apps/dashboard/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3604672"
},
{
"name": "HTML",
"bytes": "15255764"
},
{
"name": "JavaScript",
"bytes": "3123125"
},
{
"name": "Makefile",
"bytes": "9572"
},
{
"name": "PHP",
"bytes": "2830"
},
{
"name": "Python",
"bytes": "213304"
},
{
"name": "Ruby",
"bytes": "20492"
}
],
"symlink_target": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.