repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
SasView/sasview | src/sas/qtgui/GUITests.py | Python | bsd-3-clause | 11,242 | 0.008806 | import os
import unittest
import sys
from PyQt5 import QtGui
from PyQt5 import QtWidgets
"""
Unit tests for the QT GUI
=========================
In order to run the tests, first install SasView and sasmodels to site-packages
by running ``python setup.py install`` in both repositories.
The tests can be run with ``python GUITests.py``, or
``python GUITests.py suiteName1 suiteName2 ...`` for a subset of tests.
To get more verbose console output (recommended), use ``python GUITests.py -v``
"""
# List of all suite names. Every time a new suite is added, its name should
# also be added here
ALL_SUITES = [
'calculatorsSuite',
'mainSuite',
'fittingSuite',
'plottingSuite',
'utilitiesSuite',
'corfuncPerspectiveSuite',
'invariantPerspectiveSuite',
'inversionPerspectiveSuite',
]
# Prepare the general QApplication instance
app = QtWidgets.QApplication(sys.argv)
# Main Window
from MainWindow.UnitTesting import AboutBoxTest
from MainWindow.UnitTesting import DataExplorerTest
from MainWindow.UnitTesting import WelcomePanelTest
from MainWindow.UnitTesting import DroppableDataLoadWidgetTest
from MainWindow.UnitTesting import GuiManagerTest
from MainWindow.UnitTesting import MainWindowTest
## Plotting
from Plotting.UnitTesting import AddTextTest
from Plotting.UnitTesting import PlotHelperTest
from Plotting.UnitTesting import WindowTitleTest
from Plotting.UnitTesting import ScalePropertiesTest
from Plotting.UnitTesting import SetGraphRangeTest
from Plotting.UnitTesting import LinearFitTest
from Plotting.UnitTesting import PlotPropertiesTest
from Plotting.UnitTesting import PlotUtilitiesTest
from Plotting.UnitTesting import ColorMapTest
from Plotting.UnitTesting import BoxSumTest
from Plotting.UnitTesting import SlicerModelTest
from Plotting.UnitTesting import SlicerParametersTest
from Plotting.UnitTesting import PlotterBaseTest
from Plotting.UnitTesting import PlotterTest
from Plotting.UnitTesting import Plotter2DTest
from Plotting.UnitTesting import QRangeSliderTests
# Calculators
from Calculators.UnitTesting import KiessigCalculatorTest
from Calculators.UnitTesting import DensityCalculatorTest
from Calculators.UnitTesting import GenericScatteringCalculatorTest
from Calculators.UnitTesting import SLDCalculatorTest
from Calculators.UnitTesting import SlitSizeCalculatorTest
from Calculators.UnitTesting import ResolutionCalculatorPanelTest
from Calculators.UnitTesting import DataOperationUtilityTest
# Utilities
from Utilities.UnitTesting import GuiUtilsTest
from Utilities.UnitTesting import SasviewLoggerTest
from Utilities.UnitTesting import GridPanelTest
from Utilities.UnitTesting import ModelEditorTest
from Utilities.UnitTesting import PluginDefinitionTest
from Utilities.UnitTesting import TabbedModelEditorTest
from Utilities.UnitTesting import AddMultEditorTest
from Utilities.UnitTesting import ReportDialogTest
from Utilities.UnitTesting import FileConverterTest
# Unit Testing
from UnitTesting import TestUtilsTest
# Perspectives
# Fitting
from Perspectives.Fitting.UnitTesting import FittingWidgetTest
from Perspectives.Fitting.UnitTesting import FittingPerspectiveTest
from Perspectives.Fitting.UnitTesting import FittingLogicTest
from Perspectives.Fitting.UnitTesting import FittingUtilitiesTest
from Perspectives.Fitting.UnitTesting import FitPageTest
from Perspectives.Fitting.UnitTesting import FittingOptionsTest
from Perspectives.Fitting.UnitTesting import MultiConstraintTest
from Perspectives.Fitting.UnitTesting import ComplexConstraintTest
from Perspectives.Fitting.UnitTesting import ConstraintWidgetTest
# Invariant
from Perspectives.Invariant.UnitTesting import InvariantPerspectiveTest
from Perspectives.Invariant.UnitTesting import InvariantDetailsTest
# Inversion
from Perspectives.Inversion.UnitTesting import InversionPerspectiveTest
# Corfunc
from Perspectives.Corfunc.UnitTesting import CorfuncTest
def plottingSuite():
suites = (
# Plotting
unittest.makeSuite(Plotter2DTest.Plotter2DTest, 'test'),
unittest.makeSuite(PlotHelperTest.PlotHelperTest, 'test'),
unittest.makeSuite(AddTextTest.AddTextTest, 'test'),
unittest.makeSuite(WindowTitleTest.WindowTitleTest, 'test'),
| unittest.makeSuite(ScalePropertiesTest.ScalePropertiesTest, 'test'),
unittest.makeSuite(SetG | raphRangeTest.SetGraphRangeTest, 'test'),
unittest.makeSuite(LinearFitTest.LinearFitTest, 'test'),
unittest.makeSuite(PlotPropertiesTest.PlotPropertiesTest, 'test'),
unittest.makeSuite(PlotUtilitiesTest.PlotUtilitiesTest, 'test'),
unittest.makeSuite(ColorMapTest.ColorMapTest, 'test'),
unittest.makeSuite(BoxSumTest.BoxSumTest, 'test'),
unittest.makeSuite(SlicerModelTest.SlicerModelTest, 'test'),
unittest.makeSuite(SlicerParametersTest.SlicerParametersTest, 'test'),
unittest.makeSuite(PlotterBaseTest.PlotterBaseTest, 'test'),
unittest.makeSuite(PlotterTest.PlotterTest, 'test'),
unittest.makeSuite(QRangeSliderTests.QRangeSlidersTest, 'test'),
)
return unittest.TestSuite(suites)
def mainSuite():
suites = (
# Main window
unittest.makeSuite(DataExplorerTest.DataExplorerTest, 'test'),
unittest.makeSuite(DroppableDataLoadWidgetTest.DroppableDataLoadWidgetTest, 'test'),
unittest.makeSuite(MainWindowTest.MainWindowTest, 'test'),
unittest.makeSuite(GuiManagerTest.GuiManagerTest, 'test'),
unittest.makeSuite(AboutBoxTest.AboutBoxTest, 'test'),
unittest.makeSuite(WelcomePanelTest.WelcomePanelTest, 'test'),
)
return unittest.TestSuite(suites)
def utilitiesSuite():
suites = (
## Utilities
unittest.makeSuite(TestUtilsTest.TestUtilsTest, 'test'),
unittest.makeSuite(SasviewLoggerTest.SasviewLoggerTest, 'test'),
unittest.makeSuite(GuiUtilsTest.GuiUtilsTest, 'test'),
unittest.makeSuite(GuiUtilsTest.DoubleValidatorTest, 'test'),
unittest.makeSuite(GuiUtilsTest.HashableStandardItemTest, 'test'),
unittest.makeSuite(GridPanelTest.BatchOutputPanelTest, 'test'),
unittest.makeSuite(ModelEditorTest.ModelEditorTest, 'test'),
unittest.makeSuite(PluginDefinitionTest.PluginDefinitionTest, 'test'),
unittest.makeSuite(TabbedModelEditorTest.TabbedModelEditorTest,'test'),
unittest.makeSuite(AddMultEditorTest.AddMultEditorTest, 'test'),
unittest.makeSuite(ReportDialogTest.ReportDialogTest, 'test'),
unittest.makeSuite(FileConverterTest.FileConverterTest, 'test'),
)
return unittest.TestSuite(suites)
def calculatorsSuite():
suites = (
# Calculators
unittest.makeSuite(KiessigCalculatorTest.KiessigCalculatorTest, 'test'),
unittest.makeSuite(DensityCalculatorTest.DensityCalculatorTest, 'test'),
unittest.makeSuite(GenericScatteringCalculatorTest.GenericScatteringCalculatorTest, 'test'),
unittest.makeSuite(SLDCalculatorTest.SLDCalculatorTest, 'test'),
unittest.makeSuite(SlitSizeCalculatorTest.SlitSizeCalculatorTest, 'test'),
unittest.makeSuite(ResolutionCalculatorPanelTest.ResolutionCalculatorPanelTest, 'test'),
unittest.makeSuite(DataOperationUtilityTest.DataOperationUtilityTest, 'test'),
)
return unittest.TestSuite(suites)
def fittingSuite():
suites = (
# Perspectives
# Fitting
unittest.makeSuite(FittingPerspectiveTest.FittingPerspectiveTest, 'test'),
unittest.makeSuite(FittingWidgetTest.FittingWidgetTest, 'test'),
unittest.makeSuite(FittingLogicTest.FittingLogicTest, 'test'),
unittest.makeSuite(FittingUtilitiesTest.FittingUtilitiesTest, 'test'),
unittest.makeSuite(FitPageTest.FitPageTest, 'test'),
unittest.makeSuite(FittingOptionsTest.FittingOptionsTest, 'test'), |
angelapper/edx-platform | cms/djangoapps/contentstore/management/commands/delete_course.py | Python | agpl-3.0 | 2,796 | 0.005007 | """
Command for deleting courses
Arguments:
arg1 (str): Course key of the course to delete
Returns:
none
"""
from django.core.management.base import BaseCommand, CommandError
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from contentstore.utils import delete_cou | rse
from xmodule.modulestore import ModuleStoreEnum
from xmodule.modulestore.django import modulestore
from .prompt import query_yes_no
class Command(BaseCom | mand):
"""
Delete a MongoDB backed course
Example usage:
$ ./manage.py cms delete_course 'course-v1:edX+DemoX+Demo_Course' --settings=devstack
$ ./manage.py cms delete_course 'course-v1:edX+DemoX+Demo_Course' --keep-instructors --settings=devstack
Note:
keep-instructors option is added in effort to delete duplicate courses safely.
There happens to be courses with difference of casing in ids, for example
course-v1:DartmouthX+DART.ENGL.01.X+2016_T1 is a duplicate of course-v1:DartmouthX+DART.ENGL.01.x+2016_T1
(Note the differene in 'x' of course number). These two are independent courses in MongoDB.
Current MYSQL setup is case-insensitive which essentially means there are not
seperate entries (in all course related mysql tables, but here we are concerned about accesses)
for duplicate courses.
This option will make us able to delete course (duplicate one) from
mongo while perserving course's related access data in mysql.
"""
help = '''Delete a MongoDB backed course'''
def add_arguments(self, parser):
"""
Add arguments to the command parser.
"""
parser.add_argument('course_key', help="ID of the course to delete.")
parser.add_argument(
'--keep-instructors',
action='store_true',
default=False,
help='Do not remove permissions of users and groups for course',
)
def handle(self, *args, **options):
try:
course_key = CourseKey.from_string(options['course_key'])
except InvalidKeyError:
raise CommandError("Invalid course_key: '%s'." % options['course_key'])
if not modulestore().get_course(course_key):
raise CommandError("Course with '%s' key not found." % options['course_key'])
print 'Going to delete the %s course from DB....' % options['course_key']
if query_yes_no("Deleting course {0}. Confirm?".format(course_key), default="no"):
if query_yes_no("Are you sure. This action cannot be undone!", default="no"):
delete_course(course_key, ModuleStoreEnum.UserID.mgmt_command, options['keep_instructors'])
print "Deleted course {}".format(course_key)
|
AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/github/test_CVE_2017_17724.py | Python | gpl-3.0 | 654 | 0 | # -*- coding: utf-8 -*-
import system_tests
class TestFuzzedPoC(metaclass=system_tests.CaseMeta):
url = [
"https://github.com/Exiv2/exiv2/issues/210",
"https://github.com/Exiv2/exiv2/issues/209"
]
filename = system_test | s.path("$data_path/2018-01-09-exiv2-crash-002.tiff")
commands = [
"$e | xiv2 -pR $filename",
"$exiv2 -pS $filename",
"$exiv2 $filename"
]
retval = [1, 1, 0]
compare_stderr = system_tests.check_no_ASAN_UBSAN_errors
def compare_stdout(self, i, command, got_stdout, expected_stdout):
""" We don't care about the stdout, just don't crash """
pass
|
jedevc/EuPy | examples/hello.py | Python | mit | 736 | 0.005435 | import euphoria as eu
class HiBot(eu.ping_room.PingRoom, eu.standard_room.StandardRoo | m):
def __init__(self, roomname, password=None):
super().__init__(roomname, password, at | tempts=2)
self.nickname = "HiBot"
self.short_help_text = "Say hello to @HiBot!"
self.help_text = self.short_help_text + 2 * '\n' + "Just a quick demo bot."
def handle_chat(self, message):
if "hi" in message["content"].lower():
self.send_chat("Hi there!", message["id"])
def ready(self):
self.send_chat("/me Hello!")
def cleanup(self):
self.send_chat("/me Goodbye...")
def main():
hi = HiBot("testing")
eu.executable.start(hi)
if __name__ == "__main__":
main()
|
Xoristzatziki/Sample-App | _lib/forconfig.py | Python | lgpl-3.0 | 1,587 | 0.016383 | #!/usr/bin/python3
#Copyright Xoristzatziki
import configparser
import os,sys
class MyConfigs():
def __init__(self,filename):
self.myconfigfilename = filename
def readconfigvalue(self, wichsection, wichoption, default):
try:
cp = configparser.ConfigParser()
cp.read(self.myconfigfilename)
return cp.get(wichsection,wichoption)
except configparser.NoSectionError:
return default
except:#oops...
print("Exception: ", str(sys.exc_info()) )
return default
def writeconfigvalue(self, whichsection, whichoption, whichvalue):
b = os.path.split(self.myconfigfilename)
b = os.path.join(b[:-1])
if not os.path.isdir(b[0]):
os.makedirs(b[0])#self.myconfigfilename)
cp = configparser.ConfigParser()
cp.read(self.myconfigfilename)
#print dir(cp)
if cp.has_section(whichsection):
pass
else:
cp.add_section(whichsection)
cp.set(whichsection, whichoption, whichvalue)
#cp.set('main','width',whichvalue)
| with open(self.myconfigfilename, 'w') as f:
cp.write(f)
#print self.myconfigfilename
if __name__ == "__main__":
#print pygtk
# get the real location of this launcher file ( | not the link location)
#realfile = os.path.realpath(__file__)
inifile = os.path.join(os.path.expanduser('~'),'OCPany.conf')
test = MyConfigs(inifile)
test.writeconfigvalue('somesection',someparameter','avalue')
|
nibrahim/PlasTeX | unittests/FunctionalTests.py | Python | mit | 4,122 | 0.009461 | #!/usr/bin/env python
import unittest, re, os, tempfile, shutil, glob, difflib, subprocess
from unittest import TestCase
class Process(object):
""" Simple subprocess wrapper """
def __init__(self, *args, **kwargs):
if 'stdin' not in kwargs:
kwargs['stdin'] = subprocess.PIPE
if 'stdout' not in kwargs:
kwargs['stdout'] = subprocess.PIPE |
if 'stderr' not in kwargs:
kwargs['stderr'] = subprocess.STDOUT
self.process = subprocess.Popen(args, **kwargs)
self.log = self.process.stdout.read()
self.ret | urncode = self.process.returncode
self.process.stdout.close()
self.process.stdin.close()
class Benched(TestCase):
""" Compile LaTeX file and compare to benchmark file """
filename = None
def runTest(self):
if not self.filename:
return
src = self.filename
root = os.path.dirname(os.path.dirname(src))
# Create temp dir and files
outdir = tempfile.mkdtemp()
texfile = os.path.join(outdir, os.path.basename(src))
shutil.copyfile(src, texfile)
# Run preprocessing commands
for line in open(src):
if line.startswith('%*'):
command = line[2:].strip()
p = Process(cwd=outdir, *command.split())
if p.returncode:
raise OSError, 'Preprocessing command exited abnormally with return code %s: %s' % (command, p.log)
elif line.startswith('%#'):
filename = line[2:].strip()
shutil.copyfile(os.path.join(root,'extras',filename),
os.path.join(outdir,filename))
elif line.startswith('%'):
continue
elif not line.strip():
continue
else:
break
# Run plastex
outfile = os.path.join(outdir, os.path.splitext(os.path.basename(src))[0]+'.html')
p = Process('plastex','--split-level=0','--no-theme-extras',
'--dir=%s' % outdir,'--theme=minimal',
'--filename=%s' % os.path.basename(outfile), os.path.basename(src),
cwd=outdir)
if p.returncode:
shutil.rmtree(outdir, ignore_errors=True)
raise OSError, 'plastex failed with code %s: %s' % (p.returncode, p.log)
# Read output file
output = open(outfile)
# Get name of output file / benchmark file
benchfile = os.path.join(root,'benchmarks',os.path.basename(outfile))
if os.path.isfile(benchfile):
bench = open(benchfile).readlines()
output = output.readlines()
else:
try: os.makedirs(os.path.join(root,'new'))
except: pass
newfile = os.path.join(root,'new',os.path.basename(outfile))
open(newfile,'w').write(output.read())
shutil.rmtree(outdir, ignore_errors=True)
raise OSError, 'No benchmark file: %s' % benchfile
# Compare files
diff = ''.join(list(difflib.unified_diff(bench, output))).strip()
if diff:
shutil.rmtree(outdir, ignore_errors=True)
try: os.makedirs(os.path.join(root,'new'))
except: pass
newfile = os.path.join(root,'new',os.path.basename(outfile))
open(newfile,'w').writelines(output)
assert not(diff), 'Differences were found: %s' % diff
# Clean up
shutil.rmtree(outdir, ignore_errors=True)
def testSuite():
""" Locate all .tex files and create a test suite from them """
suite = unittest.TestSuite()
for root, dirs, files in os.walk('.'):
for f in files:
if os.path.splitext(f)[-1] != '.tex':
continue
test = Benched()
test.filename = os.path.abspath(os.path.join(root, f))
suite.addTest(test)
return suite
def test():
""" Execute test suite """
unittest.TextTestRunner().run(testSuite())
if __name__ == '__main__':
test()
|
ioparaskev/cenotes | manage.py | Python | gpl-3.0 | 3,014 | 0.000995 | import json
from datetime import date
from urllib.parse import unquote
import functools
from flask_migrate import MigrateCommand
from flask_script import Manager
from cenotes_lib.crypto import get_supported_algorithm_options
from cenotes import create_app
from cenotes.api import craft_response, CENParams
from cenotes.models import Note
manager = Manager(create_app)
manager.add_option("-c", "--config", dest="app_settings", required=False)
manager.add_command('db', MigrateCommand)
def show_json_request_format(indent=False):
with create_app().app_context():
func = functools.partial(json.dumps, indent=4) if indent else json.dumps
return func(CENParams(plaintext="", key="",
expiration_date=date.today().isoformat(),
visits_count=0, max_visits=0).__dict__)
def show_json_response_format(indent=False):
func = functools.partial(json.dumps, indent=4) if indent else json.dumps
return func(craft_response(
error="", success=True, plaintext="", key=" | ", dkey="",
payload="ciphertext",
enote=Note(payload="ciphertext", expirat | ion_date=date.today())))
def list_url_endpoints():
output = []
for rule in manager.app.url_map.iter_rules():
if rule.endpoint == "static":
continue
options = {}
for arg in rule.arguments:
options[arg] = "[{0}]".format(arg)
methods = ','.join(rule.methods)
line = unquote("{:50s} {:20s} {}".format(rule.endpoint, methods, rule))
output.append(line)
return output
@manager.option('--response', dest="response", action="store_true", default=False)
@manager.option('--request', dest="request", action="store_true", default=False)
@manager.option('--both', dest="both", action="store_true", default=False)
def api(response, request, both):
def response_format():
print("Response format is always like this:\n"
"(Some fields may be left empty depending on endpoint call)")
print(show_json_response_format(indent=True))
print("")
def request_format():
print("Request format is always like this:\n"
"(Some fields may be left empty depending on endpoint call)")
print(show_json_request_format(indent=True))
print("")
if both:
response = request = True
if response:
response_format()
if request:
request_format()
@manager.command
def routes():
for line in sorted(list_url_endpoints()):
print(line)
@manager.option("--keygen", dest="enc", action="store_true", default=False)
def settings(enc):
def craft_algo_params_format():
return {algo: {"hardness": hardness}
for algo, hardness in tuple(get_supported_algorithm_options())}
if enc:
print("Valid algorithm/hardness settings for your device are:\n"
"{params}".format(params=craft_algo_params_format()))
if __name__ == '__main__':
manager.run()
|
minghuascode/pyj | examples/funnysortedgridthing/SortedGridThing.py | Python | apache-2.0 | 2,659 | 0.00188 | import pyjd # this is dummy in pyjs.
from pyjamas.ui.RootPanel import RootPanel
from pyjamas.ui.Button import Button
from pyjamas.ui.HTML import HTML
from pyjamas.ui.HorizontalPanel import HorizontalPanel
from pyjamas.ui.VerticalPanel import VerticalPanel
from pyjamas.ui.DockPanel import DockPanel
from pyjamas.ui.ScrollPanel import ScrollPanel
from pyjamas.ui.Grid import Grid
from pyjamas.ui.DisclosurePanel import DisclosurePanel
class OddGridWidget(DockPanel):
def __init__(self, **kwargs):
DockPanel.__init__(self, **kwargs)
self.grid = Grid(StyleName="datagrid")
self.sp = ScrollPanel(self.grid, Width="100%", Height="100%")
self.header = Grid(Height="50px")
self.add(self.header, DockPanel.NORTH)
self.add(self.sp, DockPanel.CENTER)
cf = self.setCellHeight(self.header, "50px")
cf = self.setCellHeight(self.sp, "100%")
self.sortcol = 0
def setData(self, data):
self.data = data
self.redraw()
def sortfn(self, row1, row2):
return cmp(row1[self.sortcol], row2[self.sortcol])
def redraw(self):
self.data.sort(self.sortfn)
rows = len(self.data)
cols = 0
if rows > 0:
cols = len(self.data[0])
self.grid.resize(rows, cols)
self.header.resize(1, cols)
cf = self.grid.getCellFormatter()
for (nrow, row) in enumerate(self.data):
for (ncol, item) in | enumerate(row):
self.grid.setHTML(nrow, ncol, str(item))
cf.setWidth(nrow, ncol, "200px")
cf = self.header.getCellFormatter()
self.sortbuttons = []
for ncol in range(cols):
sb = Button("s | ort col %d" % ncol)
sb.addClickListener(self)
self.header.setWidget(0, ncol, sb)
cf.setWidth(0, ncol, "200px")
self.sortbuttons.append(sb)
def onClick(self, sender):
for (ncol, b) in enumerate(self.sortbuttons):
if sender == b:
self.sortcol = ncol
self.redraw()
data = [["hello", "fred", 52],
["bye", "joe", 98],
["greetings", "alien", 0],
["sayonara", "jun", 1],
["gutentaag", "volker", 2],
["bonjour", "francois", 5],
["au reservoir", "fabrice", 8],
["go away", "mary", 73]
]
if __name__ == '__main__':
pyjd.setup("public/SortedGridThing.html")
ogw = OddGridWidget(Width="600px", Height="200px", StyleName="ogw")
ogw.setData(data)
dp = DisclosurePanel("Click to disclose / hide", True, Width="602px")
dp.add(ogw)
RootPanel().add(dp)
pyjd.run()
|
robmoggach/django-token-auth | src/token_auth/test_settings.py | Python | bsd-3-clause | 91 | 0.010989 | from settings im | port *
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = '/tmp/token_a | uth.db'
|
facebookresearch/ParlAI | parlai/tasks/dbll_movie/build.py | Python | mit | 756 | 0 | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
# Download and build the data if it does not exist.
from parlai.core.build_data import DownloadableFi | le
import parlai.tasks.dbll_babi.build as dbll_babi_build
import parlai.tasks.wikimovies.build as wikimovies_build
RESOURCES = [
DownloadableFile(
'http://parl.ai/downloads/dbll/dbll.tgz',
'dbll.tgz',
'd8c727dac498b652c7f5de6f72155dce711ff46c88401a303399d3fad4db1e68',
)
]
def build(opt):
# Depends upon another dataset, wikimovies, build that first.
wikimovies_build.build(opt)
| dbll_babi_build.build(opt)
|
dineshrajpurohit/ds_al | queue.py | Python | mit | 1,234 | 0.00081 | """Queue implementation using Linked list."""
from __future__ import print_function
from linked_list import Linked_List, Node
class Queue(Linked_List):
def __init__(self):
Linked_List.__init__(self)
def enqueue(self, item):
node = Node(item)
self.insert_front(node)
def dequeue(self):
return self.remove_end()
def is_empty(self):
return self.count == 0
def size(self):
return self.count
def test_queue():
print("\n\n QUEUE TESTING")
queue = Queue()
print("Queue Empty?", queue.is_empty())
queue.enqueue("A")
queue.enqueue | ("B")
queue.enqueue("C")
print("Queue Size:", queue.size())
print(queue)
print("DEQU | EUEING 1")
print(queue.dequeue())
print(queue)
print("Queue Size:", queue.size())
print("DEQUEUEING 2")
print(queue.dequeue())
print(queue)
print("Queue Size:", queue.size())
print("Queue Empty?", queue.is_empty())
print("DEQUEUEING 3")
print(queue.dequeue())
print(queue)
print("Queue Size:", queue.size())
print("Queue Empty?", queue.is_empty())
print(queue)
print("DEQUEUEING 4")
print(queue.dequeue())
if __name__ == '__main__':
test_queue()
|
google/loaner | loaner/web_app/backend/testing/loanertest_test.py | Python | apache-2.0 | 2,782 | 0.005392 | # Copyright 2018 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for backend.testing.loanertest."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import mock
import endpoints
from loaner.web_app.backend.lib import action_loader # pylint: disable=unused-import
from loaner.web_app.backend.testing import loanertest
class EndpointsTestCaseTest(loanertest.EndpointsTestCase):
"""Test the test loanertest EndpointsTestCase methods."""
def test_successful_login_endpoints_user(self):
"""Test the successful login of an endpoints current user."""
self.assertFalse(endpoints.get_current_user())
self.login_endpoints_user()
self.assertTrue(endpoints.get_current_user())
self.assertEqual(
endpoints.get_current_user().email(), loanertest.USER_EMAIL)
class ActionTestCaseTest(loanertest.ActionTestCase):
"""Test a successful use of the TestCase."""
@mock.patch('__main__.action_loader.load_actions')
def setUp(self, mock_importactions):
self.testing_action = 'action_sample'
self.fake_action = 'fake_action'
mock_importactions.return_value = {
'sy | nc': {'action_sample': self.fake_action}}
super(ActionTestCaseTest, self).setUp()
def test_success(self):
self.assertEqual(self.action, self.fake_action)
class ActionTestCaseTestNoTestingAction(loanertest.ActionTestCase):
"""Test what happens when you forget to specify self.testing_action."""
def setUp(self):
pass
def test_fail(self):
self.assertRaisesRegexp(
E | nvironmentError, '.*Create a TestCase setUp .* variable named.*',
super(ActionTestCaseTestNoTestingAction, self).setUp)
class ActionTestCaseTestNoActions(loanertest.ActionTestCase):
"""Test what happens when there are no matching action modules available."""
def setUp(self):
pass
@mock.patch('__main__.action_loader.load_actions')
def test_fail(self, mock_importactions):
self.testing_action = 'action_sample'
mock_importactions.return_value = {}
self.assertRaisesRegexp(
EnvironmentError, '.*must import at least one.*',
super(ActionTestCaseTestNoActions, self).setUp)
if __name__ == '__main__':
loanertest.main()
|
walidsa3d/shaman | shaman/providers/api.py | Python | mit | 951 | 0 | from allocine import allocine
from constants import *
from elcinema imp | ort elcinema
from imdb import imdby as Imdb
from rotten import rot | ten
from tmdb import tmdb
def search(query, site):
if site == "imdb":
provider = Imdb()
elif site == "elcinema":
provider = elcinema()
elif site == "rottentomatoes":
provider = rotten(rotten_key)
elif site == "themoviedatabase":
provider = tmdb(tmdb_key)
elif site == "allocine":
provider = allocine()
results = provider.search(query)
return results
def info(movie_id, site):
if site == "imdb":
provider = Imdb()
elif site == "elcinema":
provider = elcinema()
elif site == "rottentomatoes":
provider = rotten(rotten_key)
elif site == "themoviedatabase":
provider = tmdb(tmdb_key)
elif site == "allocine":
provider = allocine()
result = provider.info(movie_id)
return result
|
dsanders11/django-newsletter | test_project/test_project/settings.py | Python | agpl-3.0 | 2,081 | 0.000961 | import os
test_dir = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlit | e3',
'NAME': os.pat | h.join(test_dir, 'db.sqlite3'),
}
}
INSTALLED_APPS = [
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.auth',
'django.contrib.messages',
'django.contrib.admin',
'django.contrib.sites',
'django.contrib.staticfiles',
'imperavi',
'tinymce',
'newsletter'
]
# Imperavi is not compatible with Django 1.9+
import django
if django.VERSION > (1, 8):
INSTALLED_APPS.remove('imperavi')
MIDDLEWARE = [
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
]
ROOT_URLCONF = 'test_project.urls'
FIXTURE_DIRS = [os.path.join(test_dir, 'fixtures'), ]
SITE_ID = 1
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'APP_DIRS': True,
'DIRS': [os.path.join(test_dir, 'templates')],
'OPTIONS': {
'context_processors': [
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
# Enable time-zone support
USE_TZ = True
TIME_ZONE = 'UTC'
# Required for django-webtest to work
STATIC_URL = '/static/'
# Random secret key
import random
key_chars = 'abcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*(-_=+)'
SECRET_KEY = ''.join([
random.SystemRandom().choice(key_chars) for i in range(50)
])
# Logs all newsletter app messages to the console
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'class': 'logging.StreamHandler',
},
},
'loggers': {
'newsletter': {
'handlers': ['console'],
'propagate': True,
},
},
}
DEFAULT_AUTO_FIELD = "django.db.models.AutoField"
|
68foxboris/enigma2-openpli-vuplus | lib/python/Components/About.py | Python | gpl-2.0 | 4,073 | 0.035372 | # -*- coding: utf-8 -*-
import sys, os, time
from Tools.HardwareInfo import HardwareInfo
def getVersionString():
return getImageVersionString()
def getImageVersionString():
try:
if os.path.isfile('/var/lib/opkg/status'):
st = os.stat('/var/lib/opkg/status')
else:
st = os.stat('/usr/lib/ipkg/status')
tm = time.localtime(st.st_mtime)
if tm.tm_year >= 2011:
return time.strftime("%Y-%m-%d %H:%M:%S", tm)
except:
pass
return _("unavailable")
def getFlashDateString():
try:
return time.strftime(_("%Y-%m-%d %H:%M"), time.strptime(open("/etc/version").read().strip(), '%Y%m%d%H%M'))
except:
return _("unknown")
def getEnigmaVersionString():
import enigma
enigma_version = enigma.getEnigmaVersionString()
if '-(no branch)' in enigma_version:
enigma_version = enigma_version [:-12]
return enigma_version
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
return open("/proc/version","r").read().split(' ', 4)[2].split('-',2)[0]
except:
return _("unknown")
def getHardwareTypeString():
return HardwareInfo().get_device_string()
def getImageTypeString():
try:
return "Taapat based on " + open("/etc/issue").readlines()[-2].capitalize().strip()[:-6]
except:
return _("undefined")
def getCPUInfoString():
try:
cpu_count = 0
cpu_speed = 0
temperature = None
for line in open("/proc/cpuinfo").readlines():
line = [x.strip() for x in line.strip().split(":")]
if line[0] in ("system type", "model name"):
processor = line[1].split()[0]
elif line[0] == "cpu MHz":
cpu_speed = "%1.0f" % float(line[1])
elif line[0] == "processor":
cpu_count += 1
if not cpu_speed:
try:
cpu_speed = int(open("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq").read()) / 1000
except:
try:
import binascii
cpu_speed = int(int(binascii.hexlify(open('/sys/firmware/devicetree/base/cpus/cpu@0/clock-frequency', 'rb').read()), 16) / 100000000) * 100
except:
cpu_speed = "-"
if os.path.isfile('/proc/stb/fp/temp_sensor_avs'):
temperature = open("/proc/stb/fp/temp_sensor_avs").readline().replace('\n','')
if os.path.isfile("/sys/devices/virtual/thermal/thermal_zone0/temp"):
try:
temperature = int(open("/sys/devices/virtual/thermal/thermal_zone0/temp").read().strip())/1000
except:
pass
if temperature:
return "%s %s MHz (%s) %s�C" % (processor, cpu_speed, ngettext("%d core", "%d cores", cpu_count) % cpu_count, temperature)
return "%s %s MHz (%s)" % (processor, cpu_speed, ngettext("%d core", "%d cores", cpu_count) % cpu_count)
except:
return _("undefined")
def getDriverInstalledDate():
try:
from glob import g | lob
driver = [x.split("-")[1][:8] for x in open(glob("/var/lib/opkg/info/vuplus-dvb-*.control")[0], "r") if x.startswith("Version:")][0]
return "%s-%s-%s" % (driver[:4], driver[4:6], driver[6:])
except:
return _("unknown")
def getPythonVersionString():
try:
import commands
status, output = comm | ands.getstatusoutput("python -V")
return output.split(' ')[1]
except:
return _("unknown")
def GetIPsFromNetworkInterfaces():
	"""Return (interface_name, ipv4_address) tuples for all configured
	interfaces except the loopback device.

	Implemented with the SIOCGIFCONF ioctl, so only interfaces that
	currently have an IPv4 address assigned are reported.
	"""
	import socket, fcntl, struct, array, sys
	is_64bits = sys.maxsize > 2**32
	# sizeof(struct ifreq): 40 bytes on 64-bit ABIs, 32 on 32-bit.
	struct_size = 40 if is_64bits else 32
	s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
	max_possible = 8 # initial value
	while True:
		_bytes = max_possible * struct_size
		names = array.array('B')
		for i in range(0, _bytes):
			names.append(0)
		outbytes = struct.unpack('iL', fcntl.ioctl(
			s.fileno(),
			0x8912, # SIOCGIFCONF
			struct.pack('iL', _bytes, names.buffer_info()[0])
		))[0]
		# If the kernel filled the whole buffer there may be more interfaces:
		# double the buffer and retry until the result fits with room to spare.
		if outbytes == _bytes:
			max_possible *= 2
		else:
			break
	namestr = names.tostring()
	ifaces = []
	for i in range(0, outbytes, struct_size):
		# The first 16 bytes of each ifreq hold the NUL-padded interface name.
		iface_name = bytes.decode(namestr[i:i+16]).split('\0', 1)[0].encode('ascii')
		if iface_name != 'lo':
			# Bytes 20-24 hold the IPv4 address (sockaddr_in.sin_addr).
			iface_addr = socket.inet_ntoa(namestr[i+20:i+24])
			ifaces.append((iface_name, iface_addr))
	return ifaces
# For modules that do "from About import about"
# (exposes this module object itself under the name ``about``).
about = sys.modules[__name__]
|
danakj/chromium | chrome/browser/resources/chromeos/chromevox/tools/publish_webstore_extension.py | Python | bsd-3-clause | 4,983 | 0.009231 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
'''Publishes a set of extensions to the webstore.
Given an unpacked extension, compresses and sends to the Chrome webstore.
Releasing to the webstore should involve the following manual steps before
running this script:
1. clean the output directory.
2. make a release build.
3. run manual smoke tests.
4. run automated tests.
'''
import webstore_extension_util
import generate_manifest
import json
import optparse
import os
import sys
import tempfile
from zipfile import ZipFile
_CHROMEVOX_ID = 'kgejglhpjiefppelpmljglcjbhoiplfn'
_SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
_CHROME_SOURCE_DIR = os.path.normpath(
os.path.join(
_SCRIPT_DIR, *[os.path.pardir] * 6))
sys.path.insert(
0, os.path.join(_CHROME_SOURCE_DIR, 'build', 'util'))
import version
# A list of files (or directories) to exclude from the webstore build.
EXCLUDE_PATHS = [
'manifest.json',
'manifest_guest.json',
]
def CreateOptionParser():
  """Build the command-line parser for the publishing script."""
  parser = optparse.OptionParser(description=__doc__)
  parser.usage = (
      '%prog --client_secret <client_secret> extension_id:extension_path ...')
  # OAuth client secret used for every webstore API call.
  parser.add_option('--client_secret', '-c', action='store',
                    dest='client_secret', metavar='CLIENT_SECRET')
  # If set, upload and publish immediately instead of the interactive prompt.
  parser.add_option('--publish', '-p', action='store_true',
                    help='publish the extension(s)')
  return parser
def GetVersion():
  '''Return the full chrome version string (MAJOR.MINOR.BUILD.PATCH).'''
  # chrome/VERSION holds one KEY=VALUE pair per line; substitute the values
  # into the dotted template.
  version_file = os.path.join(_CHROME_SOURCE_DIR, 'chrome', 'VERSION')
  return version.subst_template(
      '@MAJOR@.@MINOR@.@BUILD@.@PATCH@', version.fetch_values([version_file]))
def MakeChromeVoxManifest():
  '''Create a manifest for the webstore.

  Renders manifest.json.jinja2 with webstore-specific flags and the current
  chrome version number.

  Returns:
    Temporary file with generated manifest.  The caller must keep a
    reference to this object alive: the temp file is deleted when it is
    garbage collected.
  '''
  # bufsize=0 (Python 2 only kwarg): unbuffered, so the rendered manifest is
  # fully on disk as soon as the template is processed.
  new_file = tempfile.NamedTemporaryFile(mode='w+a', bufsize=0)
  in_file_name = os.path.join(_SCRIPT_DIR, os.path.pardir,
                              'manifest.json.jinja2')
  context = {
      'is_guest_manifest': '0',
      'is_js_compressed': '1',
      'is_webstore': '1',
      'set_version': GetVersion()
  }
  generate_manifest.processJinjaTemplate(in_file_name, new_file.name, context)
  return new_file
def RunInteractivePrompt(client_secret, output_path):
  """Interactive prompt for the manual release flow.

  Commands: u)pload the zip, g)et upload status, t)rusted-tester publish,
  p)ublic publish, q)uit.

  Args:
    client_secret: OAuth client secret for the webstore API.
    output_path: file object holding the zipped extension.
  """
  # NOTE(review): 'input' shadows the builtin; harmless in this scope.
  input = ''
  while True:
    print 'u upload'
    print 'g get upload status'
    print 't publish trusted tester'
    print 'p publish public'
    print 'q quit'
    input = raw_input('Please select an option: ')
    input = input.strip()
    if input == 'g':
      print ('Upload status: %s' %
             webstore_extension_util.GetUploadStatus(client_secret).read())
    elif input == 'u':
      print ('Uploaded with status: %s' %
             webstore_extension_util.PostUpload(output_path.name, client_secret))
    elif input == 't':
      print ('Published to trusted testers with status: %s' %
             webstore_extension_util.PostPublishTrustedTesters(
                 client_secret).read())
    elif input == 'p':
      print ('Published to public with status: %s' %
             webstore_extension_util.PostPublish(client_secret).read())
    elif input == 'q':
      sys.exit()
    else:
      print 'Unrecognized option: %s' % input
def main():
  """Package each id:path argument into a zip and upload/publish it.

  Each positional argument is "<webstore app id>:<unpacked extension dir>".
  With --publish the zip is uploaded and published immediately; otherwise an
  interactive prompt is started.
  """
  options, args = CreateOptionParser().parse_args()
  if len(args) < 1 or not options.client_secret:
    print 'Expected at least one argument and --client_secret flag'
    print str(args)
    sys.exit(1)
  client_secret = options.client_secret
  for extension in args:
    webstore_extension_util.g_app_id, extension_path = extension.split(':')
    output_path = tempfile.NamedTemporaryFile()
    extension_path = os.path.expanduser(extension_path)
    is_chromevox = webstore_extension_util.g_app_id == _CHROMEVOX_ID
    with ZipFile(output_path, 'w') as zip:
      for root, dirs, files in os.walk(extension_path):
        rel_path = os.path.join(os.path.relpath(root, extension_path), '')
        # For ChromeVox, skip the checked-in manifests (EXCLUDE_PATHS): a
        # webstore-specific manifest is generated and added below.
        if is_chromevox and rel_path in EXCLUDE_PATHS:
          continue
        for extension_file in files:
          if is_chromevox and extension_file in EXCLUDE_PATHS:
            continue
          zip.write(os.path.join(root, extension_file),
                    os.path.join(rel_path, extension_file))
      if is_chromevox:
        manifest_file = MakeChromeVoxManifest()
        zip.write(manifest_file.name, 'manifest.json')
    print 'Created extension zip file in %s' % output_path.name
    print 'Please run manual smoke tests before proceeding.'
    if options.publish:
      print('Uploading...%s' %
            webstore_extension_util.PostUpload(output_path.name, client_secret))
      print('publishing...%s' %
            webstore_extension_util.PostPublish(client_secret).read())
    else:
      RunInteractivePrompt(client_secret, output_path)
if __name__ == '__main__':
main()
|
dangillet/cocos | cocos/scenes/pause.py | Python | bsd-3-clause | 3,820 | 0.000524 | # ----------------------------------------------------------------------------
# cocos2d
# Copyright (c) 2008-2012 Daniel Moisset, Ricardo Quesada, Rayentray Tappa,
# Lucio Torre
# Copyright (c) 2009-2015 Richard Jones, Claudio Canepa
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of cocos2d nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
"""Pause scene"""
from __future__ import division, print_function, unicode_literals
__docformat__ = 'restructuredtext'
from cocos.director import director
from cocos.layer import Layer, ColorLayer
from cocos.scene import Scene
import pyglet
from pyglet.gl import *
__pause_scene_generator__ = None
def get_pause_scene():
    """Return a new pause scene built by the currently installed generator."""
    return __pause_scene_generator__()
def set_pause_scene_generator(generator):
    """Install *generator*, a zero-argument callable returning a pause scene."""
    global __pause_scene_generator__
    __pause_scene_generator__ = generator
def default_pause_scene():
    """Build the stock pause scene: a screenshot of the current frame, dimmed
    by a dark overlay, with a 'PAUSED' label on top."""
    w, h = director.window.width, director.window.height
    texture = pyglet.image.Texture.create_for_size(
        GL_TEXTURE_2D, w, h, GL_RGBA)
    # Capture the currently displayed frame into the texture.
    texture.blit_into(pyglet.image.get_buffer_manager().get_color_buffer(), 0, 0, 0)
    return PauseScene(texture.get_region(0, 0, w, h),
                      ColorLayer(25, 25, 25, 205), PauseLayer())
set_pause_scene_generator(default_pause_scene)
class PauseScene(Scene):
    """Scene shown while the game is paused.

    Draws *background* (typically a screenshot texture of the paused frame)
    stretched to the window size, then the regular child layers on top.
    """
    def __init__(self, background, *layers):
        super(PauseScene, self).__init__(*layers)
        self.bg = background
        self.width, self.height = director.get_window_size()

    def draw(self):
        self.bg.blit(0, 0, width=self.width, height=self.height)
        super(PauseScene, self).draw()
class PauseLayer(Layer):
    """Layer that shows the text 'PAUSED' and resumes on Accel+P.
    """
    is_event_handler = True  #: enable pyglet's events

    def __init__(self):
        super(PauseLayer, self).__init__()
        x, y = director.get_window_size()
        ft = pyglet.font.load('Arial', 36)
        self.text = pyglet.font.Text(ft,
                                     'PAUSED',
                                     halign=pyglet.font.Text.CENTER)
        # Center the label in the window.
        self.text.x = x // 2
        self.text.y = y // 2

    def draw(self):
        self.text.draw()

    def on_key_press(self, k, m):
        # MOD_ACCEL is Ctrl on Windows/Linux and Cmd on macOS; popping the
        # director's scene stack resumes the game.
        if k == pyglet.window.key.P and m & pyglet.window.key.MOD_ACCEL:
            director.pop()
            return True
|
kylehogan/haas | haas/migrations/versions/89630e3872ec_network_acl.py | Python | apache-2.0 | 2,070 | 0 | """network ACL
Revision ID: 89630e3872ec
Revises: 6a8c19565060
Create Date: 2016-05-06 09:24:26.911562
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '89630e3872ec'
down_revision = '6a8c19565060'
branch_labels = ('haas',)
def upgrade():
    """Replace network.access_id (single-project grant) with a many-to-many
    network_projects table, and rename network.creator_id to owner_id."""
    op.create_table(
        'network_projects',
        sa.Column('project_id', sa.Integer(), nullable=True),
        sa.Column('network_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['network_id'], ['network.id'], ),
        sa.ForeignKeyConstraint(['project_id'], ['project.id'], )
    )
    # Lightweight table construct so bulk_insert can target the new table
    # without an ORM mapping.
    network_projects = sa.sql.table(
        'network_projects',
        sa.Column('project_id', sa.Integer(), nullable=True),
        sa.Column('network_id', sa.Integer(), nullable=True),
    )
    # Copy each network's existing single-project grant into the join table.
    conn = op.get_bind()
    res = conn.execute(
        "select id, access_id from network where access_id >= 1")
    results = res.fetchall()
    networks = [{'network_id': r[0], 'project_id': r[1]} for r in results]
    op.bulk_insert(network_projects, networks)
    op.alter_column(u'network', 'creator_id', new_column_name='owner_id')
    # Drop the foreign key before dropping the column it covers.
    op.drop_constraint(u'network_access_id_fkey',
                       'network',
                       type_='foreignkey')
    op.drop_column(u'network', 'access_id')
def downgrade():
    """Restore the access_id column/FK and drop network_projects.

    Note: rows copied into network_projects by upgrade() are not migrated
    back into access_id; that data is lost on downgrade.
    """
    op.add_column(u'network',
                  sa.Column('access_id',
                            sa.INTEGER(),
                            autoincrement=False,
                            nullable=True))
    op.alter_column(u'network', 'owner_id', new_column_name='creator_id')
    op.create_foreign_key(u'network_access_id_fkey', 'network', 'project',
                          ['access_id'], ['id'])
    # Drop both FKs from the join table before dropping the table itself.
    op.drop_constraint(u'network_projects_project_id_fkey',
                       'network_projects',
                       type_='foreignkey')
    op.drop_constraint(u'network_projects_network_id_fkey',
                       'network_projects',
                       type_='foreignkey')
    op.drop_table('network_projects')
|
class summing_list:
    """List-like container with O(log n) point updates and range sums.

    Layer 0 holds the raw elements; each layer above stores pairwise sums of
    the layer below (a segment tree laid out as a list of layers), so prefix
    sums and item assignment both touch one entry per layer.
    """

    def __init__(self, iter=None):
        # State must be per-instance.  In the original these were *class*
        # attributes, so every summing_list silently shared one backing store.
        self.layers = [[0]]
        self.size = 0
        if iter is not None:
            for i in iter:
                self.append(i)

    def _sum(self, i):
        """Return the sum of the first ``i`` elements (prefix sum)."""
        t = 0
        for r in self.layers:
            # An odd index means this layer contributes one complete block.
            if i % 2:
                t += r[i - 1]
            i >>= 1
        return t

    def sum_elements(self, i=None, j=None):
        """Sum of elements in [min(i, j), max(i, j)); with one argument, the
        first ``i`` elements; with none, the whole list."""
        if j is None:
            if i is None:
                i = self.size
            return self._sum(i)
        else:
            return self._sum(max(i, j)) - self._sum(min(i, j))

    def __getitem__(self, i):
        if i < self.size:
            return self.layers[0][i]
        else:
            # NOTE: kept as ValueError (not IndexError) for backward compat.
            raise ValueError()

    def __setitem__(self, i, v):
        # Propagate the delta through every layer above the leaf.
        d = v - self.layers[0][i]
        for r in self.layers:
            r[i] += d
            i >>= 1

    def _double_size(self):
        """Double the capacity (zero-filled) and add a new top layer."""
        for r in self.layers:
            r += [0] * len(r)
        self.layers += [[self.layers[-1][0]]]

    def __iadd__(self, iter):
        for i in iter:
            self.append(i)
        return self

    def __add__(self, x):
        # Copy self, then extend with x's elements.
        both = summing_list(self)
        both += x
        return both

    def append(self, x):
        self.size += 1
        if self.size > len(self.layers[0]):
            self._double_size()
        self[self.size - 1] = x

    def __repr__(self):
        return self.layers[0][:self.size].__repr__()

    def __iter__(self):
        return iter(self.layers[0][:self.size])
Wopple/GJK | python/test.py | Python | bsd-3-clause | 2,305 | 0.009978 | #!/usr/bin/python
"""
Pygame script to test that the algorithm works.
"""
import sys
import pygame
from pygame.locals import *
import gjk
pygame.init()
# 800x600 window and a clock for capping the frame rate at 60 FPS.
SCREEN = pygame.display.set_mode((800, 600))
CLOCK = pygame.time.Clock()
# RGB color constants.
BLACK = (  0,   0,   0)
WHITE = (255, 255, 255)
BLUE  = (  0,   0, 255)
GREEN = (  0, 255,   0)
RED   = (255,   0,   0)
def run():
    """Main loop: draw a fixed shape and a mouse-following polygon, colored
    green when the GJK test reports a collision and red otherwise."""
    circle1 = ((400, 300), 100)
    poly1 = (
        ( 00 + 400, 50 + 300),
        (-50 + 400, 50 + 300),
        (-50 + 400, 0 + 300)
    )
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
            elif event.type == KEYDOWN:
                if event.key == K_ESCAPE:
                    sys.exit()
                elif event.key == K_UP:
                    pass
                elif event.key == K_DOWN:
                    pass
                elif event.key == K_LEFT:
                    pass
                elif event.key == K_RIGHT:
                    pass
        SCREEN.fill(WHITE)
        poly2 = makePolyFromMouse()
        # Swap the commented pair in to test poly/poly instead of poly/circle.
        #collide = gjk.collidePolyPoly(poly2, poly1)
        #polygon(poly1)
        collide = gjk.collidePolyCircle(poly2, circle1)
        circle(circle1)
        polygon(poly2, GREEN if collide else RED)
        pygame.display.flip()
        CLOCK.tick(60)
def makePolyFromMouse():
    """Build the mouse-following hexagon: fixed offsets around the cursor."""
    mx, my = pygame.mouse.get_pos()
    offsets = (
        ( 50,  50),
        ( 50, -15),
        ( 40, -30),
        ( 20, -50),
        (  0, -50),
        (-60,   0),
    )
    return tuple((mx + dx, my + dy) for dx, dy in offsets)
def pairs(points):
    """Yield each point paired with its predecessor (wrapping around), i.e.
    the polygon's edges as (current, previous) tuples."""
    # Python 3 compatible: the original used the Python 2-only ``xrange``.
    for i in range(len(points)):
        yield (points[i], points[i - 1])
def circles(cs, color=BLACK, camera=(0, 0)):
    """Draw each ((x, y), radius) circle in *cs* with the given color/offset."""
    for c in cs:
        circle(c, color, camera)
def circle(c, color=BLACK, camera=(0, 0)):
    """Draw one circle given as ((x, y), radius), offset by *camera*."""
    pygame.draw.circle(SCREEN, color, add(c[0], camera), c[1])
def polygon(points, color=BLACK, camera=(0, 0)):
    """Draw the closed polygon outline through *points*."""
    for a, b in pairs(points):
        line(a, b, color, camera)
def line(start, end, color=BLACK, camera=(0, 0)):
    """Draw a line segment from *start* to *end*, offset by *camera*."""
    pygame.draw.line(SCREEN, color, add(start, camera), add(end, camera))
def add(p1, p2):
    """Return the component-wise sum of two 2-D points as an (x, y) tuple."""
    x = p1[0] + p2[0]
    y = p1[1] + p2[1]
    return x, y
if __name__ == '__main__':
run()
|
kapilt/cloud-custodian | tools/c7n_azure/c7n_azure/query.py | Python | apache-2.0 | 13,063 | 0.001225 | # Copyright 2018 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
import six
from c7n_azure import constants
from c7n_azure.actions.logic_app import LogicAppAction
from azure.mgmt.resourcegraph.models import QueryRequest
from c7n_azure.actions.notify import Notify
from c7n_azure.filters import ParentFilter
from c7n_azure.provider import resources
from c7n.actions import ActionRegistry
from c7n.exceptions import PolicyValidationError
from c7n.filters import FilterRegistry
from c7n.manager import ResourceManager
from c7n.query import sources, MaxResourceLimit
from c7n.utils import local_session
log = logging.getLogger('custodian.azure.query')
class ResourceQuery(object):
    """Runs a resource type's enum_spec query against the Azure SDK client."""

    def __init__(self, session_factory):
        self.session_factory = session_factory

    def filter(self, resource_manager, **params):
        """Enumerate all resources for *resource_manager*, serialized to dicts."""
        m = resource_manager.resource_type
        enum_op, list_op, extra_args = m.enum_spec
        if extra_args:
            params.update(extra_args)
        params.update(m.extra_args(resource_manager))
        try:
            # Resolves to client.<enum_op>.<list_op>(**params).
            op = getattr(getattr(resource_manager.get_client(), enum_op), list_op)
            result = op(**params)
            if isinstance(result, Iterable):
                return [r.serialize(True) for r in result]
            elif hasattr(result, 'value'):
                return [r.serialize(True) for r in result.value]
        except Exception as e:
            log.error("Failed to query resource.\n"
                      "Type: azure.{0}.\n"
                      "Error: {1}".format(resource_manager.type, e))
            raise
        raise TypeError("Enumerating resources resulted in a return"
                        "value which could not be iterated.")

    @staticmethod
    def resolve(resource_type):
        # Accepts only a class object; returns it unchanged.
        if not isinstance(resource_type, type):
            raise ValueError(resource_type)
        else:
            m = resource_type
            return m
@sources.register('describe-azure')
class DescribeSource(object):
    """Default resource source: enumerates via the SDK using ResourceQuery."""
    resource_query_factory = ResourceQuery

    def __init__(self, manager):
        self.manager = manager
        self.query = self.resource_query_factory(self.manager.session_factory)

    def validate(self):
        pass

    def get_resources(self, query):
        # *query* is unused; the manager's enum_spec drives the call.
        return self.query.filter(self.manager)

    def get_permissions(self):
        return ()

    def augment(self, resources):
        return resources
@sources.register('resource-graph')
class ResourceGraphSource(object):
    """Experimental source that enumerates via the Azure Resource Graph API."""

    def __init__(self, manager):
        self.manager = manager

    def validate(self):
        # Resource Graph needs an ARM type string on the resource_type class.
        if not hasattr(self.manager.resource_type, 'resource_type'):
            raise PolicyValidationError(
                "%s is not supported with the Azure Resource Graph source."
                % self.manager.data['resource'])

    def get_resources(self, _):
        log.warning('The Azure Resource Graph source '
                    'should not be used in production scenarios at this time.')
        session = self.manager.get_session()
        client = session.client('azure.mgmt.resourcegraph.ResourceGraphClient')
        # empty scope will return all resource
        query_scope = ""
        if self.manager.resource_type.resource_type != 'armresource':
            query_scope = "where type =~ '%s'" % self.manager.resource_type.resource_type
        query = QueryRequest(
            query=query_scope,
            subscriptions=[session.get_subscription_id()]
        )
        res = client.resources(query)
        # Rows come back positional; zip them with column names into dicts.
        cols = [c['name'] for c in res.data['columns']]
        data = [dict(zip(cols, r)) for r in res.data['rows']]
        return data

    def get_permissions(self):
        return ()

    def augment(self, resources):
        return resources
class ChildResourceQuery(ResourceQuery):
    """A resource query for resources that must be queried with parent information.

    Several resource types can only be queried in the context of their
    parents identifiers. ie. SQL and Cosmos databases
    """

    def filter(self, resource_manager, **params):
        """Query a set of resources, one parent at a time."""
        m = self.resolve(resource_manager.resource_type)  # type: ChildTypeInfo
        parents = resource_manager.get_parent_manager()
        # Have to query separately for each parent's children.
        results = []
        for parent in parents.resources():
            try:
                subset = resource_manager.enumerate_resources(parent, m, **params)
                if subset:
                    # If required, append parent resource ID to all child resources
                    if m.annotate_parent:
                        for r in subset:
                            r[m.parent_key] = parent[parents.resource_type.id]
                    results.extend(subset)
            except Exception as e:
                # One failing parent doesn't abort the whole enumeration
                # unless the type opts in via raise_on_exception.
                log.warning('Child enumeration failed for {0}. {1}'
                            .format(parent[parents.resource_type.id], e))
                if m.raise_on_exception:
                    raise e
        return results
@sources.register('describe-child-azure')
class ChildDescribeSource(DescribeSource):
    # Same as DescribeSource, but enumerates per-parent via ChildResourceQuery.
    resource_query_factory = ChildResourceQuery
class TypeMeta(type):
    """Metaclass giving TypeInfo classes a readable class-level repr."""

    def __repr__(cls):
        # Repr of the *class* itself, not of instances.
        return "<Type info service:{0} client: {1}>".format(cls.service, cls.client)
@six.add_metaclass(TypeMeta)
class TypeInfo(object):
    doc_groups = None
    """api client construction information"""
    # Azure SDK module and client class used to build the API client.
    service = ''
    client = ''
    # Default id field, resources should override if different (used for meta filters, report etc)
    id = 'id'
    resource = constants.RESOURCE_ACTIVE_DIRECTORY

    @classmethod
    def extra_args(cls, resource_manager):
        # Extra keyword arguments merged into the enumeration call.
        return {}
@six.add_metaclass(TypeMeta)
class ChildTypeInfo(TypeInfo):
    """api client construction information for child resources"""
    parent_manager_name = ''
    # When True, each child is stamped with its parent's id under parent_key.
    annotate_parent = True
    # When True, a failure enumerating one parent aborts the whole query.
    raise_on_exception = True
    parent_key = 'c7n:parent-id'

    @classmethod
    def extra_args(cls, parent_resource):
        # Extra kwargs for the child enumeration, derived from the parent.
        return {}
class QueryMeta(type):
    """metaclass to have consistent action/filter registry for new resources."""

    def __new__(cls, name, parents, attrs):
        # Give every resource class its own registries unless explicitly set.
        if 'filter_registry' not in attrs:
            attrs['filter_registry'] = FilterRegistry(
                '%s.filters' % name.lower())
        if 'action_registry' not in attrs:
            attrs['action_registry'] = ActionRegistry(
                '%s.actions' % name.lower())
        return super(QueryMeta, cls).__new__(cls, name, parents, attrs)
@six.add_metaclass(QueryMeta)
class QueryResourceManager(ResourceManager):
class resource_type(TypeInfo):
pass
def __init__(self, data, options):
super(QueryResourceManager, self).__init__(data, options)
self.source = self.get_source(self.source_type)
self._session = None
def augment(self, resources):
return resources
def get_permissions(self):
return ()
def get_source(self, source_type):
return sources.get(source_type)(self)
def get_session(self):
if self._session is None:
self._session = local_session(self.session_factory)
return self._session
def get_client(self, service=None):
if not service:
return self.get_session().client(
"%s.%s" % (self.resource_type.service, self.resource_type.client))
return self.get_session().client(service)
def get_cache_key(self, query):
re |
0359xiaodong/viewfinder | backend/www/admin/metrics.py | Python | apache-2.0 | 4,540 | 0.012115 | # Copyright 2013 Viewfinder Inc. All Rights Reserved.
"""Handlers for database administration.
MetricsHandler: main handler for detailed metrics. We don't use ajax-y tables, so there is no data handler.
"""
from tornado.escape import url_escape
__author__ = 'marc@emailscrubbed.com (Marc Berhault)'
import base64
import json
import logging
import re
import time
from collections import Counter, defaultdict
from tornado import auth, gen, template
from viewfinder.backend.base import constants, handler, util
from viewfinder.bac | kend.base.dotdict import DotDict
from viewfinder.backend.db import db_client, metric, schema, vf_schema
from viewfinder.backend.www.admin import admin, formatters, data_table
kDefaultMetricName = 'itunes.downloads'
class MetricsHandler(admin.AdminHandler):
  """Renders the detailed-metrics table for one metric over a time range.

  Query arguments: metric_name, start-secs, end-secs; defaults to
  kDefaultMetricName over the last week.
  """
  @handler.authenticated()
  @handler.asynchronous(datastore=True)
  @admin.require_permission(level='support')
  @gen.engine
  def get(self):
    metric_name = self.get_argument('metric_name', kDefaultMetricName)
    end_time = int(self.get_argument('end-secs', time.time()))
    start_time = int(self.get_argument('start-secs', end_time - constants.SECONDS_PER_WEEK))
    # Resolution: always the last entry in LOGS_INTERVALS.  NOTE(review): the
    # original comment promised span-based selection, which never happened.
    selected_interval = metric.LOGS_INTERVALS[-1]
    group_key = metric.Metric.EncodeGroupKey(metric.LOGS_STATS_NAME, selected_interval)
    logging.info('Query performance counters %s, range: %s - %s, resolution: %s'
                 % (group_key, time.ctime(start_time), time.ctime(end_time), selected_interval.name))
    # Page through metric rows until a query comes back empty.
    metrics = list()
    start_key = None
    while True:
      new_metrics = yield gen.Task(metric.Metric.QueryTimespan, self._client, group_key,
                                   start_time, end_time, excl_start_key=start_key)
      if len(new_metrics) > 0:
        metrics.extend(new_metrics)
        start_key = metrics[-1].GetKey()
      else:
        break
    columns, data = _SerializeMetrics(metrics, metric_name)
    t_dict = {}
    t_dict.update(self.PermissionsTemplateDict())
    t_dict['col_names'] = columns
    t_dict['col_data'] = data
    t_dict['metric_name'] = metric_name
    t_dict['start_secs'] = start_time
    t_dict['end_secs'] = end_time
    self.render('metrics_table.html', **t_dict)
# This is very hacky: basically, we only care about some part of the metric name.
# eg: in itunes.downloads.1_2.US, we just want the US part. itunes.downloads is already removed, so the index
# we care about is the 1st (zero indexed) in the remainder.
kMetricSignificantLevel = { 'itunes.downloads': 1, 'itunes.inapp_subscriptions_auto_renew': 1, 'itunes.updates': 1 }
# Display and sort properties. Array of (regexp, sort_by_count, show_total_in_column_name).
# If the base metric name matches the regexp, we apply sort_by_count and show_total_in_column_name.
# Defaults are: sort_by_count = False, show_total_in_column_name = False.
kSortByCount = [ ('itunes.*', True, True) ]
def _SerializeMetrics(metrics, metric_name):
  """Convert Metric rows into (column_names, rows) for the metrics table.

  Flattens each metric payload under *metric_name*, optionally keeps only the
  significant name component (kMetricSignificantLevel), accumulates per-column
  and per-day totals, and orders columns per kSortByCount.  Rows are returned
  newest-first.  Python 2 only (uses dict.iteritems).
  """
  def _DisplayParams():
    # (sort_by_count, show_total_in_column_name) for this metric name.
    for regexp, sort, show in kSortByCount:
      if re.match(regexp, metric_name):
        return (sort, show)
    return (False, False)
  columns = Counter()
  data = []
  for m in metrics:
    timestamp = m.timestamp
    d = defaultdict(int)
    d['day'] = util.TimestampUTCToISO8601(timestamp).replace('-', '/')
    dd = DotDict(json.loads(m.payload))
    if metric_name not in dd:
      continue
    payload = dd[metric_name].flatten()
    for k, v in payload.iteritems():
      # Keep only the significant component of the flattened key, if any.
      if metric_name in kMetricSignificantLevel:
        k = k.split('.')[kMetricSignificantLevel[metric_name]]
      columns[k] += v
      d[k] += v
      d['Total'] += v
      columns['Total'] += v
    data.append(d)
  # We now have "columns" with totals for each column. We need to sort everything.
  sort_by_count, show_total = _DisplayParams()
  if sort_by_count:
    sorted_cols = columns.most_common()
  else:
    sorted_cols = sorted([(k, v) for k, v in columns.iteritems()])
  cols = ['Day']
  cols.append('Total %d' % columns['Total'] if show_total else 'Total')
  for k, v in sorted_cols:
    if k == 'Total':
      continue
    cols.append('%s %d' % (k, v) if show_total else k)
  sorted_data = []
  # Render newest day first; blank cells for zero counts.
  for d in reversed(data):
    s = [d['day'], d['Total']]
    for k, _ in sorted_cols:
      if k == 'Total':
        continue
      s.append(d[k] if d[k] > 0 else '')
    sorted_data.append(s)
  return (cols, sorted_data)
|
JamesLinEngineer/RKMC | addons/plugin.video.salts/scrapers/rlseries_scraper.py | Python | gpl-2.0 | 4,255 | 0.004935 | """
SALTS XBMC Addon
Copyright (C) 2014 tknorris
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
al | ong with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import re
import urlparse
import kodi
import log_utils # @UnusedImport
import dom_parser
from salts_lib import scraper_utils
from salts_lib.constants import FORCE_NO_MATCH
from salts_lib.constants i | mport QUALITIES
from salts_lib.constants import VIDEO_TYPES
import scraper
BASE_URL = 'http://rlseries.com'
class Scraper(scraper.Scraper):
    """SALTS scraper for rlseries.com (TV seasons and episodes)."""
    base_url = BASE_URL

    def __init__(self, timeout=scraper.DEFAULT_TIMEOUT):
        self.timeout = timeout
        # Allow the user-configured base URL to override the default.
        self.base_url = kodi.get_setting('%s-base_url' % (self.get_name()))

    @classmethod
    def provides(cls):
        # This site only hosts TV content.
        return frozenset([VIDEO_TYPES.SEASON, VIDEO_TYPES.EPISODE])

    @classmethod
    def get_name(cls):
        return 'RLSeries'

    def get_sources(self, video):
        """Return hoster dicts scraped from the episode page's 'v_ifo' box."""
        source_url = self.get_url(video)
        hosters = []
        if source_url and source_url != FORCE_NO_MATCH:
            page_url = urlparse.urljoin(self.base_url, source_url)
            html = self._http_get(page_url, cache_limit=1)
            fragment = dom_parser.parse_dom(html, 'div', {'class': 'v_ifo'})
            if fragment:
                for stream_url in dom_parser.parse_dom(fragment[0], 'a', ret='href'):
                    host = urlparse.urlparse(stream_url).hostname
                    quality = scraper_utils.get_quality(video, host, QUALITIES.HIGH)
                    hoster = {'multi-part': False, 'host': host, 'class': self, 'quality': quality, 'views': None, 'rating': None, 'url': stream_url, 'direct': False}
                    hosters.append(hoster)
        return hosters

    def _get_episode_url(self, season_url, video):
        """Locate a specific episode link by number, title, or air date."""
        episode_pattern = 'href="([^"]*episode-%s-[^"]*)' % (video.episode)
        title_pattern = '<a[^>]*href="(?P<url>[^"]+)[^>]+title="Episode\s+\d+:\s*(?P<title>[^"]+)'
        airdate_pattern = 'class="lst"[^>]+href="([^"]+)(?:[^>]+>){6}{p_day}/{p_month}/{year}<'
        return self._default_get_episode_url(season_url, video, episode_pattern, title_pattern, airdate_pattern)

    def search(self, video_type, title, year, season=''):  # @UnusedVariable
        """Search the alphabetical index for titles matching *title* (and
        *season* when given), following 'nextpostslink' pagination."""
        results = []
        if title and title[0].isalpha():
            page_url = ['/list/?char=%s' % (title[0])]
            while page_url:
                page_url = urlparse.urljoin(self.base_url, page_url[0])
                html = self._http_get(page_url, cache_limit=48)
                fragment = dom_parser.parse_dom(html, 'ul', {'class': 'list-film-char'})
                if fragment:
                    norm_title = scraper_utils.normalize_title(title)
                    for match in re.finditer('href="([^"]+)[^>]+>(.*?)</a>', fragment[0]):
                        match_url, match_title = match.groups()
                        match_title = re.sub('</?strong>', '', match_title)
                        match = re.search('Season\s+(\d+)', match_title, re.I)
                        if match:
                            if season and int(season) != int(match.group(1)):
                                continue
                        if norm_title in scraper_utils.normalize_title(match_title):
                            result = {'title': scraper_utils.cleanse_title(match_title), 'year': '', 'url': scraper_utils.pathify_url(match_url)}
                            results.append(result)
                    if results:
                        break
                page_url = dom_parser.parse_dom(html, 'a', {'class': 'nextpostslink'}, ret='href')
        return results
|
spaceone/httoop | tests/uri/test_uri.py | Python | mit | 930 | 0.021505 | from __future__ import unicode_literals
from httoop import URI
def test_simple_uri_comparision(uri):
	"""URIs differing only in scheme/host case, default or empty port,
	percent-encoding case, or removable dot-segments must compare equal."""
	u1 = URI(b'http://abc.com:80/~smith/home.html')
	u2 = URI(b'http://ABC.com/%7Esmith/home.html')
	u3 = URI(b'http://ABC.com:/%7esmith/home.html')
	u4 = URI(b'http://ABC.com:/%7esmith/./home.html')
	u5 = URI(b'http://ABC.com:/%7esmith/foo/../home.html')
	assert u1 == u2
	assert u2 == u3
	assert u1 == u3
	assert u1 == u4
	assert u1 == u5
def test_request_uri_maxlength():
	# TODO: assert that over-long request URIs are rejected.
	pass
def test_request_uri_is_star():
	# TODO: cover the asterisk-form request target ("*"), e.g. for OPTIONS.
	pass
def test_request_uri_containig_fragment():
	# TODO: cover request URIs that (illegally) contain a fragment part.
	pass
def test_invalid_uri_scheme():
	# TODO: cover rejection of malformed/unknown URI schemes.
	pass
def test_invalid_port():
	# TODO: cover rejection of non-numeric or out-of-range ports.
	pass
def test_normalized_uri_redirects():
	# TODO: cover redirects to the normalized form of a URI.
	pass
def test_uri_composing_username_and_password():
	"""Userinfo (with and without a password) must round-trip through bytes()."""
	assert bytes(URI(b'http://username@example.com')) == b'http://username@example.com'
	assert bytes(URI(b'http://username:password@example.com')) == b'http://username:password@example.com'
|
matematik7/CSSQC | tests/test_groupProperties.py | Python | mit | 1,126 | 0.006217 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------
# test_groupProperties.py
#
# test for groupProperties rule
# ----------------------------------------------------------------
# copyright (c) 2014 - Domen Ipavec
# Distri | buted under The MIT License, see LICENSE
# ----------------------------------------------------------------
import unittest
from cssqc.parser import CSSQC
from cssqc.qualityWarning import QualityWarning
class Test_groupProperties(unittest.TestCase):
    """Unit tests for the groupProperties rule."""

    def parse(self, data):
        """Run CSSQC over *data* with the groupProperties rule set to 'galjot'."""
        c = CSSQC({"groupProperties": "galjot"})
        c.parse(data)
        return c

    def test_group_pr(self):
        # Expect warnings where properties are not grouped per the rule.
        sample = '''div {
    position: relative;
    z-index: 6;
    margin: 0;
    padding: 0;
    width: 100px;
    height: 60px;
    border: 0;
    /* background & color */
    background: #fff;
    color: #333;
    text-align: center
}
'''
        c = self.parse(sample)
        self.assertEqual(c.warnings, [
            QualityWarning('groupProperties', 4),
            QualityWarning('groupProperties', 14)
        ])
|
google/jax | tests/nn_test.py | Python | apache-2.0 | 10,349 | 0.006184 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn module."""
import collections
from functools import partial
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import scipy.stats
from jax import core
from jax._src import test_util as jtu
from jax.test_util import check_grads
from jax import nn
from jax import random
import jax
import jax.numpy as jnp
from jax.config import config
config.parse_flags_with_absl()
class NNFunctionsTest(jtu.JaxTestCase):
  @jtu.skip_on_flag("jax_skip_slow_tests", True)
  def testSoftplusGrad(self):
    # 4th-order gradient check near zero input (softplus underflow region).
    check_grads(nn.softplus, (1e-8,), order=4,
                rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
  def testSoftplusGradZero(self):
    # softplus must be differentiable exactly at x == 0.
    check_grads(nn.softplus, (0.,), order=1,
                rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
  def testSoftplusGradInf(self):
    # d/dx softplus(x) -> 1 as x -> +inf (no nan from inf arithmetic).
    self.assertAllClose(
        1., jax.grad(nn.softplus)(float('inf')))
def testSoftplusGradNegInf(self):
check_grads(nn.softplus, (-float('inf'),), order=1,
rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
def testSoftplusGradNan(self):
check_grads(nn.softplus, (float('nan'),), order=1,
rtol=1e-2 if jtu.device_under_test() == "tpu" | else None)
@parameterized.parameters([int, float] + jtu.dtype | s.floating + jtu.dtypes.integer)
def testSoftplusZero(self, dtype):
self.assertEqual(jnp.log(dtype(2)), nn.softplus(dtype(0)))
def testReluGrad(self):
rtol = 1e-2 if jtu.device_under_test() == "tpu" else None
check_grads(nn.relu, (1.,), order=3, rtol=rtol)
check_grads(nn.relu, (-1.,), order=3, rtol=rtol)
jaxpr = jax.make_jaxpr(jax.grad(nn.relu))(0.)
self.assertGreaterEqual(len(jaxpr.jaxpr.eqns), 2)
def testSoftplusValue(self):
val = nn.softplus(89.)
self.assertAllClose(val, 89., check_dtypes=False)
@jtu.skip_on_flag("jax_skip_slow_tests", True)
def testEluGrad(self):
check_grads(nn.elu, (1e4,), order=4, eps=1.)
def testEluValue(self):
val = nn.elu(1e4)
self.assertAllClose(val, 1e4, check_dtypes=False)
def testGluValue(self):
val = nn.glu(jnp.array([1.0, 0.0]))
self.assertAllClose(val, jnp.array([0.5]))
@parameterized.parameters(False, True)
def testGelu(self, approximate):
def gelu_reference(x):
return x * scipy.stats.norm.cdf(x)
rng = jtu.rand_default(self.rng())
args_maker = lambda: [rng((4, 5, 6), jnp.float32)]
self._CheckAgainstNumpy(
gelu_reference, partial(nn.gelu, approximate=approximate), args_maker,
check_dtypes=False, tol=1e-3 if approximate else None)
@parameterized.parameters(*itertools.product(
(jnp.float32, jnp.bfloat16, jnp.float16),
(partial(nn.gelu, approximate=False),
partial(nn.gelu, approximate=True),
nn.relu, nn.softplus, nn.sigmoid)))
def testDtypeMatchesInput(self, dtype, fn):
x = jnp.zeros((), dtype=dtype)
out = fn(x)
self.assertEqual(out.dtype, dtype)
def testEluMemory(self):
# see https://github.com/google/jax/pull/1640
with jax.enable_checks(False): # With checks we materialize the array
jax.make_jaxpr(lambda: nn.elu(jnp.ones((10 ** 12,)))) # don't oom
def testHardTanhMemory(self):
# see https://github.com/google/jax/pull/1640
with jax.enable_checks(False): # With checks we materialize the array
jax.make_jaxpr(lambda: nn.hard_tanh(jnp.ones((10 ** 12,)))) # don't oom
@parameterized.parameters([nn.softmax, nn.log_softmax])
def testSoftmaxWhereMask(self, fn):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.array([True, False, True, True])
x_filtered = jnp.take(x, jnp.array([0, 2, 3]))
out_masked = jnp.take(
fn(x, where=m, initial=-jnp.inf), jnp.array([0, 2, 3]))
out_filtered = fn(x_filtered)
self.assertAllClose(out_masked, out_filtered)
def testNormalizeWhereMask(self):
x = jnp.array([5.5, 1.3, -4.2, 0.9])
m = jnp.array([True, False, True, True])
x_filtered = jnp.take(x, jnp.array([0, 2, 3]))
out_masked = jnp.take(nn.normalize(x, where=m), jnp.array([0, 2, 3]))
out_filtered = nn.normalize(x_filtered)
self.assertAllClose(out_masked, out_filtered)
def testOneHot(self):
actual = nn.one_hot(jnp.array([0, 1, 2]), 3)
expected = jnp.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
self.assertAllClose(actual, expected)
actual = nn.one_hot(jnp.array([1, 2, 0]), 3)
expected = jnp.array([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]])
self.assertAllClose(actual, expected)
def testOneHotOutOfBound(self):
actual = nn.one_hot(jnp.array([-1, 3]), 3)
expected = jnp.array([[0., 0., 0.],
[0., 0., 0.]])
self.assertAllClose(actual, expected)
def testOneHotNonArrayInput(self):
actual = nn.one_hot([0, 1, 2], 3)
expected = jnp.array([[1., 0., 0.],
[0., 1., 0.],
[0., 0., 1.]])
self.assertAllClose(actual, expected)
def testOneHotCustomDtype(self):
actual = nn.one_hot(jnp.array([0, 1, 2]), 3, dtype=jnp.bool_)
expected = jnp.array([[True, False, False],
[False, True, False],
[False, False, True]])
self.assertAllClose(actual, expected)
def testOneHotConcretizationError(self):
# https://github.com/google/jax/issues/3654
msg = r"in jax.nn.one_hot argument `num_classes`"
with self.assertRaisesRegex(core.ConcretizationTypeError, msg):
jax.jit(nn.one_hot)(3, 5)
def testOneHotAxis(self):
expected = jnp.array([[0., 1., 0.],
[0., 0., 1.],
[1., 0., 0.]]).T
actual = nn.one_hot(jnp.array([1, 2, 0]), 3, axis=0)
self.assertAllClose(actual, expected)
actual = nn.one_hot(jnp.array([1, 2, 0]), 3, axis=-2)
self.assertAllClose(actual, expected)
def testTanhExists(self):
nn.tanh # doesn't crash
def testCustomJVPLeak(self):
# https://github.com/google/jax/issues/8171
@jax.jit
def fwd():
a = jnp.array(1.)
def f(hx, _):
hx = jax.nn.sigmoid(hx + a)
return hx, None
hx = jnp.array(0.)
jax.lax.scan(f, hx, None, length=2)
with jax.checking_leaks():
fwd() # doesn't crash
InitializerRecord = collections.namedtuple(
"InitializerRecord",
["name", "initializer", "shapes", "dtypes"])
ALL_SHAPES = [(2,), (2, 2), (2, 3), (3, 2), (2, 3, 4), (4, 3, 2), (2, 3, 4, 5)]
def initializer_record(name, initializer, dtypes, min_dims=2, max_dims=4):
shapes = [shape for shape in ALL_SHAPES
if min_dims <= len(shape) <= max_dims]
return InitializerRecord(name, initializer, shapes, dtypes)
INITIALIZER_RECS = [
initializer_record("uniform", nn.initializers.uniform, jtu.dtypes.floating, 1),
initializer_record("normal", nn.initializers.normal, jtu.dtypes.inexact, 1),
initializer_record("he_normal", nn.initializers.he_normal, jtu.dtypes.inexact),
initializer_record("he_uniform", nn.initializers.he_uniform, jtu.dtypes.inexact),
initializer_record("glorot_normal", nn.initializers.glorot_normal, jtu.dtypes.inexact),
initializer_record("glorot_uniform", nn.initializers.glorot_uniform, jtu.dtypes.inexact),
initializer_record("lecun_normal", nn.initializers.lecun_normal, jtu.dtypes.inexact),
initializer_record("lecun_uniform", nn.initializers.lecun_uniform, jtu.dtypes.inexact),
initializer_record("orthogonal", nn.initializers.orthogonal, jtu.dtypes.floating, 2, 2),
init |
adebali/mistipy | tests/test_mistipy.py | Python | mit | 1,075 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_mistipy
----------------------------------
Tests for `mistipy` module.
"""
import pytest
# from contextlib import contextmanager
from click.testing import CliRunner
# from mistipy import mistipy
from mistipy import cli
@pytest.fixture
def response():
"""Sample pytest fixture.
See more at: http://doc.pytest.org/en/latest/fixture.html
"""
# import requests
# return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
def test_content(response):
"""Sample pytest test function with the pytest fixture as an argument.
"""
| # from bs4 import BeautifulSoup
# assert 'GitHub' in BeautifulSoup(response.content).title.string
def test_command_line_interface():
runner = CliRunner()
result = runner.invoke(cli.main)
assert result.exit_code == 0
assert 'mistipy.cli.main' in result.output
help_result = runner.invoke(cli.main, ['--help'])
assert help_result.exit_code == 0
assert '--help Show this message and exit.' in help_result.ou | tput
|
siosio/intellij-community | python/testData/formatter/alightDictLiteralOnValueSubscriptionsAndSlices_after.py | Python | apache-2.0 | 326 | 0.003067 | my_dict = {
"one": | example_list[0],
"two": example_list[1],
"three": example_list[2:3],
"some really long element name that takes a lot of spa | ce": "four"
}
|
britcey/ansible | lib/ansible/modules/storage/netapp/netapp_e_lun_mapping.py | Python | gpl-3.0 | 12,380 | 0.003069 | #!/usr/bin/python
# (c) 2016, NetApp, Inc
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, | or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOU | T ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: netapp_e_lun_mapping
author: Kevin Hulquest (@hulquest)
short_description: Create or Remove LUN Mappings
description:
- Allows for the creation and removal of volume to host mappings for NetApp E-series storage arrays.
version_added: "2.2"
options:
validate_certs:
required: false
default: true
description:
- Should https certificates be validated?
ssid:
description:
- "The storage system array identifier."
required: False
lun:
description:
- The LUN number you wish to give the mapping
- If the supplied I(volume_name) is associated with a different LUN, it will be updated to what is supplied here.
required: False
default: 0
target:
description:
- The name of host or hostgroup you wish to assign to the mapping
- If omitted, the default hostgroup is used.
- If the supplied I(volume_name) is associated with a different target, it will be updated to what is supplied here.
required: False
volume_name:
description:
- The name of the volume you wish to include in the mapping.
required: True
target_type:
description:
- Whether the target is a host or group.
- Required if supplying an explicit target.
required: False
choices: ["host", "group"]
state:
description:
- Present will ensure the mapping exists, absent will remove the mapping.
- All parameters I(lun), I(target), I(target_type) and I(volume_name) must still be supplied.
required: True
choices: ["present", "absent"]
api_url:
description:
- "The full API url. Example: http://ENDPOINT:8080/devmgr/v2"
- This can optionally be set via an environment variable, API_URL
required: False
api_username:
description:
- The username used to authenticate against the API. This can optionally be set via an environment variable, API_USERNAME
required: False
api_password:
description:
- The password used to authenticate against the API. This can optionally be set via an environment variable, API_PASSWORD
required: False
'''
EXAMPLES = '''
---
- name: Lun Mapping Example
netapp_e_lun_mapping:
state: present
ssid: 1
lun: 12
target: Wilson
volume_name: Colby1
target_type: group
api_url: "{{ netapp_api_url }}"
api_username: "{{ netapp_api_username }}"
api_password: "{{ netapp_api_password }}"
'''
RETURN = '''
msg:
description: Status of mapping
returned: always
type: string
sample: 'Mapping existing'
'''
import json
from ansible.module_utils.api import basic_auth_argument_spec
from ansible.module_utils.pycompat24 import get_exception
from ansible.module_utils.urls import open_url
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.six.moves.urllib.error import HTTPError
HEADERS = {
"Content-Type": "application/json",
"Accept": "application/json"
}
def request(url, data=None, headers=None, method='GET', use_proxy=True,
force=False, last_mod_time=None, timeout=10, validate_certs=True,
url_username=None, url_password=None, http_agent=None, force_basic_auth=True, ignore_errors=False):
try:
r = open_url(url=url, data=data, headers=headers, method=method, use_proxy=use_proxy,
force=force, last_mod_time=last_mod_time, timeout=timeout, validate_certs=validate_certs,
url_username=url_username, url_password=url_password, http_agent=http_agent,
force_basic_auth=force_basic_auth)
except HTTPError:
err = get_exception()
r = err.fp
try:
raw_data = r.read()
if raw_data:
data = json.loads(raw_data)
else:
raw_data = None
except:
if ignore_errors:
pass
else:
raise Exception(raw_data)
resp_code = r.getcode()
if resp_code >= 400 and not ignore_errors:
raise Exception(resp_code, data)
else:
return resp_code, data
def get_host_and_group_map(module, ssid, api_url, user, pwd):
mapping = dict(host=dict(), group=dict())
hostgroups = 'storage-systems/%s/host-groups' % ssid
groups_url = api_url + hostgroups
try:
hg_rc, hg_data = request(groups_url, headers=HEADERS, url_username=user, url_password=pwd)
except:
err = get_exception()
module.fail_json(msg="Failed to get host groups. Id [%s]. Error [%s]" % (ssid, str(err)))
for group in hg_data:
mapping['group'][group['name']] = group['id']
hosts = 'storage-systems/%s/hosts' % ssid
hosts_url = api_url + hosts
try:
h_rc, h_data = request(hosts_url, headers=HEADERS, url_username=user, url_password=pwd)
except:
err = get_exception()
module.fail_json(msg="Failed to get hosts. Id [%s]. Error [%s]" % (ssid, str(err)))
for host in h_data:
mapping['host'][host['name']] = host['id']
return mapping
def get_volume_id(module, data, ssid, name, api_url, user, pwd):
qty = 0
for volume in data:
if volume['name'] == name:
qty += 1
if qty > 1:
module.fail_json(msg="More than one volume with the name: %s was found, "
"please use the volume WWN instead" % name)
else:
wwn = volume['wwn']
try:
return wwn
except NameError:
module.fail_json(msg="No volume with the name: %s, was found" % (name))
def get_hostgroups(module, ssid, api_url, user, pwd):
groups = "storage-systems/%s/host-groups" % ssid
url = api_url + groups
try:
rc, data = request(url, headers=HEADERS, url_username=user, url_password=pwd)
return data
except Exception:
module.fail_json(msg="There was an issue with connecting, please check that your"
"endpoint is properly defined and your credentials are correct")
def get_volumes(module, ssid, api_url, user, pwd, mappable):
volumes = 'storage-systems/%s/%s' % (ssid, mappable)
url = api_url + volumes
try:
rc, data = request(url, url_username=user, url_password=pwd)
except Exception:
err = get_exception()
module.fail_json(
msg="Failed to mappable objects. Type[%s. Id [%s]. Error [%s]." % (mappable, ssid, str(err)))
return data
def get_lun_mappings(ssid, api_url, user, pwd, get_all=None):
mappings = 'storage-systems/%s/volume-mappings' % ssid
url = api_url + mappings
rc, data = request(url, url_username=user, url_password=pwd)
if not get_all:
remove_keys = ('ssid', 'perms', 'lunMappingRef', 'type', 'id')
for key in remove_keys:
for mapping in data:
del mapping[key]
return data
def create_mapping(module, ssid, lun_map, vol_name, api_url, user, pwd):
mappings = 'storage-systems/%s/volume-mappings' % ssid
url = api_url + mappings
post_body = json.dumps(dict(
mappableObjectId=lun_map['volumeRef'],
targetId=lun_map['mapRef'],
lun=lun_map['lun']
))
rc, data = request(url, data=post_body, method='POST', url_username=user, url_password=pwd, headers=HEA |
karajrish/ChatterBot | chatterbot/adapters/logic/closest_match.py | Python | bsd-3-clause | 666 | 0 | from .logic import LogicAdapter
class ClosestMatchAdapter(LogicAdapter):
def get(self, text, list_of_statements):
"""
Takes a statement string and a list of statement strings.
Returns the closest matching statement from the list.
"""
from fuzzywuzzy import process
# If the list is empty, return the statement
if not list_of_statements:
return t | ext
# Check if an exact match exists
if text in list_of_statements:
return text
# Get t | he closest matching statement from the database
return process.extract(text, list_of_statements, limit=1)[0][0]
|
stencila/hub | manager/accounts/migrations/0004_auto_20200721_0252.py | Python | apache-2.0 | 663 | 0.003017 | # Generated by Django 3.0.8 on 2020-07-21 02:52
from django.db import migrations, models
class Migrati | on(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20200720_2208'),
]
operations = [
migrations.AddField(
model_name='accounttier',
name='active',
field=models.BooleanField(default=True, help_text='Is the tier active i.e. should be displayed to users.'),
),
migrations.AlterField(
model_name='accounttier',
name='name',
field=m | odels.CharField(help_text='The name of the tier.', max_length=64, unique=True),
),
]
|
kawamon/hue | apps/pig/src/pig/migrations/0001_initial.py | Python | apache-2.0 | 1,645 | 0.00304 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-06-06 18:55
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Document',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('is_design', models.BooleanField(db_index=True, default=True, help_text='If the document is not a submitted job but a real query, script, workflow.', verbos | e_name='Is a user document, not a document submission.')),
],
),
migrations.CreateModel(
name='PigScript',
fields=[
| ('document_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='pig.Document')),
('data', models.TextField(default='{"name": "", "parameters": [], "script": "", "hadoopProperties": [], "properties": [], "resources": [], "job_id": null}')),
],
bases=('pig.document',),
),
migrations.AddField(
model_name='document',
name='owner',
field=models.ForeignKey(help_text='User who can modify the job.', on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Owner'),
),
]
|
ideanotion/infoneigeapi | infoneige/settings.py | Python | gpl-2.0 | 2,053 | 0.00341 | """
Django settings for infoneige project.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.6/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.6/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4dhoa-&xuxj594s#ooy=z@gquzl199=-t8k-i2@348qjs-87_t'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'api',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware', |
'django.middleware.common.Comm | onMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'infoneige.urls'
WSGI_APPLICATION = 'infoneige.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.6/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.dummy',
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.6/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.6/howto/static-files/
STATIC_URL = '/static/'
REST_FRAMEWORK = {
'DEFAULT_FILTER_BACKENDS': ('rest_framework.filters.DjangoFilterBackend',)
} |
Clinical-Genomics/scout | tests/load/test_load_case.py | Python | bsd-3-clause | 1,272 | 0.007862 | def test_load_case(case_obj, adapter):
## GIVEN a database with no cases
assert adapter.case_collection.find_one() is None
## WHEN loading a case
adapter._add_case(case_obj)
## THEN assert that the case have been loaded with correct info
assert adapter.case_collection.find_one()
def test_load_case_rank_model_version(case_obj, adapter):
## GIVEN a database with no cases
assert adapter.case_collection.find_one() is None
## WHEN loading a case
adapter._add_case(case_obj)
## THEN assert that the case have been loaded with rank_model
loaded_case = adapter.case_collection.find_one({"_id": case_obj["_id"]})
assert loaded_case["rank_model_version"] == case_obj["rank_model_version"]
assert loaded_case["sv_rank_model_version"] == case_obj["sv_rank_model_version"]
def test_load_case_l | imsid(case_obj, adapter):
"""Test loading a case with lims_id"""
| ## GIVEN a database with no cases
assert adapter.case_collection.find_one() is None
## WHEN loading a case
adapter._add_case(case_obj)
## THEN assert that the case have been loaded with lims id
loaded_case = adapter.case_collection.find_one({"_id": case_obj["_id"]})
assert loaded_case["lims_id"] == case_obj["lims_id"]
|
EliotBryant/ShadDetector | shadDetector_testing/Colour Based Methods/ColorHistogram-master/color_histogram/core/color_pixels.py | Python | gpl-3.0 | 1,910 | 0.003665 | # -*- coding: utf-8 -*-
## @package color_histogram.core.color_pixels
#
# Simple color pixel class.
#
# @author tody
# @date 2015/08/28
import numpy as np
from color_histogram.cv.image import to32F, rgb2Lab, rgb2hsv, gray2rgb
## Implementation of color pixels.
#
# input image is automatically converted into np.float32 format.
class ColorPixels:
## Constructor
# @param image input | image.
# @param num_pixels target number of pixels from the image.
def __init__(self, image, num_pixels=1000):
self._image | = to32F(image)
self._num_pixels = num_pixels
self._rgb_pixels = None
self._Lab = None
self._hsv = None
## RGB pixels.
def rgb(self):
if self._rgb_pixels is None:
self._rgb_pixels = self.pixels("rgb")
return self._rgb_pixels
## Lab pixels.
def Lab(self):
if self._Lab is None:
self._Lab = self.pixels("Lab")
return self._Lab
## HSV pixels.
def hsv(self):
if self._hsv is None:
self._hsv = self.pixels("hsv")
return self._hsv
## Pixels of the given color space.
def pixels(self, color_space="rgb"):
image = np.array(self._image)
if color_space == "rgb":
if _isGray(image):
image = gray2rgb(image)
if color_space == "Lab":
image = rgb2Lab(self._image)
if color_space == "hsv":
image = rgb2hsv(self._image)
return self._image2pixels(image)
def _image2pixels(self, image):
if _isGray(image):
h, w = image.shape
step = h * w / self._num_pixels
return image.reshape((h * w))[::step]
h, w, cs = image.shape
step = h * w / self._num_pixels
return image.reshape((-1, cs))[::step]
def _isGray(image):
return len(image.shape) == 2
|
GeoMop/GeoMop | testing/JobPanel/services/job_1.py | Python | gpl-3.0 | 103 | 0 | i | mport time
for i in range(30):
time.sleep(1)
print("running {} s".format(i+1))
pr | int("end")
|
Apreche/Project-DORF | game.py | Python | mit | 6,031 | 0.00315 | import sys
import pygame
import time
import cPickle
from view_port import ViewPort
from grid import Grid
from mover import RandomMover
from terrain import TerrainData
from terrain.generators import MeteorTerrainGenerator, Smoother
from terrain.generators import PlasmaFractalGenerator
class Game:
def __init__(self):
#Our main variables used within the game
self.resolution = self.width, self.height = 800, 600
self.gridSize = self.xGrid, self.yGrid = 320, 240
self.movers = []
self._fontFile = pygame.font.match_font('freemono')
self._fontSize = 14
#Build our main grid
self.gameGrid = Grid()
self.make_grid(self.gridSize)
#Build the terrain as a single surface
self.terrainSurf = pygame.Surface(self.gridSize)
#Our main view port/camera
self.view = ViewPort((0, 0, 0), self.resolution, self.gridSize,
self.terrainSurf)
#initialize and blank the screen
pygame.init()
self.screen = pygame.display.set_mode(self.resolution)
pygame.display.set_caption('Project D.O.R.F.')
pygame.key.set_repeat(500, 33) # Key repeating
self.font = pygame.font.Font(self._fontFile, self._fontSize)
self.font.set_bold(True)
self.generate_terrain()
self.update_terrain_surf()
def make_grid(self, gridSize):
for x in range(0, gridSize[0]):
for y in range(0 ,gridSize[1]):
terrain = TerrainData()
self.gameGrid.add_node((x, y, 0), terrain)
def generate_terrain(self):
generator = PlasmaFractalGenerator(200)
generator.apply(self.gameGrid)
self.gameGrid.connect_grid()
generator = MeteorTerrainGenerator()
smoother = Smoother(0.5)
generator.apply(self.gameGrid)
smoother.apply(self.gameGrid)
# Updates the main game surface (SLOW!)
def update_terrain_surf(self):
for x in xrange(0, self.xGrid):
for y in xrange(0, self.yGrid):
loc = (x, y, self.view.z)
terrainNode = self.gameGrid.get_node_at(loc)
if terrainNode is not None:
rect = pygame.Rect(x, y, 1, 1)
terrainNode.contents.render(rect, self.terrainSurf)
# updates the screen to show the appropriate visible nodes
def update_display(self):
self.view.render_terrain(self.screen)
self.frame += 1
if time.time() - self.time > 1:
self.time = time.time()
self.fps = self.frame
self.frame = 0
text = self.font.render(str(self.view) +
" FPS:{0}".format(self.fps), 1, (0, 255, 0))
rect = text.get_rect()
rect.x, rect.y = (0,0)
self.screen.blit(text, rect)
self.display_movers()
pygame.display.update()
def move_movers(self):
for mover in self.movers:
mover.move()
def display_movers(self):
for mover in self.movers:
loc = mover.get_location()
if self.view.contains(loc):
screenX, screenY = self.view.grid2screen(loc)
rect = pygame.Rect(screenX, screenY,
self.view.blockSize, self.view.blockSize)
mover.render(rect, self.screen)
def save_grid(self):
print "saving"
save = file('world.pkl', 'wb')
cPickle.dump(self.gameGrid, save)
save.close()
print "done saving"
def load_grid(self):
print "loading"
load = file('world.pkl', 'rb')
self.gameGrid = cPickle.load(load)
load.close()
self.update_terrain_surf()
print "done loading"
def execute(self):
self.time = time.time()
self.frame = 0
self.fps = 0
self.update_display()
self.autoMovers = False
while True:
for event in pygame.event.get():
if event.type == pygame.QUIT:
sys.exit()
if event.type == pygame.MOUSEBUTTONDOWN:
loc = self.view.screen2grid(event.pos)
if event.button == 1: # Add mover
rm = RandomMover(self.gameGrid, loc)
self.movers.append(rm)
if event.button == 3: # Remove mover
for mover in self.movers:
if mover.get_location() == loc:
self.movers.remove(mover)
break
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_DOWN:
self.view.scroll((0, 1))
if event.key == pygame.K_UP:
self.view.scroll((0, -1))
if event.key == pygame.K_LEFT:
self.view.scroll((-1, 0))
if event.key == pygame.K_RIGHT:
self.view.scroll((1, 0))
if event.key == pygame.K_PAGEUP:
self.view.z += 1
if event.key == pygame.K_PAGEDOWN:
self.view.z -= 1
if event.key == pygame.K_z:
self.view.zoom_in()
if event.key == pygame. | K_x:
self.view.zoom_out()
if event.key == pygame.K_SPACE:
if not self.autoMovers:
self.mo | ve_movers()
if event.key == pygame.K_t:
self.autoMovers = not self.autoMovers
if event.key == pygame.K_s:
self.save_grid()
if event.key == pygame.K_l:
self.load_grid()
if self.autoMovers: self.move_movers()
self.update_display()
if __name__ == "__main__":
dorf = Game()
dorf.execute()
|
dimagi/rapidsms | lib/rapidsms/contrib/stringcleaning/inputcleaner.py | Python | bsd-3-clause | 9,705 | 0.019784 | """
Provides utilities to cleanup text
"""
NUMBER_DICTIONARY = {
0:'Zero'
, 1: "One"
, 2: "Two"
, 3: "Three"
, 4: "Four"
, 5: "Five"
, 6: "Six"
, 7: "Seven"
, 8: "Eight"
, 9: "Nine"
, 10: "Ten"
, 11: "Eleven"
, 12: "Twelve"
, 13: "Thirteen"
, 14: "Fourteen"
, 15: "Fifteen"
, 16: "Sixteen"
, 17: "Seventeen"
, 18: "Eighteen"
, 19: "Nineteen"
, 20: "Twenty"
, 30: "Thirty"
, 40: "Forty"
, 50: "Fifty"
, 60: "Sixty"
, 70: "Seventy"
, 80: "Eighty"
, 90: "Ninety"
, 'Zero': 0
, 'One': 1
, 'Two': 2
, 'Three': 3
, 'Four': 4
, 'Five': 5
, 'Six': 6
, 'Seven': 7
, 'Eight': 8
, 'Nine': 9
, 'Ten': 10
, 'Eleven': 11
, 'Leven': 11
, 'Twelve': 12
, 'Thirteen': 13
, 'Fourteen': 14
, 'Forteen': 14
, 'Foteen': 14
, 'Fifteen': 15
, 'Sixteen': 16
, 'Seventeen': 17
, 'Eighteen': 18
, 'Nineteen': 19
, 'Twenty': 20
, 'Thirty': 30
, 'Forty': 40
, 'Fourty': 40
, 'Foty': 40
, 'Fifty': 50
, 'Sixty': 60
, 'Seventy': 70
, 'Eighty': 80
, 'Ninety': 90
, 'Ninty': 90
}
PLACE_VALUE = {
'Hundred':100
, 'Thousand':1000
, 'Million':1000000
, 'Billion':1000000000
, 'trillion':1000000000000
}
DIGIT_FOR_LETTER = {'i': '1', 'l': '1', 'o': '0', 'I':'1', 'O':'0',
'0':'0',
'1':'1',
'2':'2',
'3':'3',
'4':'4',
'5':'5',
'6':'6',
'7':'7',
'8':'8',
'9':'9'
}
class InputCleaner:
def soundex(self, name, len=4):
name = str(name)#make sure its a string--Trevor
""" soundex module conforming to Knuth's algorithm
implementation 2000-12-24 by Gregory Jorgensen
public domain
"""
# digits holds the soundex values for the alphabet
digits = '01230120022455012623010202'
sndx = ''
fc = ''
# translate alpha chars in name to soundex digits
for c in name.upper():
if c.isalpha():
if not fc: fc = c # remember first letter
d = digits[ord(c)-ord('A')]
# duplicate consecutive soundex digits are skipped
if not sndx or (d != sndx[-1]):
sndx += d
# replace first digit with first alpha character
sndx = fc + sndx[1:]
# remove all 0s from the soundex code
sndx = sndx.replace('0', '')
# return soundex code padded to len characters
return (sndx + (len * '0'))[:len]
def remove_double_spaces(self, text):
"""
return text with double spaces removed from input text
"""
while " " in text:
text = text.replace(" ", " ")
return text
def digit_to_word(self, digit):
"""
Returns a word representation of the number for a limited set of digits
"""
try:
return NUMBER_DICTIONARY[digit]
except KeyError:
return None
def words_to_digits_old_impl(self, text):
"""
Returns integer representation of numbers entered as words. None on failure.
Limited to 4 words. words_to_digits() allows more words and ignores gabbage
"""
if not text.strip():
return None
text = text.title().replace(" And", "")
text = self.remove_double_spaces(text)
tokens = text.strip().title().split(" ")
#correct tokens
for i in range(len(tokens)):
found = False
for key in NUMBER_DICTIONARY.keys():
if tokens[i] not in NUMBER_DICTIONARY.keys():
if self.soundex(tokens[i]) == self.soundex(key):
tokens[i] = key
found = True
break
if not found:
for key in PLACE_VALUE.keys():
if tokens[i] not in PLACE_VALUE.keys():
if self.soundex(tokens[i]) == self.soundex(key):
tokens[i] = key
break
if not tokens:
return None
try:
if len(tokens) == 2:
if tokens[0].title() == 'Hundred' or self.soundex(tokens[0]) == self.soundex('Hundred'):
return 100 + int(NUMBER_DICTIONARY[tokens[1]])
elif tokens[1].title() in PLACE_VALUE.keys():
return int(NUMBER_DICTIONARY[tokens[0]]) * int(PLACE_VALUE[tokens[1]])
else:
return int(NUMBER_DICTIONARY[tokens[0]]) + int(NUMBER_DICTIONARY[tokens[1]])
elif len(tokens) == 1:
return int(NUMBER_DICTIONARY[tokens[0]])
elif len(tokens) == 3:
if tokens[0].title() == 'Hundred':
return 100 + int(NUMBER_DICTIONARY[tokens[1]]) + int(NUMBER_DICTIONARY[tokens[2]])
elif tokens[1].title() == 'Hundred':
return int(NUMBER_DICTIONARY[tokens[0]]) * 100 + int(NUMBER_DICTIONARY[tokens[2]])
elif tokens[1].title() == 'Thousand':
return int(NUMBER_DICTIONARY[tokens[0]]) * 1000 + int(NUMBER_DICTIONARY[tokens[2]])
elif len(tokens) == 4:
if tokens[2].title() == 'Hundred':
return int(NUMBER_DICTIONARY[tokens[0]]) * 100 + int(NUMBER_DICTIONARY[tokens[2]]) + int(NUMBER_DICTIONARY[tokens[3]])
elif tokens[3].title() == 'Hundred':
return int(NUMBER_DICTIONARY[tokens[0]]) * 1000 + int(NUMBER_DICTIONARY[tokens[2]]) * 100
elif tokens[1].title() == 'Thousand':
return int(NUMBER_DICTIONARY[tokens[0]]) * 1000 + int(NUMBER_DICTIONARY[tokens[2]]) + int(NUMBER_DICTIONARY[tokens[3]])
elif tokens[2].title() == 'Thousand':
return (int(NUMBER_DICTIONARY[tokens[0]]) + int(NUMBER_DICTIONARY[tokens[1]])) * 1000 + int(NUMBER_DICTIONARY[tokens[3]])
elif len(tokens) > 4:
#I don't want deal with large numbers. db samples are in ones and tens
return None
except:
return None
def try_replace_oil_with_011(self, str):
original = str
result = ''
"""returns string with every occurence of i,I,l,o, and O replaced with 1 or 0 as appropriate"""
try:
for char in original:
result = result + DIGIT_FOR_LETTER[char]
except KeyError:
return original
return result
def words_to_digits(self, text):
"""
Returns Integer from numbers entered as words. None on failure.
Corrects spelling based on soundeex and ignores invalid words for numbers.
This method suffices for this project but for others it has following issue:
phrases like "ninety nine thousand" are understood as "ninety plus nine thousand"
(90 + 9 * 1000)
"""
if not text.strip():
return None
text = text.replace('-', '')
text = text.replace('+', '')
text = text.title().replace(" And", "")
text = self.remove_double_spaces(text)
tokens = text.strip().title().split(" ")
#correct tokens
for i in range(len(tokens)):
found = False
if str(tokens[i]).isdigit():
continue
for key in NUMBER_DICTIONARY.keys():
if tokens[i] not in NUMBER_DICTIONARY.keys():
if self.soundex(tokens[i]) == self.soundex(key):
tokens[i] = key
| found = True
break
if not found:
for key in PLACE_VALUE.keys():
if tokens[i] not in PLACE_VALUE.keys():
if self.soundex(tokens[i]) == self.soundex(key):
tokens[i] = key
break
if no | t tokens:
return None
result = 0
expr = "result = "
for i, val in enumerate(tokens):
if str(val).isdigit():
expr = expr + "+" + str(val)
elif val in NUMBER_DICTIONARY.keys():
expr = expr + "+" + str(NUMBER_DICTIONARY[val])
elif val in PLACE_VALUE.keys():
if i == 0:
expr = expr + str(PLACE_VALUE[val])
|
JioCloud/nova | nova/virt/libvirt/guest.py | Python | apache-2.0 | 6,242 | 0 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2012 University Of Minho
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Manages information about the guest.
This class encapsulates libvirt domain provides certain
higher level APIs around the raw libvirt API. These APIs are
then used by all the other libvirt related classes
"""
from lxml import etree
from oslo_log import log as logging
from oslo_utils import encodeutils
from oslo_utils import excutils
from oslo_utils import importutils
from nova.i18n import _LE
from nova import utils
libvirt = None
LOG = logging.getLogger(__name__)
class Guest(object):
    """High-level convenience wrapper around a raw libvirt domain handle."""

    def __init__(self, domain):
        global libvirt
        # Import libvirt lazily so this module can be imported (e.g. by
        # unit tests) on hosts without the libvirt python bindings.
        if libvirt is None:
            libvirt = importutils.import_module('libvirt')

        self._domain = domain

    def __repr__(self):
        return "<Guest %(id)d %(name)s %(uuid)s>" % {
            'id': self.id,
            'name': self.name,
            'uuid': self.uuid
        }

    @property
    def id(self):
        # Numeric libvirt domain ID.
        return self._domain.ID()

    @property
    def uuid(self):
        # Canonical string form of the domain UUID.
        return self._domain.UUIDString()

    @property
    def name(self):
        return self._domain.name()

    @property
    def _encoded_xml(self):
        # Current domain XML definition, decoded to a native string.
        return encodeutils.safe_decode(self._domain.XMLDesc(0))

    @classmethod
    def create(cls, xml, host):
        """Create a new Guest.

        :param xml: XML definition of the domain to create
        :param host: host.Host connection to define the guest on

        :returns guest.Guest: Guest ready to be launched
        """
        try:
            # TODO(sahid): Host.write_instance_config should return
            # an instance of Guest
            domain = host.write_instance_config(xml)
        except Exception:
            # Log the offending XML, then re-raise the original error.
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error defining a domain with XML: %s') %
                          encodeutils.safe_decode(xml))
        return cls(domain)

    def launch(self, pause=False):
        """Start a created (defined) guest.

        :param pause: Indicates whether to start and pause the guest
        """
        flags = pause and libvirt.VIR_DOMAIN_START_PAUSED or 0
        try:
            return self._domain.createWithFlags(flags)
        except Exception:
            with excutils.save_and_reraise_exception():
                # NOTE(review): 'errors' is not a keyword accepted by
                # LOG.error -- it looks like it was meant for a
                # safe_decode() call; confirm before relying on this log.
                LOG.error(_LE('Error launching a defined domain '
                              'with XML: %s') %
                          self._encoded_xml, errors='ignore')

    def poweroff(self):
        """Stops a running guest."""
        self._domain.destroy()

    def resume(self):
        """Resumes a suspended guest."""
        self._domain.resume()

    def enable_hairpin(self):
        """Enables hairpin mode for this guest.

        Writes '1' into each interface's brport hairpin_mode sysfs file.
        """
        interfaces = self.get_interfaces()
        try:
            for interface in interfaces:
                utils.execute(
                    'tee',
                    '/sys/class/net/%s/brport/hairpin_mode' % interface,
                    process_input='1',
                    run_as_root=True,
                    check_exit_code=[0, 1])
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error(_LE('Error enabling hairpin mode with XML: %s') %
                          self._encoded_xml, errors='ignore')

    def get_interfaces(self):
        """Returns a list of all network interface target devices for this
        domain, parsed from the domain XML (empty list if the XML cannot
        be parsed)."""
        doc = None
        try:
            doc = etree.fromstring(self._encoded_xml)
        except Exception:
            return []
        interfaces = []
        nodes = doc.findall('./devices/interface/target')
        for target in nodes:
            interfaces.append(target.get('dev'))
        return interfaces

    def get_vcpus_info(self):
        """Yield information about each virtual cpu of the guest.

        :returns: generator of :class:`GuestVCPUInfo`
        """
        vcpus = self._domain.vcpus()
        if vcpus is not None:
            # vcpus[0] is the per-vcpu info tuple list from libvirt:
            # (number, state, cpuTime, cpu).
            for vcpu in vcpus[0]:
                yield GuestVCPUInfo(
                    id=vcpu[0], cpu=vcpu[3], state=vcpu[1], time=vcpu[2])

    def delete_configuration(self):
        """Undefines a domain from hypervisor."""
        try:
            self._domain.undefineFlags(
                libvirt.VIR_DOMAIN_UNDEFINE_MANAGED_SAVE)
        except libvirt.libvirtError:
            # NOTE(review): the "%d" directly abuts "Retrying" with no
            # separator, so the rendered message reads oddly -- confirm
            # intended format.
            LOG.debug("Error from libvirt during undefineFlags. %d"
                      "Retrying with undefine", self.id)
            self._domain.undefine()
        except AttributeError:
            # Older versions of libvirt don't support undefine flags,
            # trying to remove managed image
            try:
                if self._domain.hasManagedSaveImage(0):
                    self._domain.managedSaveRemove(0)
            except AttributeError:
                pass
            self._domain.undefine()
class GuestVCPUInfo(object):
    """Value object describing a single guest virtual CPU."""

    def __init__(self, id, cpu, state, time):
        """Record the state of one virtual cpu.

        :param id: The virtual cpu number
        :param cpu: The host cpu currently associated
        :param state: The running state of the vcpu (0 offline, 1 running, 2
                      blocked on resource)
        :param time: The cpu time used in nanoseconds
        """
        self.id, self.cpu = id, cpu
        self.state, self.time = state, time
|
soravux/scoop | scoop/broker/brokerzmq.py | Python | lgpl-3.0 | 13,389 | 0.000598 | #!/usr/bin/env python
#
# This file is part of Scalable COncurrent Operations in Python (SCOOP).
#
# SCOOP is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as
# published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# SCOOP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS | FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SCOOP. If not, see <http://www.gnu.org/licenses/>.
#
from collections import d | eque, defaultdict
import time
import zmq
import sys
import copy
import logging
try:
import cPickle as pickle
except ImportError:
import pickle
import scoop
from scoop import TIME_BETWEEN_PARTIALDEBUG
from .. import discovery, utils
from .structs import BrokerInfo
# Worker requests
INIT = b"I"
REQUEST = b"RQ"
TASK = b"T"
REPLY = b"RP"
SHUTDOWN = b"S"
VARIABLE = b"V"
BROKER_INFO = b"B"
STATUS_REQ = b"SR"
STATUS_ANS = b"SA"
STATUS_DONE = b"SD"
STATUS_UPDATE = b"SU"
# Task statuses
STATUS_HERE = b"H"
STATUS_GIVEN = b"G"
STATUS_NONE = b"N"
# Broker interconnection
CONNECT = b"C"
class LaunchingError(Exception): pass
class Broker(object):
def __init__(self, tSock="tcp://*:*", mSock="tcp://*:*", debug=False,
headless=False, hostname="127.0.0.1"):
"""This function initializes a broker.
:param tSock: Task Socket Address.
Must contain protocol, address and port information.
:param mSock: Meta Socket Address.
Must contain protocol, address and port information.
"""
# Initialize zmq
self.context = zmq.Context(1)
self.debug = debug
self.hostname = hostname
# zmq Socket for the tasks, replies and request.
self.task_socket = self.context.socket(zmq.ROUTER)
self.task_socket.setsockopt(zmq.IPV4ONLY, 0)
self.task_socket.setsockopt(zmq.ROUTER_MANDATORY, 1)
self.task_socket.setsockopt(zmq.LINGER, 1000)
self.t_sock_port = 0
if tSock[-2:] == ":*":
self.t_sock_port = self.task_socket.bind_to_random_port(tSock[:-2])
else:
self.task_socket.bind(tSock)
self.t_sock_port = tSock.split(":")[-1]
# Create identifier for this broker
self.name = "{0}:{1}".format(hostname, self.t_sock_port)
# Initialize broker logging
self.logger = utils.initLogging(2 if debug else 0, name=self.name)
self.logger.handlers[0].setFormatter(
logging.Formatter(
"[%(asctime)-15s] %(module)-9s ({0}) %(levelname)-7s "
"%(message)s".format(self.name)
)
)
# zmq Socket for the pool informations
self.info_socket = self.context.socket(zmq.PUB)
self.info_socket.setsockopt(zmq.IPV4ONLY, 0)
self.info_socket.setsockopt(zmq.LINGER, 1000)
self.info_sock_port = 0
if mSock[-2:] == ":*":
self.info_sock_port = self.info_socket.bind_to_random_port(mSock[:-2])
else:
self.info_socket.bind(mSock)
self.info_sock_port = mSock.split(":")[-1]
self.task_socket.setsockopt(zmq.SNDHWM, 0)
self.task_socket.setsockopt(zmq.RCVHWM, 0)
self.info_socket.setsockopt(zmq.SNDHWM, 0)
self.info_socket.setsockopt(zmq.RCVHWM, 0)
# Init connection to fellow brokers
self.cluster_socket = self.context.socket(zmq.DEALER)
self.cluster_socket.setsockopt(zmq.IPV4ONLY, 0)
self.cluster_socket.setsockopt_string(zmq.IDENTITY, self.getName())
self.cluster_socket.setsockopt(zmq.RCVHWM, 0)
self.cluster_socket.setsockopt(zmq.SNDHWM, 0)
self.cluster_socket.setsockopt(zmq.IMMEDIATE, 1)
self.cluster = []
self.cluster_available = set()
# Init statistics
if self.debug:
self.stats = []
self.lastDebugTs = time.time()
# Two cases are important and must be optimised:
# - The search of unassigned task
# - The search of available workers
# These represent when the broker must deal the communications the
# fastest. Other cases, the broker isn't flooded with urgent messages.
# Initializing the queue of workers and tasks
# The busy workers variable will contain a dict (map) of workers: task
self.available_workers = deque()
self.unassigned_tasks = deque()
self.assigned_tasks = defaultdict(set)
self.status_times = {}
# Shared variables containing {workerID:{varName:varVal},}
self.shared_variables = defaultdict(dict)
# Start a worker-like communication if needed
self.execQueue = None
# Handle cloud-like behavior
self.discovery_thread = None
self.config = defaultdict(bool)
self.processConfig({'headless': headless})
def addBrokerList(self, aBrokerInfoList):
"""Add a broker to the broker cluster available list.
Connects to the added broker if needed."""
self.cluster_available.update(set(aBrokerInfoList))
# If we need another connection to a fellow broker
# TODO: only connect to a given number
for aBrokerInfo in aBrokerInfoList:
self.cluster_socket.connect(
"tcp://{hostname}:{port}".format(
hostname=aBrokerInfo.hostname,
port=aBrokerInfo.task_port,
)
)
self.cluster.append(aBrokerInfo)
def processConfig(self, worker_config):
"""Update the pool configuration with a worker configuration.
"""
self.config['headless'] |= worker_config.get("headless", False)
if self.config['headless']:
# Launch discovery process
if not self.discovery_thread:
self.discovery_thread = discovery.Advertise(
port=",".join(str(a) for a in self.getPorts()),
)
def run(self):
"""Redirects messages until a shutdown message is received."""
while True:
if not self.task_socket.poll(-1):
continue
msg = self.task_socket.recv_multipart()
msg_type = msg[1]
if self.debug:
self.stats.append((time.time(),
msg_type,
len(self.unassigned_tasks),
len(self.available_workers)))
if time.time() - self.lastDebugTs > TIME_BETWEEN_PARTIALDEBUG:
self.writeDebug("debug/partial-{0}".format(
round(time.time(), -1)
))
self.lastDebugTs = time.time()
# New task inbound
if msg_type == TASK:
task_id = msg[2]
task = msg[3]
self.logger.debug("Received task {0}".format(task_id))
try:
address = self.available_workers.popleft()
except IndexError:
self.unassigned_tasks.append((task_id, task))
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id)
# Request for task
elif msg_type == REQUEST:
address = msg[0]
try:
task_id, task = self.unassigned_tasks.popleft()
except IndexError:
self.available_workers.append(address)
else:
self.logger.debug("Sent {0}".format(task_id))
self.task_socket.send_multipart([address, TASK, task])
self.assigned_tasks[address].add(task_id) |
YosaiProject/yosai_dpcache | yosai_dpcache/dogpile/core/util.py | Python | apache-2.0 | 133 | 0.007519 | import sys
py3k = sys.version_info >= (3, 0)
try:
import t | hreading
except ImportErro | r:
import dummy_threading as threading
|
rhoop/snmp2graphite | metricD.py | Python | apache-2.0 | 6,600 | 0.000152 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Richard Hoop - wrhoop@gmail.com
#
import sys
import argparse
import logging
import glob
import json
import netsnmp
import socket
import time
import signal
import threading
from raven import Client
# ====( headers )==== #
import pprint
pprint = pprint.pprint
from daemon import Daemon
# ====( arguments )==== #
# Command-line interface: logging switches plus exactly one daemon action.
parser = argparse.ArgumentParser(description="Graphite Metric Daemon")
parser.add_argument(
    "-d",
    "--debug",
    action="store_true",
    default=False,
    help="Enable logging up to DEBUG logging")
parser.add_argument(
    "-l",
    "--log",
    action="store_true",
    default=True,
    help="Send all output to log")
# Daemon Commands -- exactly one of start/stop/reload/cli is required.
daemongroup = parser.add_mutually_exclusive_group(required=True)
daemongroup.add_argument(
    "--start",
    action="store_true",
    help="Start the Daemon")
daemongroup.add_argument(
    "--stop",
    action="store_true",
    help="Stop The daemon")
daemongroup.add_argument(
    "--reload",
    action="store_true",
    help="Reload the daemon")
daemongroup.add_argument(
    "--cli",
    action="store_true",
    help="Run in CLI mode")
# Set our local args
args = parser.parse_args()
# Daemon mode logs to a file; CLI mode (or --log off) logs to stderr.
if args.log and not args.cli:
    logging.basicConfig(
        format='%(levelno)s %(process)d %(asctime)s.%(msecs)d'
        ' (%(module)s::%(funcName)s[#%(lineno)d]) => %(message)s',
        datefmt='%H:%M:%S',
        filename='/var/log/snmp2graphite.log')
else:
    logging.basicConfig(
        format='%(levelno)s %(process)d %(asctime)s.%(msecs)d'
        ' (%(module)s::%(funcName)s[#%(lineno)d]) => %(message)s',
        datefmt='%H:%M:%S')
logger = logging.getLogger()
logger.setLevel(logging.INFO)
if args.debug:
    logger.setLevel(logging.DEBUG)
# ====( defaults )==== #
# NOTE(review): 'server' is empty here -- presumably filled in at
# deployment time; confirm before running.
graphite = {
    "port": 2023,
    "server": ""
}
class Transformation(object):
    """Namespace of value transformations applied to raw SNMP readings.

    All methods are static but take the class itself as their first
    argument, mirroring how callers invoke them via getattr dispatch.
    """

    # Remembers the previous reading per key for delta computations.
    deltas = {}

    @staticmethod
    def byteToMegabyte(cls, value):
        # Bytes -> megabytes.
        return (float(value) / 1024) / 1024

    @staticmethod
    def byteToGigabyte(cls, value):
        # Bytes -> gigabytes, built on top of the megabyte conversion.
        return float(cls.byteToMegabyte(cls, value) / 1024)

    @staticmethod
    def bitToGigabyte(cls, value):
        return (float(value) / 1024) / 1024 / 1024

    @staticmethod
    def bitToGigabit(cls, value):
        return float(float(value) / 8589934592) / 10

    @staticmethod
    def largestValue(cls, values):
        # Pick the biggest sample out of the walk result.
        return max(values)

    @staticmethod
    def deltaByInterval(cls, value, key, interval):
        # Per-second rate of change relative to the previous sample
        # stored under *key*; the first sample always yields 0.
        previous = cls.deltas.get(key)
        cls.deltas[key] = float(value)
        if previous is None:
            return 0
        return (float(value) - previous) / interval
def send_to_graphite(value, sourcehost, metric, checkname):
    """Send one datapoint to the Graphite plaintext line receiver.

    :param value: metric value to report
    :param sourcehost: FQDN of the polled host (short name is used)
    :param metric: metric (subcheck) name
    :param checkname: check name, used as the metric path component
    """
    message = 'PRODUCTION.host.%s.metricD.%s.%s %s %d\n' % \
        (sourcehost.split('.')[0], checkname, metric, value, int(time.time()))
    logger.info(message[:-1])
    sock = socket.socket()
    try:
        sock.connect((graphite['server'], graphite['port']))
        sock.sendall(message)
    finally:
        # Always release the socket, even if connect/send fails.
        sock.close()
def load_config():
    """Load every check definition from checks/*.json.

    :returns: dict mapping check name (file basename without ".json")
              to its parsed JSON configuration.
    """
    CHECKS = {}
    for filename in glob.glob("checks/*.json"):
        with open(filename, "rb") as fh:
            logger.debug("Found file [%s]", filename)
            # json.load parses the stream directly; the old code slurped
            # readlines() and re-joined them with spaces for json.loads.
            CHECKS[filename.split("/")[1][:-5]] = json.load(fh)
    return CHECKS
def run_snmp(name, config):
    """Polling loop for one check definition; runs forever.

    Walks each configured OID on each host, applies the optional
    transformation and ships the result to Graphite, then sleeps for
    the configured interval.

    :param name: check name (becomes part of the metric path)
    :param config: dict with 'checks', 'hosts', 'community', 'interval'
    """
    pprint(name)
    pprint(config)
    while True:
        for subcheck in config['checks']:
            for host in config['hosts']:
                result = netsnmp.snmpwalk(
                    subcheck['oid'],
                    Version=2,
                    DestHost=host['name'],
                    Community=config['community']
                )
                if len(result):
                    value = result[0]
                else:
                    # Fix: the old code formatted type(value) here, but on
                    # an empty walk 'value' was never assigned, raising a
                    # NameError that was then re-raised. Log the failure
                    # and move on to the next host instead.
                    logger.error(
                        "No SNMP value returned for oid [%s] on host [%s]",
                        subcheck['oid'], host['name'])
                    continue
                if 'transform' in subcheck:
                    # Dispatch to the named Transformation helper.
                    transformer = getattr(
                        Transformation, subcheck['transform'])
                    if subcheck['transform'] == 'deltaByInterval':
                        value = transformer(
                            Transformation,
                            value,
                            host['name'] + subcheck['name'],
                            float(config['interval']))
                    elif subcheck['transform'] == 'largestValue':
                        # largestValue inspects the whole walk result.
                        value = transformer(
                            Transformation,
                            result)
                    else:
                        value = transformer(Transformation, value)
                send_to_graphite(
                    value=value,
                    sourcehost=host['name'],
                    metric=subcheck['name'],
                    checkname=name)
        time.sleep(float(config['interval']))
def do_run():
    """Spawn one daemonized polling thread per configured check and
    block while any worker is considered alive."""
    thread_alive = 0
    THREADS = []
    # Test the App with CLI
    CHECKS = load_config()
    for name, config in CHECKS.items():
        # print name, config
        t = threading.Thread(
            target=run_snmp, args=(name, config))
        THREADS.append(t)
        # Daemon threads so the process can exit without joining them.
        THREADS[thread_alive].daemon = True
        THREADS[thread_alive].start()
        thread_alive += 1
    # NOTE(review): this liveness bookkeeping looks fragile -- it
    # decrements once per pass for *every* dead thread returned by
    # threading.enumerate(), so a single dead worker keeps decrementing
    # the counter each second. Harmless today because run_snmp loops
    # forever, but worth confirming before relying on it.
    while thread_alive:
        for thread in threading.enumerate():
            if not thread.is_alive():
                thread_alive -= 1
        time.sleep(1)
class DaemonRun(Daemon):
    """Daemon wrapper that runs the polling threads and reacts to signals."""

    def run(self):
        # Daemon entry point: start the polling threads and block.
        do_run()

    def reload(self):
        # SIGHUP handler: re-read the check definitions from disk.
        load_config()
        logger.warn("Reloading metricD!!")

    def exit(self, signal, frame):
        # SIGTERM handler.
        # NOTE(review): 'self.determined' is never assigned anywhere in
        # this file, so this line raises AttributeError when the signal
        # fires -- probably meant self.stop(); confirm against the
        # Daemon base class before fixing.
        self.determined.stop()
        logger.warn("Stopping metricD Threads!!")
        sys.exit(0)
# ====( main )==== #
def main():
    """Entry point: run inline for --cli, otherwise drive the daemon."""
    if args.cli:
        do_run()
        return
    # Daemon control path: wire up signal handlers, then dispatch the
    # requested action and exit.
    daemon = DaemonRun('metricD')
    signal.signal(signal.SIGTERM, daemon.exit)
    signal.signal(signal.SIGHUP, daemon.reload)
    if args.start:
        daemon.start()
    elif args.stop:
        daemon.stop()
    elif args.reload:
        daemon.reload()
    sys.exit(0)
# ====( run )==== #
if __name__ == '__main__':
    try:
        main()
    except (KeyboardInterrupt, SystemExit):
        # Exit quietly on Ctrl-C or an explicit sys.exit() from main().
        sys.exit(0)
|
mganeva/mantid | Framework/PythonInterface/test/python/plugins/algorithms/FitGaussianTest.py | Python | gpl-3.0 | 2,677 | 0.014195 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source
# & Institut Laue - Langevin
# SPDX - License - Identifier: GPL - 3.0 +
from __future__ import (absolute_import, division, print_function)
import unittest
from mantid.simpleapi import FitGaussian, CreateSampleWorkspace, DeleteWorkspace
import logging
class FitGaussianTest(unittest.TestCase):
    """Tests for the FitGaussian algorithm on synthetic sample workspaces.

    Fix: stray dataset-separator garbage (' | ') had been injected into
    this class body, breaking the syntax; the code is restored verbatim.
    """

    def setUp(self):
        self.ws = None

    def tearDown(self):
        # Drop the workspace created by the last test, if any.
        if self.ws is not None: DeleteWorkspace(self.ws)

    def _workspace(self, userFun):
        """Replace self.ws with a sample workspace built from *userFun*."""
        if self.ws is not None: DeleteWorkspace(self.ws)
        self.ws = CreateSampleWorkspace(OutputWorkspace="out",
            Function="User Defined", UserDefinedFunction=userFun,
            NumBanks=1, BankPixelWidth=1, XMin=0, XMax=10, BinWidth=0.1)

    def _gaussianWorkspace(self, peakCentre, height, sigma):
        """Workspace with a single Gaussian peak."""
        self._workspace("name=Gaussian,PeakCentre=%s,Height=%s,Sigma=%s"
                        % (peakCentre, height, sigma))

    def _linearWorkspace(self, A0):
        """Workspace with a flat linear background (no peak)."""
        self._workspace("name=LinearBackground,A0=%s;" % A0)

    def _veryNarrowPeakWorkspace(self):
        self._gaussianWorkspace(5, 1, .05)

    def test_errors(self):
        """Conditions that raise RuntimeError."""
        self._linearWorkspace(0)
        # Index 1 is out of range for a single-spectrum workspace.
        self.assertRaises(RuntimeError, FitGaussian, Workspace=self.ws, Index=1)

    def test_noFit(self):
        """Cases where fit is not possible."""
        self._linearWorkspace(0)
        fitResult = FitGaussian(self.ws, 0)
        self.assertEqual(0.0, fitResult[0])
        self.assertEqual(0.0, fitResult[1])
        self._veryNarrowPeakWorkspace()
        fitResult = FitGaussian(self.ws, 0)
        self.assertEqual(0.0, fitResult[0])
        self.assertEqual(0.0, fitResult[1])

    def _guessPeak(self, peakCentre, height, sigma):
        """Test-fitting one generated Gaussian peak."""
        self._gaussianWorkspace(peakCentre, height, sigma)
        fitPeakCentre, fitSigma = FitGaussian(self.ws, 0)
        # require a close relative match between given and fitted values
        diffPeakCentre = abs((fitPeakCentre - peakCentre) / peakCentre)
        diffSigma = abs((fitSigma - sigma) / sigma)
        self.assertTrue(diffPeakCentre < 0.03)
        self.assertTrue(diffSigma < 1e-6)

    def test_guessedPeaks(self):
        """Test that generated Gaussian peaks are reasonably well guessed."""
        self._guessPeak(2, 10, .7)
        self._guessPeak(5, 10, .7)
        self._guessPeak(9, 10, .7)
if __name__ == "__main__":
unittest.main()
|
thaim/ansible | lib/ansible/modules/network/fortios/fortios_firewall_schedule_onetime.py | Python | mit | 11,113 | 0.00171 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_firewall_schedule_onetime
short_description: Onetime schedule configuration in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify firewall_schedule feature and onetime category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
state:
description:
- Indicates whether to create or remove the object.
This attribute was present already in previous version in a deeper level.
It has been moved out to this outer level.
type: str
required: false
choices:
- present
- absent
version_added: 2.9
firewall_schedule_onetime:
description:
- Onetime schedule configuration.
default: null
type: dict
suboptions:
state:
description:
- B(Deprecated)
- Starting with Ansible 2.9 we recommend using the top-level 'state' parameter.
- HORIZONTALLINE
- Indicates whether to create or remove the object.
type: str
required: false
choices:
- present
- absent
color:
description:
- Color of icon on the GUI.
type: int
end:
description:
- "Schedule end date and time, format hh:mm yyyy/mm/dd."
type: str
expiration_days:
description:
- Write an event log message this many days before the schedule expires.
type: int
name:
description:
- Onetime schedule name.
required: true
type: str
start:
description:
- "Schedule start date and time, format hh:mm yyyy/mm/dd."
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Onetime schedule configuration.
fortios_firewall_schedule_onetime:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
| https: "False"
state: "present"
firewall_schedule_onetime:
color: "3"
end: "<your_own_value>"
expiration_days: "5"
name: "default_name_6"
start: "<your_own_value>"
'''
RETURN = '''
build:
description | : Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open an authenticated session on the FortiGate device.

    :param data: module parameters (host, username, password, https,
                 ssl_verify)
    :param fos: FortiOSAPI connection object
    """
    fos.debug('on')
    # HTTPS defaults to on unless explicitly disabled by the caller.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_firewall_schedule_onetime_data(json):
    """Keep only the documented onetime-schedule options that are set.

    :param json: raw module parameter dict
    :returns: dict restricted to known keys with non-None values
    """
    option_list = ['color', 'end', 'expiration_days',
                   'name', 'start']
    return {attribute: json[attribute]
            for attribute in option_list
            if attribute in json and json[attribute] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite dict keys from snake_case to hyphen-case.

    Fix: the original list branch assigned the recursion result to the
    loop variable (``elem = underscore_to_hyphen(elem)``), which rebinds
    a local name and silently discards the converted value, so dicts
    nested inside lists were never converted.

    :param data: arbitrarily nested combination of dicts/lists/scalars
    :returns: structure with every dict key's underscores replaced
    """
    if isinstance(data, list):
        data = [underscore_to_hyphen(elem) for elem in data]
    elif isinstance(data, dict):
        data = {k.replace('_', '-'): underscore_to_hyphen(v)
                for k, v in data.items()}
    return data
def firewall_schedule_onetime(data, fos):
vdom = data['vdom']
if 'state' in data and data['state']:
state = data['state']
elif 'state' in data['firewall_schedule_onetime'] and data['firewall_schedule_onetime']:
state = data['firewall_schedule_onetime']['state']
else:
state = True
firewall_schedule_onetime_data = data['firewall_schedule_onetime']
filtered_data = underscore_to_hyphen(filter_firewall_schedule_onetime_data(firewall_schedule_onetime_data))
if state == "present":
return fos.set('firewall.schedule',
'onetime',
data=filtered_data,
vdom=vdom)
elif state == "absent":
return fos.delete('firewall.schedule' |
kain88-de/mdanalysis | package/MDAnalysis/core/topologyobjects.py | Python | gpl-2.0 | 33,789 | 0.000059 | # -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# MDAnalysis --- http://www.mdanalysis.org
# Copyright (c) 2006-2016 The MDAnalysis Development Team and contributors
# (see the file AUTHORS for the full list of names)
#
# Released under the GNU Public Licence, v2 or any higher version
#
# Please cite your use of MDAnalysis in published work:
#
# R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler,
# D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein.
# MDAnalysis: A Python package for the rapid analysis of molecular dynamics
# simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th
# Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy.
#
# N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein.
# MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations.
# J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787
#
"""
Core Topology Objects --- :mod:`MDAnalysis.core.topologyobjects`
================================================================
The building blocks for MDAnalysis' description of topology
"""
from __future__ import print_function, absolute_import, division
from six.moves import zip
import numpy as np
import functools
from ..lib.mdamath import norm, dihedral
from ..lib.mdamath import angle as slowang
from ..lib.util import cached
from ..lib import util
from ..lib import distances
@functools.total_ordering
class TopologyObject(object):
    """Base class for all Topology items.

    Defines the behaviour by which Bonds/Angles/etc in MDAnalysis should
    behave.

    Fix: stray dataset-separator garbage (' | ') had been injected into
    the ``type`` property and ``__repr__``, breaking the syntax; the
    code is restored verbatim.

    .. versionadded:: 0.9.0
    .. versionchanged:: 0.10.0
       All TopologyObject now keep track of if they were guessed or not
       via the ``is_guessed`` managed property.
    .. versionadded:: 0.11.0
       Added the `value` method to return the size of the object
    """
    __slots__ = ("_ix", "_u", "btype", "_bondtype", "_guessed", "order")

    def __init__(self, ix, universe, type=None, guessed=False, order=None):
        """Create a topology object

        Parameters
        ----------
        ix : numpy array
            indices of the Atoms
        universe : MDAnalysis.Universe
        type : optional
            Type of the bond
        guessed : optional
            If the Bond is guessed
        """
        self._ix = ix
        self._u = universe
        self._bondtype = type
        self._guessed = guessed
        self.order = order

    @property
    def atoms(self):
        """Atoms within this Bond"""
        return self._u.atoms[self._ix]

    @property
    def indices(self):
        """Tuple of indices describing this object

        .. versionadded:: 0.10.0
        """
        return self._ix

    @property
    def universe(self):
        return self._u

    @property
    def type(self):
        """Type of the bond as a tuple

        Note
        ----
        When comparing types, it is important to consider the reverse
        of the type too, i.e.::

            a.type == b.type or a.type == b.type[::-1]
        """
        # Fall back to the atom types when no explicit bond type was set.
        if self._bondtype is not None:
            return self._bondtype
        else:
            return tuple(self.atoms.types)

    @property
    def is_guessed(self):
        return bool(self._guessed)

    def __hash__(self):
        return hash((self._u, tuple(self.indices)))

    def __repr__(self):
        indices = sorted(self.indices)
        return "<{cname} between: {conts}>".format(
            cname=self.__class__.__name__,
            conts=", ".join([
                "Atom {0}".format(i)
                for i in indices]))

    def __contains__(self, other):
        """Check whether an atom is in this :class:`TopologyObject`"""
        return other in self.atoms

    def __eq__(self, other):
        """Check whether two bonds have identical contents"""
        if not self.universe == other.universe:
            return False
        # Index order is irrelevant: (i, j) equals (j, i).
        return (np.array_equal(self.indices, other.indices) or
                np.array_equal(self.indices[::-1], other.indices))

    def __ne__(self, other):
        return not self == other

    def __lt__(self, other):
        return tuple(self.indices) < tuple(other.indices)

    def __getitem__(self, item):
        """Can retrieve a given Atom from within"""
        return self.atoms[item]

    def __iter__(self):
        return iter(self.atoms)

    def __len__(self):
        return len(self._ix)
class Bond(TopologyObject):
    """A bond between two :class:`~MDAnalysis.core.groups.Atom` instances.

    Two :class:`Bond` instances compare equal (``==`` / ``!=``) when they
    connect the same pair of atom numbers with the same bond order; the
    ordering of the pair and whether the bond was guessed are ignored.
    Membership of an :class:`~MDAnalysis.core.groups.Atom` can be tested
    with ``Atom in Bond``.

    .. versionchanged:: 0.9.0
       Now a subclass of :class:`TopologyObject`. Changed class to use
       :attr:`__slots__` and stores atoms in :attr:`atoms` attribute.
    """
    btype = 'bond'

    def partner(self, atom):
        """Bond.partner(Atom)

        Returns
        -------
        the other :class:`~MDAnalysis.core.groups.Atom` in this
        bond
        """
        first, second = self.atoms[0], self.atoms[1]
        if atom == first:
            return second
        if atom == second:
            return first
        raise ValueError("Unrecognised Atom")

    def length(self, pbc=False):
        """Length of the bond.

        .. versionchanged:: 0.11.0
           Added pbc keyword
        """
        if not pbc:
            return norm(self[0].position - self[1].position)
        # Minimum-image distance under periodic boundary conditions.
        positions = np.array([self[0].position, self[1].position])
        return distances.self_distance_array(
            positions, box=self.universe.dimensions)[0]

    value = length
class Angle(TopologyObject):
"""An angle between three :class:`~MDAnalysis.core.groups.Atom` instances.
Atom 2 is the apex of the angle
.. versionadded:: 0.8
.. versionchanged:: 0.9.0
Now a subclass of :class:`TopologyObject`; now uses
:attr:`__slots__` and stores atoms in :attr:`atoms` attribute
"""
btype = 'angle'
def angle(self):
"""Returns the angle in degrees of this Angle.
Angle between atoms 0 and 2 with apex at 1::
2
/
/
1------0
Note
----
The numerical precision is typically not better than
4 decimals (and is only tested to 3 decimals).
.. versionadded:: 0.9.0
"""
a = self[0].position - self[1].position
b = self[2].position - self[1].position
return np.rad2deg(
np.arccos(np.dot(a, b) / (norm(a) * norm(b))))
value = angle
class Dihedral(TopologyObject):
"""Dihedral (dihedral angle) between four
:class:`~MDAnalysis.core.groups.Atom` instances.
The dihedral is defined as the angle between the planes formed by
Atoms (1, 2, 3) and (2, 3, 4).
.. versionadded:: 0.8
.. versionchanged:: 0.9.0
Now a subclass of :class:`TopologyObject`; now uses :attr:`__slots__`
and stores atoms in :attr:`atoms` attribute.
.. versionchanged:: 0.11.0
Renamed to Dihedral (was Torsion)
"""
# http://cbio.bmt.tue.nl/pumma/uploads/Theory/dihedral.png
btype = 'dihedral'
def dihedral(self):
"""Calculate the dihedral angle in degrees.
Dihedral angle around axis connecting atoms 1 and 2 (i.e. the angle
between the planes spanned by atoms (0,1,2) and (1,2,3))::
3
|
1-----2
/
0
Note
----
The numerical precision is typically not better than
4 decimals (and is only tested to 3 decimals).
.. versionadded:: 0.9.0
"""
A, B, C, D = self.atoms
ab = A.position - B.positio |
cristianst85/binwalk | src/binwalk/plugins/compressd.py | Python | mit | 1,421 | 0.021816 | #import binwalk.core.C
import binwalk.core.plugin
#from binwalk.core.common import *
class CompressdPlugin(binwalk.core.plugin.Plugin):
# '''
# Searches for and validates compress'd data.
# '''
MODULES = ['Signature']
#READ_SIZE = 64
#COMPRESS42 = "compress42"
#COMPRESS42_FUNCTIONS = [
# binwalk.core.C.Function(name="is_compressed", type=bool),
#]
#comp = None
#def init(self):
#self.comp = binwalk.core.C.Library(self.COMPRESS42, self.COMPRESS42_FUNCTIONS)
# This plugin is currently disabled due to the need to move away from supporting C
# libraries and into a pure Python project, for cross-platform support and ease of
# installation / package maintenance. A Python implementation will likely need to
# be custom developed in the future, but for now, since this compression format is
# not very common, especially in firmware, simply disable it.
#self.comp = None
#def scan(self, result):
# if self.comp and result.file and result.description.lower().startswith("compress'd data"):
# fd = self.module.config.open_file(result.file.name, offset=result.offset, length=self.READ_SIZE)
| # compressed_data = fd.read(self.READ_SIZE)
# fd.close()
# if not self.comp.is_compresse | d(compressed_data, len(compressed_data)):
# result.valid = False
|
demisto/content | Packs/Unit42Intel/Integrations/FeedUnit42IntelObjects/FeedUnit42IntelObjects.py | Python | mit | 23,539 | 0.002549 | from typing import Dict, List, Optional
import urllib3
import demistomock as demisto # noqa: F401
from CommonServerPython import * # noqa: F401
# disable insecure warnings
urllib3.disable_warnings()
''' CONSTANTS '''
DATE_FORMAT = '%Y-%m-%dT%H:%M:%SZ' # ISO8601 format with UTC, default in XSOAR
AF_TAGS_DATE_FORMAT = '%Y-%m-%d %H:%M:%S'
TAG_CLASS_TO_DEMISTO_TYPE = {
'malware_family': ThreatIntel.ObjectsNames.MALWARE,
'actor': ThreatIntel.ObjectsNames.THREAT_ACTOR,
'campaign': ThreatIntel.ObjectsNames.CAMPAIGN,
'malicious_behavior': ThreatIntel.ObjectsNames.ATTACK_PATTERN,
}
MAP_RELATIONSHIPS = {
ThreatIntel.ObjectsNames.MALWARE:
{
ThreatIntel.ObjectsNames.MALWARE: 'related-to',
ThreatIntel.ObjectsNames.THREAT_ACTOR: 'used-by',
ThreatIntel.ObjectsNames.CAMPAIGN: 'used-by',
ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'used-by'
},
ThreatIntel.ObjectsNames.THREAT_ACTOR:
{
ThreatIntel.ObjectsNames.MALWARE: 'uses',
ThreatIntel.ObjectsNames.THREAT_ACTOR: 'related-to',
ThreatIntel.ObjectsNames.CAMPAIGN: 'attributed-by',
ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'uses'
},
ThreatIntel.ObjectsNames.CAMPAIGN:
{
ThreatIntel.ObjectsNames.MALWARE: 'uses',
ThreatIntel.ObjectsNames.THREAT_ACTOR: 'attributed-to',
ThreatIntel.ObjectsNames.CAMPAIGN: 'related-to',
ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'used-by'
},
ThreatIntel.ObjectsNames.ATTACK_PATTERN:
{
ThreatIntel.ObjectsNames.MALWARE: 'uses',
ThreatIntel.ObjectsNames.THREAT_ACTOR: 'used-by',
ThreatIntel.ObjectsNames.CAMPAIGN: 'uses',
ThreatIntel.ObjectsNames.ATTACK_PATTERN: 'related-to'
},
}
SCORES_MAP = {
ThreatIntel.ObjectsNames.MALWARE: ThreatIntel.ObjectsScore.MALWARE,
ThreatIntel.ObjectsNames.THREAT_ACTOR: ThreatIntel.ObjectsScore.THREAT_ACTOR,
ThreatIntel.ObjectsNames.CAMPAIGN: ThreatIntel.ObjectsScore.CAMPAIGN,
ThreatIntel.ObjectsNames.ATTACK_PATTERN: ThreatIntel.ObjectsScore.ATTACK_PATTERN,
}
# The page size in the AutoFocus response (can be 1-200)
PAGE_SIZE = 50
''' CLIENT CLASS '''
class Client(BaseClient):
"""
Client class to interact with AutoFocus API
Args:
api_key: AutoFocus API Key.
"""
def __init__(self, api_key, base_url, verify, proxy):
super().__init__(base_url=base_url, verify=verify, proxy=proxy)
self.headers = {
'apiKey': api_key,
'Content-Type': 'application/json'
}
LOG.add_replace_strs(api_key)
def get_tags(self, data: Dict[str, Any]): # pragma: no cover
res = self._http_request('POST',
url_suffix='tags',
headers=self.headers,
json_data=data,
timeout=90,
)
return res
def get_tag_details(self, public_tag_name: str): # pragma: no cover
res = self._http_request('POST',
url_suffix=f'tag/{public_tag_name}',
headers=self.headers,
timeout=90,
)
return res
def build_iterator(self, is_get_command: bool, limit: int = -1) -> list:
"""
Retrieves all entries from the feed.
This method implements the logic to get tags from the feed.
Args:
limit: max amount of results to return
is_get_command: whether this method is called from the get-indicators-command
Returns:
A list of objects, containing the indicators.
"""
results: list = []
if is_get_command:
# since get-indicators command is used mostly for debug,
# getting the tags from the first page is sufficient
page_num = 0
else:
integration_context = get_integration_context()
# if so, than this is the first fetch
if not integration_context:
page_num = 0
time_of_first_fetch = date_to_timestamp(datetime.now(), DATE_FORMAT)
set_integration_context({'time_of_first_fetch': time_of_first_fetch})
else:
page_num = integration_context.get('page_num', 0)
get_tags_response = self.get_tags({
'pageNum': page_num,
'pageSize': PAGE_SIZE,
'sortBy': 'created_at'
})
tags = get_tags_response.get('tags', [])
# when finishing the "first level fetch" (getting all the tags from the feed), the next call to the api
# will be with a page num greater than the total pages, and the api should return an empty tags list.
if not tags:
# now the fetch will retrieve only tags that has been updated after the last fetch time
return incremental_level_fetch(self)
# this is the "first level fetch" logic. Every fetch returns at most PAGE_SIZE indicators from the feed.
for tag in tags:
if is_get_command and limit > 0:
if len(results) >= limit:
return results
public_tag_name = tag.get('public_tag_name', '')
tag_details_response = self.get_tag_details(public_tag_name)
results.append(tag_details_response)
if not is_get_command:
page_num += 1
context = get_integration_context()
context['page_num'] = page_num
set_integration_context(context)
return results
''' HELPER FUNCTIONS '''
def incremental_level_fetch(client: Client) -> list:
"""
This method implements the incremental level of the feed. It checks if any updates
have been made in the tags from the last fetch time, and returns the updated tags.
Args:
client: Client object
Returns:
A list of tag details represents the tags that have been updated.
"""
results: list = []
integration_context = get_integration_context()
# This field saves tags that have been updated since the last fetch time and ne | ed to be updated in demisto
list_of_all_updated_tags = argToList(integration_context.get('tags_need_to_be_fetched', ''))
time_from_last_update = integration_context.get('time_of_first_fetch')
index_to_delete = 0
for tag in list_of_all_updated_tags: # pragma: no cover
# if there are such tags, we first get all of them, so we wont miss any tags
if len(results) < PAGE_SIZE:
results.append(client.get_tag_details(tag.get('public_t | ag_name', '')))
index_to_delete += 1
else:
context = get_integration_context()
context['time_of_first_fetch'] = date_to_timestamp(datetime.now(), DATE_FORMAT)
context['tags_need_to_be_fetched'] = list_of_all_updated_tags[index_to_delete:]
set_integration_context(context)
return results
list_of_all_updated_tags = get_all_updated_tags_since_last_fetch(client,
list_of_all_updated_tags,
time_from_last_update)
# add only PAGE_SIZE tag_details to results, so we wont make too many calls to the API
index_to_delete = 0
for tag in list_of_all_updated_tags:
if len(results) < PAGE_SIZE:
public_tag_name = tag.get('public_tag_name')
response = client.get_tag_details(public_tag_name)
results.append(response)
index_to_delete += 1
else:
break
# delete from the list all tags that will be returned this fetch
list_of_all_updated_tags = list_of_all_updated_tags[index_to_delete:]
# update integration context
context = get_integration_context()
context['tags_need_to_be_fetched'] = list_of_all_updated_tags
context['time_of_first_fetch'] = date_to_timestam |
advisory/djangosaml2_tenant | djangosaml2/utils.py | Python | apache-2.0 | 1,490 | 0 | # Copyright (C) 2012 Yaco Sistemas (http://www.yaco.es)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django.conf import settings
def get_custom_setting(name, default=None):
if hasattr(settings, name):
return getattr(settings, name)
else:
return default
def available_idps(config, langpref=None):
if langpref is None:
langpref = "en"
idps = set()
for m | etadata_name, metadata in config.metadata.metadata.items():
result = metadata.any('idpsso_descriptor', 'single_sign_on_service')
if result:
idps = idps.union(set(result.keys()))
return dict([(idp, config.metadata.name(idp, langpref)) for idp in id | ps])
def get_location(http_info):
"""Extract the redirect URL from a pysaml2 http_info object"""
assert 'headers' in http_info
headers = http_info['headers']
assert len(headers) == 1
header_name, header_value = headers[0]
assert header_name == 'Location'
return header_value
|
RevansChen/online-judge | Codewars/8kyu/find-multiples-of-a-number/Python/test.py | Python | mit | 238 | 0.008403 | # Python - 3.6.0
Test.expect(find_multiples(5, 25) == [5, 10, 15, 20, 25], f'{str(find_multiples(5, 25))} should equal [5, 10, 15, 20, 25]')
Te | st.expect(find_multiples(1, 2) == [1, 2], f'{str(find_multipl | es(1, 2))} should equal [1, 2]')
|
larsks/cloud-init-patches | tests/unittests/test_handler/test_handler_apt_conf_v1.py | Python | gpl-3.0 | 4,917 | 0 | from cloudinit.config import cc_apt_configure
from cloudinit import util
from ..helpers import TestCase
import copy
import os
import re
import shutil
import tempfile
def load_tfile_or_url(*args, **kwargs):
return(util.decode_binary(util.read_file_or_url(*args, **kwargs).contents))
class TestAptProxyConfig(TestCase):
def setUp(self):
super(TestAptProxyConfig, self).setUp()
self.tmp = tempfile.mkdtemp()
self.addCleanup(shutil.rmtree, self.tmp)
self.pfile = os.path.join(self.tmp, "proxy.cfg")
self.cfile = os.path.join(self.tmp, "config.cfg")
def _search_apt_config(self, contents, ptype, value):
return re.search(
r"acquire::%s::proxy\s+[\"']%s[\"'];\n" % (ptype, value),
contents, flags=re.IGNORECASE)
def test_apt_proxy_written(self):
cfg = {'proxy': 'myproxy'}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
self.assertF | alse(os.path.isfile(self.cfile))
contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_http_proxy_written(self):
cfg = {'http_proxy': 'myproxy'}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
| self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "myproxy"))
def test_apt_all_proxy_written(self):
cfg = {'http_proxy': 'myproxy_http_proxy',
'https_proxy': 'myproxy_https_proxy',
'ftp_proxy': 'myproxy_ftp_proxy'}
values = {'http': cfg['http_proxy'],
'https': cfg['https_proxy'],
'ftp': cfg['ftp_proxy'],
}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
contents = load_tfile_or_url(self.pfile)
for ptype, pval in values.items():
self.assertTrue(self._search_apt_config(contents, ptype, pval))
def test_proxy_deleted(self):
util.write_file(self.cfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
self.assertFalse(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
def test_proxy_replaced(self):
util.write_file(self.cfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'proxy': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.pfile))
contents = load_tfile_or_url(self.pfile)
self.assertTrue(self._search_apt_config(contents, "http", "foo"))
def test_config_written(self):
payload = 'this is my apt config'
cfg = {'conf': payload}
cc_apt_configure.apply_apt_config(cfg, self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
self.assertFalse(os.path.isfile(self.pfile))
self.assertEqual(load_tfile_or_url(self.cfile), payload)
def test_config_replaced(self):
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({'conf': "foo"},
self.pfile, self.cfile)
self.assertTrue(os.path.isfile(self.cfile))
self.assertEqual(load_tfile_or_url(self.cfile), "foo")
def test_config_deleted(self):
# if no 'conf' is provided, delete any previously written file
util.write_file(self.pfile, "content doesnt matter")
cc_apt_configure.apply_apt_config({}, self.pfile, self.cfile)
self.assertFalse(os.path.isfile(self.pfile))
self.assertFalse(os.path.isfile(self.cfile))
class TestConversion(TestCase):
def test_convert_with_apt_mirror_as_empty_string(self):
# an empty apt_mirror is the same as no apt_mirror
empty_m_found = cc_apt_configure.convert_to_v3_apt_format(
{'apt_mirror': ''})
default_found = cc_apt_configure.convert_to_v3_apt_format({})
self.assertEqual(default_found, empty_m_found)
def test_convert_with_apt_mirror(self):
mirror = 'http://my.mirror/ubuntu'
f = cc_apt_configure.convert_to_v3_apt_format({'apt_mirror': mirror})
self.assertIn(mirror, {m['uri'] for m in f['apt']['primary']})
def test_no_old_content(self):
mirror = 'http://my.mirror/ubuntu'
mydata = {'apt': {'primary': {'arches': ['default'], 'uri': mirror}}}
expected = copy.deepcopy(mydata)
self.assertEqual(expected,
cc_apt_configure.convert_to_v3_apt_format(mydata))
# vi: ts=4 expandtab
|
protwis/protwis | common/migrations/0003_citation_page_name.py | Python | apache-2.0 | 413 | 0 | # Generated by D | jango 3.0.3 on 2020-08-20 16:17
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('common', '0002_citation'),
]
operations = [
migrations.AddField(
model_name='citation',
name='page_name',
field=models.TextField(default=None),
preserve_default=False,
),
]
| |
popazerty/beyonwiz-sh4 | lib/python/Components/About.py | Python | gpl-2.0 | 8,286 | 0.03705 | from Tools.Directories import resolveFilename, SCOPE_SYSETC
from Components.Console import Console
import sys
import time
import re
from boxbranding import getImageVersion, getMachineBrand
from sys import modules
import socket, fcntl, struct
def getVersionString():
return getImageVersion()
def getImageVersionString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "version":
version = splitted[1].replace('\n','')
file.close()
return version
except IOError:
return "unavailable"
def getImageUrlString():
try:
if getMachineBrand() == "GI":
return "www.xpeed-lx.de"
elif getMachineBrand() == "Beyonwiz":
return "www.beyonwiz.com.au"
else:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "url":
version = splitted[1].replace('\n','')
file.close()
return version
except IOError:
return "unavailable"
def getEnigmaVersionString():
return getImageVersion()
def getGStreamerVersionString():
import enigma
return enigma.getGStreamerVersionString()
def getKernelVersionString():
try:
f = open("/proc/version","r")
kernelversion = f.read().split(' ', 4)[2].split('-',2)[0]
f.close()
return kernelversion
except:
return _("unknown")
def getLastUpdateString():
try:
file = open(resolveFilename(SCOPE_SYSETC, 'image-version'), 'r')
lines = file.readlines()
for x in lines:
splitted = x.split('=')
if splitted[0] == "date":
#YYYY MM DD hh mm
#2005 11 29 01 16
string = splitted[1].replace('\n','')
year = string[0:4]
month = string[4:6]
day = string[6:8]
date = '-'.join((year, month, day))
hour = string[8:10]
minute = string[10:12]
time = ':'.join((hour, minute))
lastupdated = ' '.join((date, time))
file.close()
return lastupdated
except IOError:
return "unavailable"
class BootLoaderVersionFetcher:
monMap = {
"Jan": "01", "Feb": "02", "Mar": "03",
"Apr": "04", "May": "05", "Jun": "06",
"Jul": "07", "Aug": "08", "Sep": "09",
"Oct": "10", "Nov": "11", "Dec": "12",
}
dateMatch = "(Sun|Mon|Tue|Wed|Thu|Fri|Sat) (" + '|'.join(monMap.keys()) + ") ([ 1-3][0-9]) [0-2][0-9]:[0-5][0-9]:[0-5][0-9] [A-Za-z]+ ([0-9]{4})"
dateMatchRe = re.compile(dateMatch)
def __init__(self):
pass
def searchBootVer(self, appcallback):
self.console = Console()
cmd = "strings -n 28 /dev/mtd3ro | grep ' [0-2][0-9]:[0-5][0-9]:[0-5][0-9] '"
self.console.ePopen(cmd, callback=self.searchBootVerFinished, extra_args=appcallback)
def searchBootVerFinished(self, result, retval, extra_args):
callback = extra_args
latest_date = (0, 0, 0, "Unknown")
for line in result.splitlines():
line = line.strip()
match = self.dateMatchRe.search(line)
groups = match.groups()
if len(groups) == 4:
month = self.monMap[groups[1]]
day = groups[2]
if day[0] == ' ':
day = '0' + day[1:]
year = groups[3]
d = (year, month, day, line)
if latest_date < d:
latest_date = d
if callback:
callback(latest_date[3])
__bootLoaderFetcher = BootLoaderVersionFetcher()
def getBootLoaderVersion(callback):
__bootLoaderFetcher.searchBootVer(callback)
import socket, fcntl, struct
SIOCGIFADDR = 0x8915
SIOCGIFBRDADDR = 0x8919
SIOCSIFHWADDR = 0x8927
SIOCGIFNETMASK = 0x891b
SIOCGIFFLAGS = 0x8913
ifflags = {
"up": 0x1, # interface is up
"broadcast": 0x2, # broadcast address valid
"debug": 0x4, # turn on debugging
"loopback": 0x8, # is a loopback net
"pointopoint": 0x10, # interface is has p-p link
"notrailers": 0x20, # avoid use of trailers
"running": 0x40, # interface RFC2863 OPER_UP
"noarp": 0x80, # no ARP protocol
"promisc": 0x100, # receive all packets
"allmulti": 0x200, # receive all multicast packets
"master": 0x400, # master of a load balancer
"slave": 0x800, # slave of a load balancer
"multicast": 0x1000, # Supports multicast
"portsel": 0x2000, # can set media type
"automedia": 0 | x4000, # auto media select active
"dynamic": 0x8000, # dialup device with changing addresses
"lower_up": 0x10000, # driver signals L1 up
"dormant": 0x20000, # driver signals dormant
"echo": 0x40000, # echo sent packets
}
def _ifinfo(sock, addr, ifname):
iface = struct.pack('256s', ifname[:15])
info = fcntl.ioctl(sock.fileno(), addr, iface)
if addr = | = SIOCSIFHWADDR:
return ':'.join(['%02X' % ord(char) for char in info[18:24]])
elif addr == SIOCGIFFLAGS:
return socket.ntohl(struct.unpack("!L", info[16:20])[0])
else:
return socket.inet_ntoa(info[20:24])
def getIfConfig(ifname):
ifreq = {'ifname': ifname}
infos = {}
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
# offsets defined in /usr/include/linux/sockios.h on linux 2.6
infos['addr'] = SIOCGIFADDR
infos['brdaddr'] = SIOCGIFBRDADDR
infos['hwaddr'] = SIOCSIFHWADDR
infos['netmask'] = SIOCGIFNETMASK
infos['flags'] = SIOCGIFFLAGS
try:
for k,v in infos.items():
ifreq[k] = _ifinfo(sock, v, ifname)
except:
pass
if 'flags' in ifreq:
flags = ifreq['flags']
ifreq['flags'] = dict([(name, bool(flags & flag)) for name, flag in ifflags.items()])
sock.close()
return ifreq
def getAllIfTransferredData():
transData = {}
for line in file("/proc/net/dev").readlines():
flds = line.split(':')
if len(flds) > 1:
ifname = flds[0].strip()
flds = flds[1].strip().split()
rx_bytes, tx_bytes = (flds[0], flds[8])
transData[ifname] = (rx_bytes, tx_bytes)
return transData
def getIfTransferredData(ifname):
for line in file("/proc/net/dev").readlines():
if ifname in line:
data = line.split('%s:' % ifname)[1].split()
rx_bytes, tx_bytes = (data[0], data[8])
return rx_bytes, tx_bytes
return None
def getGateways():
gateways = {}
count = 0
for line in file("/proc/net/route").readlines():
if count > 0:
flds = line.strip().split()
for i in range(1, 4):
flds[i] = int(flds[i], 16)
if flds[3] & 2:
if flds[0] not in gateways:
gateways[flds[0]] = []
gateways[flds[0]].append({
"destination": socket.inet_ntoa(struct.pack("!L", socket.htonl(flds[1]))),
"gateway": socket.inet_ntoa(struct.pack("!L", socket.htonl(flds[2])))
})
count += 1
return gateways
def getIfGateways(ifname):
return getGateways().get(ifname)
def getModelString():
try:
file = open("/proc/stb/info/boxtype", "r")
model = file.readline().strip()
file.close()
return model
except IOError:
return "unknown"
def getChipSetString():
try:
f = open('/proc/stb/info/chipset', 'r')
chipset = f.read()
f.close()
return str(chipset.lower().replace('\n','').replace('bcm',''))
except IOError:
return "unavailable"
def getCPUSpeedString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("cpu MHz"):
mhz = float(splitted[1].split(' ')[0])
if mhz and mhz >= 1000:
mhz = "%s GHz" % str(round(mhz/1000,1))
else:
mhz = "%s MHz" % str(round(mhz,1))
file.close()
return mhz
except IOError:
return "unavailable"
def getCPUString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("system type"):
system = splitted[1].split(' ')[0]
elif splitted[0].startswith("Processor"):
system = splitted[1].split(' ')[0]
file.close()
return system
except IOError:
return "unavailable"
def getCpuCoresString():
try:
file = open('/proc/cpuinfo', 'r')
lines = file.readlines()
for x in lines:
splitted = x.split(': ')
if len(splitted) > 1:
splitted[1] = splitted[1].replace('\n','')
if splitted[0].startswith("processor"):
if int(splitted[1]) > 0:
cores = 2
else:
cores = 1
file.close()
return cores
except IOErr |
cpaton/dvblink-plex-client | plex/Contents/Code/localization.py | Python | mit | 4,066 | 0.000246 | IDS_PLUGIN_TITLE = L("IDS_PLUGIN_TITLE")
IDS_SETTINGS_MENU_ITEM = L("IDS_SETTINGS_MENU_ITEM")
IDS_RECORDED_TV_MENU_ITEM = L("IDS_RECORDED_TV_MENU_ITEM")
IDS_CHANNEL_MENU_ITEM = L("IDS_CHANNEL_MENU_ITEM")
IDS_GUIDE_MENU_ITEM = L("IDS_GUIDE_MENU_ITEM")
IDS_RADIO_MENU_ITEM = L("IDS_RADIO_MENU_ITEM")
IDS_SCHEDULED_RECORDINGS_MENU_ITEM = L("IDS_SCHEDULED_RECORDINGS_MENU_ITEM")
IDS_CAPTION_ERROR = L("IDS_CAPTION_ERROR")
IDS_CAPTION_WARNING = L("IDS_CAPTION_WARNING")
IDS_CAPTION_INFO = L("IDS_CAPTION_INFO")
IDS_PORT_VALIDATOR_MSG = L("IDS_PORT_VALIDATOR_MSG")
IDS_SUCCESS_MSG = L("IDS_SUCCESS_MSG")
IDS_COMMAND_NOT_IMPLEMENTED_MSG = L("IDS_COMMAND_NOT_IMPLEMENTED_MSG")
IDS_MC_NOT_RUNNING_MSG = L("IDS_MC_NOT_RUNNING_MSG")
IDS_MC_CONNECTION_ERROR_MSG = L("IDS_MC_CONNECTION_ERROR_MSG")
IDS_INVALID_DATA_MSG = L("IDS_INVALID_DATA_MSG")
IDS_DVBLINK_CONNECTION_ERROR_MSG = L("IDS_DVBLINK_CONNECTION_ERROR_MSG")
IDS_NO_DEFAULT_RECORDER_MSG = L("IDS_NO_DEFAULT_RECORDER_MSG")
IDS_UNAUTHORISED_ERROR_MSG = L("IDS_UNAUTHORISED_ERROR_MSG")
IDS_GENERAL_ERROR_MSG = L("IDS_GENERAL_ERROR_MSG")
IDS_INVALID_PARAM_MSG = L("IDS_INVALID_PARAM_MSG")
IDS_UNKNOWN_MSG = L("IDS_UNKNOWN_MSG")
IDS_CHANNELS_NOT_AVALIABLE = L("IDS_CHANNELS_NOT_AVALIABLE")
IDS_RECORDER_NOT_FOUND_MSG = L("IDS_RECORDER_NOT_FOUND_MSG")
IDS_EPISODE_LABEL = L("IDS_EPISODE_LABEL")
IDS_SEASON_LABEL = L("IDS_SEASON_LABEL")
IDS_ACTORS_HEADER = L("IDS_ACTORS_HEADER")
IDS_GUESTS_HEADER = L("IDS_GUESTS_HEADER")
IDS_DIRECTORS_HEADER = L("IDS_DIRECTORS_HEADER")
IDS_PRODUCERS_HEADER = L("IDS_PRODUCERS_HEADER")
IDS_WRITERS_HEADER = L("IDS_WRITERS_HEADER")
IDS_YEAR_HEADER = L("IDS_YEAR_HEADER")
IDS_SERVER_ADDRESS = L("IDS_SERVER_ADDRESS")
IDS_SERVER_PORT = L("IDS_SERVER_PORT")
IDS_USER_NAME = L("IDS_USER_NAME")
IDS_PASSWORD = L("IDS_PASSWORD")
IDS_CHANNEL_SORTING = L("IDS_CHANNEL_SORTING")
IDS_BY_NAME = L("IDS_BY_NAME")
IDS_BY_NUMBER = L("IDS_BY_NUMBER")
IDS_CHANNEL_SHOW_EPG = L("IDS_CHANNEL_SHOW_EPG")
IDS_ADD_RECORDING = L("IDS_ADD_RECORDING")
IDS_CANCEL_RECORDING = L("IDS_CANCEL_RECORDING")
IDS_RECORD_SERIES = L("IDS_RECORD_SERIES")
IDS_CANCEL_RECORD_SERIES = L("IDS_CANCEL_RECORD_SERIES")
IDS_DATA_NOT_AVAILABLE = L("IDS_DATA_NOT_AVAILABLE")
IDS_RECORD_LABEL = L("IDS_RECORD_LABEL")
IDS_SERIES_RECORD_LABEL = L("IDS_SERIES_RECORD_LABEL")
IDS_ADD_RECORDING_ERROR_MSG = L("IDS_ADD_RECORDING_ERROR_MSG")
IDS_CANCEL_RECORDING_ERROR_MSG = L("IDS_CANCEL_RECORDING_ERROR_MSG")
IDS_ADD_SCHEDULE_ERROR_MSG = L("IDS_ADD_SCHEDULE_ERROR_MSG")
IDS_CANCEL_SCHEDULE_ERROR_MSG = L("IDS_CANCEL_SCHEDULE_ERROR_MSG")
IDS_TODAY_LABEL = L("IDS_TODAY_LABEL")
IDS_NOW_LABEL = L("IDS_NOW_LABEL")
IDS_EPG_DAYS_LABEL = L("IDS_EPG_DAYS_LABEL")
IDS_EPG_2_DAY = L("IDS_EPG_2_DAY")
IDS_EPG_3_DAY = L("IDS_EPG_3_DAY")
IDS_EPG_4_DAY = L("IDS_EPG_4_DAY")
IDS_EPG_5_DAYS = L("IDS_EPG_5_DAYS")
IDS_EPG_6_DAYS = L("IDS_EPG_6_DAYS")
IDS_EPG_7_DAYS = L("IDS_EPG_7_DAYS")
IDS_UNKNOWN = L("IDS_UNKNOWN")
IDS_SEARCH_MENU_ITEM = L("IDS_SEARCH_MENU_ITEM")
IDS_KEYWORD_MENU_ITEM = L("IDS_KEYWORD_MENU_ITEM")
IDS_CATEGORIES_MENU_ITEM = L("IDS_CATEGORIES_MENU_ITEM")
IDS_ENTER_KEYWORD_TITLE = L("IDS_ENTER_KEYWORD_TITLE")
IDS_GENRE_ANY = L("IDS_GENRE_ANY")
IDS_GENRE_NEWS = L("IDS_GENRE_NEWS")
IDS_GENRE_KIDS = L("IDS_GENRE_KIDS")
IDS_GENRE_MOVIE = L("IDS_GENRE_MOVIE")
IDS_GENRE_SPORT = L("IDS_GENRE_SPORT")
IDS_GENRE_DOCUMENTARY = L("IDS_GENRE_DOCUMENTARY")
IDS_GENRE_ACTION = L("IDS_GENRE_ACTION")
IDS_GENRE_COMEDY = L("IDS_GENRE_COMEDY")
IDS_GENRE_DRAMA = L("IDS_GENRE_DRAMA")
IDS_GENRE_EDU = L("IDS_GENRE_EDU")
IDS_GENRE_HORROR = L("IDS_GENRE_HORROR")
IDS_GENRE_MUSIC = L("IDS_GENRE_MUSIC")
IDS_GENRE_REALITY = L("IDS_GENRE_REALITY")
IDS_GENRE_ROMANCE = L("IDS_GENRE_ROMANCE")
IDS_GENRE_SCIFI = L("IDS_GENRE_SCIFI")
| IDS_GENRE_SERIAL = L("IDS_GENRE_SERIAL")
IDS_GENRE_SOAP = L("IDS_GENRE_SOAP")
IDS_GENRE_SPECIAL = L("IDS_GENRE_SPECIAL")
IDS_GENRE_THRILLER = L("IDS_GENRE_THRILLER")
IDS_GENRE_ADULT = L("IDS_GENRE_ADULT")
IDS_CONFLICT_RECORDING_LABEL = L("IDS_CONFLICT_RECORDI | NG_LABEL")
IDS_ENTER_KEYWORD_WARNING = L("IDS_ENTER_KEYWORD_WARNING") |
lukas-hetzenecker/home-assistant | homeassistant/components/philips_js/remote.py | Python | apache-2.0 | 3,097 | 0.000323 | """Remote control support for Apple TV."""
import asyncio
from haphilipsjs.typing import SystemType
from homeassistant.components.remote import (
ATTR_DELAY_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
RemoteEntity,
)
from . import LOGGER, PhilipsTVDataUpdateCoordinator
from .const import CONF_SYSTEM, DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the configuration entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[
PhilipsTVRemote(
coordinator,
config_entry.data[CONF_SYSTEM],
config_entry.unique_id,
)
]
)
class PhilipsTVRemote(RemoteEntity):
"""Device that sends commands."""
def __init__(
self,
coordinator: PhilipsTVDataUpdateCoordinator,
system: SystemType,
unique_id: str,
) -> None:
"""Initialize the Philips TV."""
self._tv = coordinator.api
self._coordinator = coordinator
self._system = system
self._unique_id = unique_id
@property
def name(self):
"""Return the device name."""
return self._system["name"]
@property
def is_on(self):
"""Return true if device is on."""
return bool(
self._tv.on and (self._tv.powerstate == "On" or self._tv.powerstate is None)
)
@property
def should_poll(self):
"""No polling needed for Apple TV."""
return False
@property
def unique_id(self):
"""Return unique identifier if known."""
return self._unique_id
@property
def device_info | (self):
"""Return a device description for device registry."""
return {
"name": self._system["name"],
"identifiers": {
(DOMAIN, self._unique_id),
},
"model": self._system.get("model"),
"manufacturer": "Philips",
"sw_version": self._system.get("softwareversion | "),
}
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
if self._tv.on and self._tv.powerstate:
await self._tv.setPowerState("On")
else:
await self._coordinator.turn_on.async_run(self.hass, self._context)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
if self._tv.on:
await self._tv.sendKey("Standby")
self.async_write_ha_state()
else:
LOGGER.debug("Tv was already turned off")
async def async_send_command(self, command, **kwargs):
"""Send a command to one device."""
num_repeats = kwargs[ATTR_NUM_REPEATS]
delay = kwargs.get(ATTR_DELAY_SECS, DEFAULT_DELAY_SECS)
for _ in range(num_repeats):
for single_command in command:
LOGGER.debug("Sending command %s", single_command)
await self._tv.sendKey(single_command)
await asyncio.sleep(delay)
|
beloglazov/openstack-neat | setup.py | Python | apache-2.0 | 4,540 | 0.001101 | # Copyright 2012 Anton Beloglazov
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
The OpenStack Neat Project
==========================
OpenStack Neat is a project intended to provide an extension to
OpenStack implementing dynamic consolidation of Virtual Machines (VMs)
using live migration. The major objective of dynamic VM consolidation
is to improve the utilization of physical resources and reduce energy
consumption by re-allocating VMs using live migration according to
their real-time resource demand and switching idle hosts to the sleep
mode. Apart from consolidating VMs, the system should be able to react
to increases in the resource demand and deconsolidate VMs when
necessary to avoid performance degradation. In general, the problem of
dynamic VM consolidation includes 4 sub-problems: host underload /
overload detection, VM selection, and VM placement.
This work is conducted within the Cloud Computing and Distributed
Systems (CLOUDS) Laboratory (http://www.cloudbus.org/) at the
University of Melbourne. The problem of dynamic VM consolidation
considering Quality of Service (QoS) constraints has been studied from
the theoretical perspective and algorithms addressing the sub-problems
listed above have been proposed [1], [2]. The algorithms have been
evaluated using CloudSim (http://code.google.com/p/cloudsim/) and
real-world workload traces collected from more than a thousand
PlanetLab VMs hosted on servers located in more than 500 places around
the world.
The aim of the OpenStack Neat project is to provide an extensible
framework for dynamic consolidation of VMs based on the OpenStack
platform. The framework should provide an infrastructure enabling the
interaction of components implementing the decision-making algorithms.
The framework should allow configuration-driven switching of different
implementations of the decision-making algorithms. The implementation
of the framework will include the algorithms proposed in our previous
works [1], [2].
[1] Anton Beloglazov and Rajkumar Buyya, "Optimal Online Deterministic
Algorithms and Adaptive Heuristics for Energy and Performance
Efficient Dynamic Consolidation of Virtual Machines in Cloud Data
Centers", Concurrency and Computation: Practice and Experience (CCPE),
Volume 24, Issue 13, Pages: 1397-1420, John Wiley & Sons, Ltd, New
York, USA, 2012. Download:
http://beloglazov.info/papers/2012-optimal-algorithms-ccpe.pdf
[2] Anton Beloglazov and Rajkumar Buyya, "Managing Overloaded Hosts
for Dynamic Consolidation of Virtual Machines in Cloud Data Centers
Under Quality of Service Constraints", IEEE Transactions on Parallel
and Distributed Systems (TPDS), IEEE CS Press, USA, 2012 (in press,
accepted on August 2, 2012). Download:
http://beloglazov.info/papers/2012-host-overload-detection-tpds.pdf
"""
import distribute_setup
distribute_setup.use_setuptools()
from setuptools import setup, find_packages
# Package definition: installs the four neat console services, their
# init.d scripts, and the default /etc/neat configuration.
setup(
    name='openstack-neat',
    version='0.1',
    description='The OpenStack Neat Project',
    long_description=__doc__,
    author='Anton Beloglazov',
    author_email='anton.beloglazov@gmail.com',
    url='https://github.com/beloglazov/openstack-neat',
    platforms='any',
    include_package_data=True,
    license='LICENSE',
    packages=find_packages(),
    test_suite='tests',
    tests_require=['pyqcy', 'mocktest', 'PyContracts'],
    # one executable per neat component, each pointing at a start() function
    entry_points = {
        'console_scripts': [
            'neat-data-collector = neat.locals.collector:start',
            'neat-local-manager = neat.locals.manager:start',
            'neat-global-manager = neat.globals.manager:start',
            'neat-db-cleaner = neat.globals.db_cleaner:start',
        ]
    },
    # system-level files installed outside site-packages:
    # service scripts plus the default configuration file
    data_files = [('/etc/init.d', ['init.d/openstack-neat-data-collector',
                                   'init.d/openstack-neat-local-manager',
                                   'init.d/openstack-neat-global-manager',
                                   'init.d/openstack-neat-db-cleaner']),
                  ('/etc/neat', ['neat.conf'])],
)
|
fedspendingtransparency/data-act-broker-backend | dataactcore/migrations/versions/5de499ab5b62_cascade_useraffiliation_deletes.py | Python | cc0-1.0 | 1,559 | 0.010263 | """Cascade UserAffiliation deletes
Revision ID: 5de499ab5b62
Revises: 14f51f27a106
Create Date: 2016-12-13 00:21:39.842218
"""
# revision identifiers, used by Alembic.
revision = '5de499ab5b62'        # this migration's id
down_revision = '14f51f27a106'   # parent migration in the chain
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade function (upgrade_<engine>)."""
    handler = globals()["upgrade_%s" % engine_name]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade function (downgrade_<engine>)."""
    handler = globals()["downgrade_%s" % engine_name]
    handler()
def upgrade_data_broker():
    """Recreate both user_affiliation foreign keys with ON DELETE CASCADE."""
    ### commands auto generated by Alembic - please adjust! ###
    fk_specs = (
        ('user_affiliation_user_fk', 'users', 'user_id'),
        ('user_affiliation_cgac_fk', 'cgac', 'cgac_id'),
    )
    # drop both constraints first, then re-create them with cascading deletes
    for fk_name, _, _ in fk_specs:
        op.drop_constraint(fk_name, 'user_affiliation', type_='foreignkey')
    for fk_name, ref_table, column in fk_specs:
        op.create_foreign_key(fk_name, 'user_affiliation', ref_table,
                              [column], [column], ondelete='CASCADE')
    ### end Alembic commands ###
def downgrade_data_broker():
    """Restore the user_affiliation foreign keys without ON DELETE CASCADE.

    Fixes a dataset-corruption artifact: a stray "|" token split
    ``op.create_foreign_key`` (and the closing marker comment) in two.
    """
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_constraint('user_affiliation_cgac_fk', 'user_affiliation', type_='foreignkey')
    op.drop_constraint('user_affiliation_user_fk', 'user_affiliation', type_='foreignkey')
    op.create_foreign_key('user_affiliation_cgac_fk', 'user_affiliation', 'cgac', ['cgac_id'], ['cgac_id'])
    op.create_foreign_key('user_affiliation_user_fk', 'user_affiliation', 'users', ['user_id'], ['user_id'])
    ### end Alembic commands ###
|
buzzfeed/caliendo | test/test_replay.py | Python | mit | 2,701 | 0.007775 | import unittest
import time
import random
import os
os.environ['USE_CALIENDO'] = 'True'
from caliendo.db import flatfiles
from caliendo.facade import patch
from caliendo.patch import replay
from caliendo.util import recache
from caliendo import Ignore
import caliendo
from test.api import callback
from test.api.callback import method_calling_method
from test.api.callback import method_with_callback
from test.api.callback import callback_for_method
from test.api.callback import CALLBACK_FILE
from test.api.callback import CACHED_METHOD_FILE
def run_n_times(func, n):
    """Run ``func(i)`` for i in 0..n-1, each call in its own forked child.

    The parent waits for each child before forking the next one, so the
    calls are strictly sequential; every child exits via os._exit(0).
    """
    for i in range(n):
        child = os.fork()
        if child == 0:
            # child process: run the payload and exit immediately,
            # skipping interpreter cleanup
            func(i)
            os._exit(0)
        os.waitpid(child, 0)
class ReplayTestCase(unittest.TestCase):
    """Exercises caliendo's @replay/@patch behavior across forked runs.

    Fixes dataset-corruption artifacts: a stray "|" inside the
    @patch target string and before one ``do_it(i)`` call.
    """

    def setUp(self):
        # reset caliendo's registered suite and in-memory caches, and
        # truncate the scratch files the patched callbacks write to
        caliendo.util.register_suite()
        recache()
        flatfiles.CACHE_['stacks'] = {}
        flatfiles.CACHE_['seeds'] = {}
        flatfiles.CACHE_['cache'] = {}
        with open(CALLBACK_FILE, 'w+') as f:
            pass
        with open(CACHED_METHOD_FILE, 'w+') as f:
            pass

    def test_replay(self):
        """Each forked replay run should see one more callback invocation."""
        def do_it(i):
            @replay('test.api.callback.callback_for_method')
            @patch('test.api.callback.method_with_callback')
            def test(i):
                cb_file = method_with_callback(callback_for_method, 0.5)
                with open(cb_file, 'rb') as f:
                    contents = f.read()
                assert contents == ('.' * (i+1)), "Got {0} was expecting {1}".format(contents, ('.' * (i+1)))
            test(i)
            os._exit(0)
        for i in range(2):
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                do_it(i)
        # the patched method itself should have been recorded only once
        with open(CACHED_METHOD_FILE, 'rb') as f:
            assert f.read() == '.'

    def test_replay_with_ignore(self):
        """Same as test_replay, but ignoring the varying argument position."""
        def do_it(i):
            @replay('test.api.callback.callback_for_method')
            @patch('test.api.callback.method_with_callback', ignore=Ignore([1]))
            def test_(i):
                cb_file = method_with_callback(callback_for_method, random.random())
                with open(cb_file, 'rb') as f:
                    contents = f.read()
                assert contents == ('.' * (i+1)), "Got {0} was expecting {1}".format(contents, ('.' * (i+1)))
            test_(i)
            os._exit(0)
        for i in range(2):
            pid = os.fork()
            if pid:
                os.waitpid(pid, 0)
            else:
                do_it(i)
        with open(CACHED_METHOD_FILE, 'rb') as f:
            assert f.read() == '.'
if __name__ == '__main__':
unittest.main()
|
tensorflow/tensorflow | tensorflow/python/ops/ragged/row_partition.py | Python | apache-2.0 | 58,566 | 0.005703 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A class used to partition a sequence into contiguous subsequences ("rows").
"""
# TODO(martinz): Remove preferred_dtype
# TODO(edloper): Make into a ExtensionType (if possible)
import numpy as np
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_math_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import segment_id_ops
#===============================================================================
# RowPartition
#===============================================================================
# TODO(edloper): Consider removing row_starts and row_limits factory methods
# and accessors from RowPartition. In particular, these two encodings are
# "second-class citizens": we never cache them, and if you do construct a
# RowPartition from them then it may be more expensive than you might expect
# (because we append a value to the beginning/end to transform them into
# splits). If we do remove them from RowPartition, then we would still keep
# the from_row_starts and from_row_limits factory methods in RaggedTensor.
class RowPartition(composite_tensor.CompositeTensor):
"""Partitioning of a sequence of values into contiguous subsequences ("rows").
A `RowPartition` describes how a sequence with `nvals` items should be
divided into `nrows` contiguous subsequences ("rows"). For example, a
`RowPartition` could be used to partition the vector `[1, 2, 3, 4, 5]` into
subsequences `[[1, 2], [3], [], [4, 5]]`. Note that `RowPartition` stores
information about how values are partitioned, but does not include the
partitioned values themselves. `tf.RaggedTensor` is used to pair a `values`
tensor with one or more `RowPartition`s, providing a complete encoding for a
ragged tensor (i.e. a tensor with variable-length dimensions).
`RowPartition`s may be defined using several different schemes:
* `row_lengths`: an integer vector with shape `[nrows]`, which specifies
the length of each row.
* `row_splits`: an integer vector with shape `[nrows+1]`, specifying the
"split points" between each row.
* `row_starts`: an integer vector with shape `[nrows]`, which specifies
the start offset for each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: an integer vector with shape `[nrows]`, which specifies
the stop offset for each row. Equivalent to `row_splits[1:]`.
* `value_rowids` is an integer vector with shape `[nvals]`, corresponding
one-to-one with sequence values, which specifies the row that each value
belongs to. If the partition has empty trailing rows, then `nrows`
must also be specified.
* `uniform_row_length` is an integer scalar, specifying the length of every
row. This scheme may only be used if all rows have the same length.
For example, the following `RowPartition`s all represent the partitioning of
8 values into 5 sublists as follows: `[[*, *, *, *], [], [*, *, *], [*], []]`.
>>> p1 = RowPartition.from_row_lengths([4, 0, 3, 1, 0])
>>> p2 = RowPartition.from_row_splits([0, 4, 4, 7, 8, 8])
>>> p3 = RowPartition.from_row_starts([0, 4, 4, 7, 8], nvals=8)
>>> p4 = RowPartition.from_row_limits([4, 4, 7, 8, 8])
>>> p5 = RowPartition.from_value_rowids([0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
For more information about each scheme, see the documentation for the
its factory method. For additional examples, see the documentation on
`tf.RaggedTensor`.
### Precomputed Encodings
`RowPartition` always stores at least one encoding of the partitioning, but
it can be configured to cache additional encodings as well. This can
avoid unnecessary recomputation in eager mode. (In graph mode, optimizations
such as common subexpression elimination will typically prevent these
unnecessary recomputations.) To check which encodings are precomputed, use
`RowPartition.has_precomputed_<encoding>`. To cache an additional
  encoding, use `RowPartition.with_precomputed_<encoding>`.
"""
#=============================================================================
# Constructor (private)
#=============================================================================
def __init__(self,
row_splits,
row_lengths=None,
value_rowids=None,
| nrows=None,
uniform_row_length=None,
nvals=None,
internal=False):
"""Creates a `RowPartition` from the specified encoding tensor(s).
This constructor is private -- please use one of the following ops to
build `RowPartition`s:
* `RowPartition.from_row_lengths`
* `RowPartition.from_value_rowids`
* `RowPartition.from_row_splits`
* `RowPartition.from_row_starts`
* `RowPartition.from_row_limits`
* `RowPartition.from_uniform_row_length`
    If row_splits has a constant value, then all other arguments should
have a constant value.
Args:
row_splits: A 1-D integer tensor with shape `[nrows+1]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`
value_rowids: A 1-D integer tensor with shape `[nvals]`.
nrows: A 1-D integer scalar tensor.
uniform_row_length: A scalar tensor.
nvals: A scalar tensor.
internal: Private key value, required to ensure that this private
constructor is *only* called from the factory methods.
Raises:
TypeError: If a row partitioning tensor has an inappropriate dtype.
TypeError: If exactly one row partitioning argument was not specified.
ValueError: If a row partitioning tensor has an inappropriate shape.
ValueError: If multiple partitioning arguments are specified.
ValueError: If nrows is specified but value_rowids is not None.
"""
if internal is not _row_partition_factory_key:
raise ValueError("RowPartition constructor is private; please use one "
"of the factory methods instead (e.g., "
"RowPartition.from_row_lengths())")
# Validate the arguments.
if not isinstance(row_splits, ops.Tensor):
raise TypeError("Row-partitioning argument must be a Tensor, got %r" %
row_splits)
if row_splits.dtype not in (dtypes.int32, dtypes.int64):
raise ValueError("Row-partitioning argument must be int32 or int64")
# Validate shapes & dtypes.
row_splits.shape.assert_has_rank(1)
row_splits.set_shape([None])
self._row_splits = row_splits
# Store any cached tensors. These are used to avoid unnecessary
# round-trip conversions when a RowPartition is constructed from
# lengths or rowids, and we later want those lengths/rowids back.
for tensor in [row_lengths, value_rowids, nrows, uniform_row_length, nvals]:
if tensor is not None:
if not isinstance(tensor, ops.Tensor):
raise TypeError("Cached value must be a Tensor or None.")
elif ten |
grantula/fantasypremierleagueapi | fantasypremierleagueapi/tests/test_unit_tests.py | Python | mit | 763 | 0.001311 | # -*- coding: utf-8 -*-
"""
File: test_unit_tests.py
Path: fantasypremierleague/tests/
Author: Grant W
"""
import unittest
import fantasypremierleagueapi as fp
class TestEnsureOneItem(unittest.TestCase):
    """Tests for the fp.ensure_one_item decorator.

    Fixes dataset-corruption artifacts: stray "|" tokens in one test
    method name and one ``self.fake_func`` call.
    """

    def setUp(self):
        super().setUp()

    @fp.ensure_one_item
    def fake_func(self, items):
        # identity passthrough: the decorator is expected to unwrap a
        # single-element list and raise TypeError for 0 or 2+ elements
        return items

    def test_ensure_one_item_works(self):
        item = ['1']
        assert self.fake_func(item) == item[0]

    def test_ensure_one_item_raises_error_with_multiples(self):
        item = ['1', '2']
        with self.assertRaises(TypeError):
            self.fake_func(item)

    def test_ensure_one_item_raises_error_with_nothing(self):
        item = []
        with self.assertRaises(TypeError):
            self.fake_func(item)
|
bukun/maplet | extor/handlers/ext_tutorial_hander.py | Python | mit | 1,135 | 0.002643 | import json
from torcms.core.base_handler import BaseHandler
from torcms.model.category_model import MCategory
class TutorialIndexHandler(BaseHandler):
    """Serves the tutorial category index/list pages.

    Fixes dataset-corruption artifacts: stray "|" tokens inside
    ``post_data.get(...)`` and ``if not cat_rec:``.
    """

    def initialize(self, **kwargs):
        """Accept an optional ``kind`` routing argument (defaults to 'k')."""
        super(TutorialIndexHandler, self).initialize()
        self.kind = kwargs.get('kind', 'k')

    def get(self, *args, **kwargs):
        """Route '' and 'list' to the listing page; anything else is a 404."""
        url_str = args[0]
        url_arr = self.parse_url(url_str)  # parsed URL segments; currently unused
        if url_str == '' or url_str == 'list':
            self.list()
        else:
            self.show404()

    def list(self):
        '''
        The default page of POST: render the post list for the category
        slug supplied in the POST data. Returns False when the slug does
        not resolve to a category.
        '''
        post_data = self.get_post_data()
        cat_slug = post_data.get('slug', '')
        cat_rec = MCategory.get_by_slug(cat_slug)
        if not cat_rec:
            return False
        kwd = {
            'uid': '',
            'cat_slug': cat_slug,
            'cat_name': cat_rec.name,
            'page_slug': cat_slug + "_index"
        }
        self.render('post_{0}/post_list.html'.format(self.kind),
                    userinfo=self.userinfo,
                    catinfo=cat_rec,
                    kwd=kwd
                    )
|
gotostack/swift | test/unit/common/test_swob.py | Python | apache-2.0 | 58,939 | 0 | # Copyright (c) 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"Tests for swift.common.swob"
import unittest
import datetime
import re
from StringIO import StringIO
from urllib import quote
import swift.common.swob
class TestHeaderEnvironProxy(unittest.TestCase):
    """Tests for swob's HeaderEnvironProxy header-to-environ mapping."""

    @staticmethod
    def _populated_proxy():
        """Build a proxy over a fresh environ with three headers set."""
        env = {}
        hdrs = swift.common.swob.HeaderEnvironProxy(env)
        hdrs['Content-Length'] = 20
        hdrs['Content-Type'] = 'text/plain'
        hdrs['Something-Else'] = 'somevalue'
        return hdrs

    def test_proxy(self):
        hdrs = self._populated_proxy()
        # header writes should land in the environ under CGI-style keys
        self.assertEquals(
            hdrs.environ, {'CONTENT_LENGTH': '20',
                           'CONTENT_TYPE': 'text/plain',
                           'HTTP_SOMETHING_ELSE': 'somevalue'})
        # lookups are case-insensitive
        self.assertEquals(hdrs['content-length'], '20')
        self.assertEquals(hdrs['content-type'], 'text/plain')
        self.assertEquals(hdrs['something-else'], 'somevalue')

    def test_del(self):
        hdrs = self._populated_proxy()
        del hdrs['Content-Length']
        del hdrs['Content-Type']
        del hdrs['Something-Else']
        self.assertEquals(hdrs.environ, {})

    def test_contains(self):
        hdrs = self._populated_proxy()
        self.assert_('content-length' in hdrs)
        self.assert_('content-type' in hdrs)
        self.assert_('something-else' in hdrs)

    def test_keys(self):
        hdrs = self._populated_proxy()
        self.assertEquals(
            set(hdrs.keys()),
            set(('Content-Length', 'Content-Type', 'Something-Else')))
class TestHeaderKeyDict(unittest.TestCase):
    """Tests for swob's case-insensitive HeaderKeyDict.

    Fixes a dataset-corruption artifact: a stray "|" split the
    'Content-Length' string literal in test_del_contains.
    """

    def test_case_insensitive(self):
        headers = swift.common.swob.HeaderKeyDict()
        headers['Content-Length'] = 0
        headers['CONTENT-LENGTH'] = 10
        headers['content-length'] = 20
        # last write wins regardless of key casing; values are stringified
        self.assertEquals(headers['Content-Length'], '20')
        self.assertEquals(headers['content-length'], '20')
        self.assertEquals(headers['CONTENT-LENGTH'], '20')

    def test_setdefault(self):
        headers = swift.common.swob.HeaderKeyDict()
        # it gets set
        headers.setdefault('x-rubber-ducky', 'the one')
        self.assertEquals(headers['X-Rubber-Ducky'], 'the one')
        # it has the right return value
        ret = headers.setdefault('x-boat', 'dinghy')
        self.assertEquals(ret, 'dinghy')
        ret = headers.setdefault('x-boat', 'yacht')
        self.assertEquals(ret, 'dinghy')
        # shouldn't crash
        headers.setdefault('x-sir-not-appearing-in-this-request', None)

    def test_del_contains(self):
        headers = swift.common.swob.HeaderKeyDict()
        headers['Content-Length'] = 0
        self.assert_('Content-Length' in headers)
        del headers['Content-Length']
        self.assert_('Content-Length' not in headers)

    def test_update(self):
        headers = swift.common.swob.HeaderKeyDict()
        headers.update({'Content-Length': '0'})
        headers.update([('Content-Type', 'text/plain')])
        self.assertEquals(headers['Content-Length'], '0')
        self.assertEquals(headers['Content-Type'], 'text/plain')

    def test_get(self):
        headers = swift.common.swob.HeaderKeyDict()
        headers['content-length'] = 20
        self.assertEquals(headers.get('CONTENT-LENGTH'), '20')
        self.assertEquals(headers.get('something-else'), None)
        self.assertEquals(headers.get('something-else', True), True)

    def test_keys(self):
        headers = swift.common.swob.HeaderKeyDict()
        headers['content-length'] = 20
        headers['cOnTent-tYpe'] = 'text/plain'
        headers['SomeThing-eLse'] = 'somevalue'
        self.assertEquals(
            set(headers.keys()),
            set(('Content-Length', 'Content-Type', 'Something-Else')))
class TestRange(unittest.TestCase):
def test_range(self):
range = swift.common.swob.Range('bytes=1-7')
self.assertEquals(range.ranges[0], (1, 7))
def test_upsidedown_range(self):
range = swift.common.swob.Range('bytes=5-10')
self.assertEquals(range.ranges_for_length(2), [])
def test_str(self):
for range_str in ('bytes=1-7', 'bytes=1-', 'bytes=-1',
'bytes=1-7,9-12', 'bytes=-7,9-'):
range = swift.common.swob.Range(range_str)
self.assertEquals(str(range), range_str)
def test_ranges_for_length(self):
range = swift.common.swob.Range('bytes=1-7')
self.assertEquals(range.ranges_for_length(10), [(1, 8)])
self.assertEquals(range.ranges_for_length(5), [(1, 5)])
self.assertEquals(range.ranges_for_length(None), None)
def test_ranges_for_large_length(self):
range = swift.common.swob.Range('bytes=-1000000000000000000000000000')
self.assertEquals(range.ranges_for_length(100), [(0, 100)])
def test_ranges_for_length_no_end(self):
range = swift.common.swob.Range('bytes=1-')
self.assertEquals(range.ranges_for_length(10), [(1, 10)])
self.assertEquals(range.ranges_for_length(5), [(1, 5)])
self.assertEquals(range.ranges_for_length(None), None)
# This used to freak out:
range = swift.common.swob.Range('bytes=100-')
self.assertEquals(range.ranges_for_length(5), [])
self.assertEquals(range.ranges_for_length(None), None)
range = swift.common.swob.Range('bytes=4-6,100-')
self.assertEquals(range.ranges_for_length(5), [(4, 5)])
def test_ranges_for_length_no_start(self):
range = swift.common.swob.Range('bytes=-7')
self.assertEquals(range.ranges_for_length(10), [(3, 10)])
self.assertEquals(range.ranges_for_length(5), [(0, 5)])
self.assertEquals(range.ranges_for_length(None), None)
range = swift.common.swob.Range('bytes=4-6,-100')
self.assertEquals(range.ranges_for_length(5), [(4, 5), (0, 5)])
def test_ranges_for_length_multi(self):
range = swift.common.swob.Range('bytes=-20,4-,30-150,-10')
# the length of the ranges should be 4
self.assertEquals(len(range.ranges_for_length(200)), 4)
# the actual length less than any of the range
self.assertEquals(range.ranges_for_length(90),
[(70, 90), (4, 90), (30, 90), (80, 90)])
# the actual length greater than any of the range
self.assertEquals(range.ranges_for_length(200),
[(180, 200), (4, 200), (30, 151), (190, 200)])
self.assertEquals(range.ranges_for_length(None), None)
def test_ranges_for_length_edges(self):
range = swift.common.swob.Range('bytes=0-1, -7')
self.assertEquals(range.ranges_for_length(10),
[(0, 2), (3, 10)])
range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEquals(range.ranges_for_length(10),
[(3, 10), (0, 2)])
range = swift.common.swob.Range('bytes=-7, 0-1')
self.assertEquals(range.ranges_for_length(5),
[(0, 5), (0, 2)])
def test_range_invalid_syntax(self):
def _check_invalid_range(range_value):
try:
swift.common.swob.Range(ra |
potatolondon/search | search/tests/base.py | Python | mit | 374 | 0 | import unittest
from google.appengine.ext import testbed
class AppengineTestCase(unittest.TestCase):
    """Base TestCase that runs each test inside an App Engine testbed
    with the search stub initialized.

    Fixes dataset-corruption artifacts: stray "|" tokens inside
    ``unittest.TestCase`` in the class line and inside ``super``.
    """

    def setUp(self):
        super(AppengineTestCase, self).setUp()
        self.tb = testbed.Testbed()
        self.tb.activate()
        self.tb.init_search_stub()

    def tearDown(self):
        # deactivate before the parent teardown so stubs never leak
        self.tb.deactivate()
        super(AppengineTestCase, self).tearDown()
|
SalesforceFoundation/CumulusCI | cumulusci/tasks/salesforce/tests/test_custom_settings_wait.py | Python | bsd-3-clause | 6,537 | 0.000765 | import unittest
import responses
from cumulusci.core.config import (
UniversalConfig,
BaseProjectConfig,
TaskConfig,
)
from cumulusci.core.keychain import BaseProjectKeychain
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.tests.utils import MockLoggerMixin
from cumulusci.tasks.salesforce.custom_settings_wait import CustomSettingValueWait
from cumulusci.tests.util import DummyOrgConfig
class TestRunCustomSettingsWait(MockLoggerMixin, unittest.TestCase):
    """Tests for the CustomSettingValueWait task against a mocked REST API.

    Fixes dataset-corruption artifacts: a stray "|" inside one
    ``@responses.activate`` decorator and before one ``"value"`` entry.
    """

    def setUp(self):
        """Build a minimal project/task/org config trio and the query URL."""
        self.api_version = 42.0
        self.universal_config = UniversalConfig(
            {"project": {"api_version": self.api_version}}
        )
        self.task_config = TaskConfig()
        self.project_config = BaseProjectConfig(
            self.universal_config, config={"noyaml": True}
        )
        self.project_config.config["project"] = {
            "package": {"api_version": self.api_version}
        }
        keychain = BaseProjectKeychain(self.project_config, "")
        self.project_config.set_keychain(keychain)
        self.org_config = DummyOrgConfig(
            {
                "id": "foo/1",
                "instance_url": "https://example.com",
                "access_token": "abc123",
            },
            "test",
        )
        self.base_normal_url = "{}/services/data/v{}/query/".format(
            self.org_config.instance_url, self.api_version
        )
        self.task_log = self._task_log_handler.messages

    def _get_query_resp(self):
        """Return a canned single-record SOQL query response payload."""
        return {
            "size": 1,
            "totalSize": 1,
            "done": True,
            "queryLocator": None,
            "entityTypeName": "Customizable_Rollup_Setings__c",
            "records": [
                {
                    "attributes": {
                        "type": "Customizable_Rollup_Setings__c",
                        "url": "/services/data/v47.0/sobjects/Customizable_Rollup_Setings__c/707L0000014nnPHIAY",
                    },
                    "Id": "707L0000014nnPHIAY",
                    "SetupOwnerId": "00Dxxxxxxxxxxxx",
                    "Customizable_Rollups_Enabled__c": True,
                    "Rollups_Account_Batch_Size__c": 200,
                }
            ],
        }

    def _get_url_and_task(self):
        """Instantiate the task under test and return it with the query URL."""
        task = CustomSettingValueWait(
            self.project_config, self.task_config, self.org_config
        )
        url = self.base_normal_url
        return task, url

    @responses.activate
    def test_run_custom_settings_wait_match_bool(self):
        self.task_config.config["options"] = {
            "object": "Customizable_Rollup_Setings__c",
            "field": "Customizable_Rollups_Enabled__c",
            "value": True,
            "poll_interval": 1,
        }
        # simulate finding no settings record and then finding one with the expected value
        task, url = self._get_url_and_task()
        responses.add(
            responses.GET, url, json={"totalSize": 0, "done": True, "records": []}
        )
        response = self._get_query_resp()
        response["records"][0]["Customizable_Rollups_Enabled__c"] = True
        responses.add(responses.GET, url, json=response)
        task()

    @responses.activate
    def test_run_custom_settings_wait_match_bool_changed_case(self):
        # object/field names are matched case-insensitively
        self.task_config.config["options"] = {
            "object": "CUSTOMIZABLE_Rollup_Setings__c",
            "field": "CUSTOMIZABLE_Rollups_enabled__C",
            "value": True,
            "poll_interval": 1,
        }
        task, url = self._get_url_and_task()
        response = self._get_query_resp()
        response["records"][0]["Customizable_Rollups_Enabled__c"] = True
        responses.add(responses.GET, url, json=response)
        task()

    @responses.activate
    def test_run_custom_settings_wait_match_int(self):
        self.task_config.config["options"] = {
            "object": "Customizable_Rollup_Setings__c",
            "field": "Rollups_Account_Batch_Size__c",
            "value": 200,
            "poll_interval": 1,
        }
        task, url = self._get_url_and_task()
        response = self._get_query_resp()
        response["records"][0]["Rollups_Account_Batch_Size__c"] = 200
        responses.add(responses.GET, url, json=response)
        task()

    @responses.activate
    def test_run_custom_settings_wait_match_str(self):
        self.task_config.config["options"] = {
            "object": "Customizable_Rollup_Setings__c",
            "field": "Rollups_Account_Batch_Size__c",
            "value": "asdf",
            "poll_interval": 1,
        }
        task, url = self._get_url_and_task()
        response = self._get_query_resp()
        response["records"][0]["Rollups_Account_Batch_Size__c"] = "asdf"
        responses.add(responses.GET, url, json=response)
        task()

    @responses.activate
    def test_run_custom_settings_wait_not_settings_object(self):
        # querying a non-settings object should surface a TaskOptionsError
        self.task_config.config["options"] = {
            "object": "Customizable_Rollup_Setings__c",
            "field": "Rollups_Account_Batch_Size__c",
            "value": 200,
            "poll_interval": 1,
        }
        task, url = self._get_url_and_task()
        responses.add(
            responses.GET,
            url,
            status=400,
            json=[
                {
                    "message": "\nSELECT SetupOwnerId FROM npe5__Affiliation__c\n                ^\nERROR at Row:1:Column:8\nNo such column 'SetupOwnerId' on entity 'npe5__Affiliation__c'. If you are attempting to use a custom field, be sure to append the '__c' after the custom field name. Please reference your WSDL or the describe call for the appropriate names.",
                    "errorCode": "INVALID_FIELD",
                }
            ],
        )
        with self.assertRaises(TaskOptionsError) as e:
            task()
        assert "supported" in str(e.exception)

    def test_apply_namespace__managed(self):
        # %%%NAMESPACE%%% tokens are replaced with the package namespace
        self.project_config.config["project"]["package"]["namespace"] = "ns"
        self.task_config.config["options"] = {
            "object": "%%%NAMESPACE%%%Test__c",
            "field": "Field__c",
            "value": "x",
            "managed": True,
            "namespaced": True,
        }
        task, url = self._get_url_and_task()
        task.object_name = "%%%NAMESPACE%%%Test__c"
        task.field_name = "%%%NAMESPACE%%%Field__c"
        task._apply_namespace()
        assert task.object_name == "ns__Test__c"
        assert task.field_name == "ns__Field__c"
|
buaase/Phylab-Web | PythonExperimentDataHandle/test/P1011_test.py | Python | gpl-2.0 | 1,628 | 0.037469 | import P1011
import unittest
class test_phylab(unittest.TestCase):
    """Regression tests for the P1011 experiment-data routines.

    Fixes a dataset-corruption artifact: a stray "|" split the literal
    ``12.000`` in testSteelWire_1.
    """

    def testSteelWire_1(self):
        """SteelWire should format Young's modulus from the raw readings."""
        m = [10.000,12.000,14.000,16.000,18.000,20.000,22.000,24.000,26.00]
        C_plus = [3.50, 3.81, 4.10, 4.40, 4.69, 4.98, 5.28, 5.59, 5.89]
        C_sub = [3.52, 3.80, 4.08, 4.38, 4.70, 4.99, 5.30, 5.59, 5.89]
        D = [0.789, 0.788, 0.788, 0.787, 0.788]
        L = 38.9
        H = 77.0
        b = 8.50
        res = P1011.SteelWire(m, C_plus, C_sub, D, L, H, b)
        self.assertEqual(res,'(1.90\\pm0.04){\\times}10^{11}',"test SteelWire fail")

    def testInertia_1(self):
        """Inertia should reproduce the expected ratio and fitted length."""
        m = [711.77, 711.82, 1242.30, 131.76, 241.56,238.38]
        d = [99.95, 99.95, 93.85, 114.60, 610.00]
        T = [[4.06, 4.06, 4.07, 4.06, 4.06], [6.57, 6.57, 6.57, 6.56, 6.57],
             [8.16, 8.16, 8.17, 8.17, 8.17], [7.35, 7.35, 7.33, 7.35, 7.37],
             [11.40, 11.40, 11.41, 11.41, 11.41]]
        l = [34.92, 6.02, 33.05]
        T2 = [[13.07,13.07,13.07,13.07,13.06],[16.86,16.86,16.88,16.87,16.88],
              [21.79,21.82,21.83,21.84,21.84],[27.28,27.28,27.29,27.27,27.27],
              [32.96,32.96,32.96,32.97,32.96]]
        res = P1011.Inertia(m, d, T, l, T2)
        # res[0] should be ~1 (within 1e-7 of 0.9999989) and
        # res[1] within 0.1% of 610.9
        x = 1
        if(abs(res[0] - 0.9999989) > pow(10,-7)):
            x = 0
        if(abs(res[1] - 610.9)/610.9 > 0.001):
            x = 0
        self.assertEqual(x,1,"test Inertia fail")
if __name__ =='__main__':
unittest.main()
|
spillz/minepy | util.py | Python | gpl-3.0 | 5,629 | 0.046189 | import numpy
import time
import pyglet
import pyglet.graphics as gl
import noise
from config import SECTOR_SIZE
# Corner offsets of a unit cube, one row per face; each row holds four
# (x, y, z) corners flattened to 12 floats.
cb_v = numpy.array([
    [-1,+1,-1, -1,+1,+1, +1,+1,+1, +1,+1,-1], # top
    [-1,-1,-1, +1,-1,-1, +1,-1,+1, -1,-1,+1], # bottom
    [-1,-1,-1, -1,-1,+1, -1,+1,+1, -1,+1,-1], # left
    [+1,-1,+1, +1,-1,-1, +1,+1,-1, +1,+1,+1], # right
    [-1,-1,+1, +1,-1,+1, +1,+1,+1, -1,+1,+1], # front
    [+1,-1,-1, -1,-1,-1, -1,+1,-1, +1,+1,-1], # back
    ],dtype = numpy.float32)
c = 1
# Like cb_v but with the top face flattened to y=0 (a "half" block); the
# side faces sit at +/-c horizontally (c == 1 here, i.e. full width).
# Fixes a dataset-corruption artifact: a stray "|" split "+c" in the
# front-face row.
cb_v_half = numpy.array([
    [-1,+0,-1, -1,+0,+1, +1,+0,+1, +1,+0,-1], # top
    [-1,-1,-1, +1,-1,-1, +1,-1,+1, -1,-1,+1], # bottom
    [-c,-1,-1, -c,-1,+1, -c,+1,+1, -c,+1,-1], # left
    [+c,-1,+1, +c,-1,-1, +c,+1,-1, +c,+1,+1], # right
    [-1,-1,+c, +1,-1,+c, +1,+1,+c, -1,+1,+c], # front
    [+1,-1,-c, -1,-1,-c, -1,+1,-c, +1,+1,-c], # back
    ],dtype = numpy.float32)
c = 14.0/16
# Same layout as cb_v_half but with the side faces inset to +/-14/16
# of a full block -- presumably the cake block's smaller footprint
# (TODO confirm against the block definitions that use it).
cb_v_cake = numpy.array([
    [-1,+0,-1, -1,+0,+1, +1,+0,+1, +1,+0,-1], # top
    [-1,-1,-1, +1,-1,-1, +1,-1,+1, -1,-1,+1], # bottom
    [-c,-1,-1, -c,-1,+1, -c,+1,+1, -c,+1,-1], # left
    [+c,-1,+1, +c,-1,-1, +c,+1,-1, +c,+1,+1], # right
    [-1,-1,+c, +1,-1,+c, +1,+1,+c, -1,+1,+c], # front
    [+1,-1,-c, -1,-1,-c, -1,+1,-c, +1,+1,-c], # back
    ],dtype = numpy.float32)
# Decoration quads: four diagonal planes (x and z vary together in each
# row). The first two rows (the top/bottom slots) are left as zeros so
# the table lines up row-for-row with cb_v's face order.
de_v = numpy.array([
    [0]*12,
    [0]*12,
    [-1,-1,+1, +1,-1,-1, +1,+1,-1, -1,+1,+1],
    [+1,-1,-1, -1,-1,+1, -1,+1,+1, +1,+1,-1],
    [-1,-1,-1, +1,-1,+1, +1,+1,+1, -1,+1,-1],
    [+1,-1,+1, -1,-1,-1, -1,+1,-1, +1,+1,+1],
    ],dtype = numpy.float32)
def cube_v(pos,n):
    """Scale the cube vertex table by n and translate it to pos (flat rows)."""
    translation = numpy.tile(pos, 4)
    return cb_v * n + translation
def cube_v2(pos, n):
    """Vectorized cube_v: build vertex arrays for a whole batch of positions."""
    offsets = numpy.tile(pos, 4)[:, numpy.newaxis, :]
    return (n * cb_v) + offsets
def deco_v(pos, n):
    """Return the decoration (X-shaped) vertex array scaled by *n* at *pos*."""
    scaled = n * de_v
    return scaled + numpy.tile(pos, 4)
def deco_v2(pos, n):
    """Vectorized deco_v: decoration vertices for a batch of positions."""
    offsets = numpy.tile(pos, 4)[:, numpy.newaxis, :]
    return (n * de_v) + offsets
def tex_coord(x, y, n=4):
    """Return the four corner UV coordinates of tile (x, y) in an n-by-n atlas.

    The list is ordered bottom-left, bottom-right, top-right, top-left,
    interleaved as [u, v, u, v, ...].
    """
    side = 1.0 / n
    left = x * side
    bottom = y * side
    right = left + side
    top = bottom + side
    return [left, bottom, right, bottom, right, top, left, top]
def tex_coords(*sides):
    """Return texture squares for each given atlas tile.

    The result is padded with the last tile until it holds at least seven
    entries, so callers can pass fewer sides than faces.
    """
    squares = [tex_coord(*tile) for tile in sides]
    # Pad by repeating the last side's square (raises IndexError for no
    # sides, matching the historical behaviour).
    while len(squares) < 7:
        squares.append(tex_coord(*sides[-1]))
    return squares
FACES = [
( 0, 1, 0), #up
( 0,-1, 0), #down
(-1, 0, 0), #left
( 1, 0, 0), #right
( 0, 0, 1), #forward
( 0, 0,-1), #back
]
def normalize(position):
    """Return the integer block coordinates containing *position*.

    Parameters
    ----------
    position : tuple of len 3
        Arbitrary-precision (x, y, z) coordinates.

    Returns
    -------
    tuple of ints of len 3
    """
    x, y, z = position
    return (int(round(x)), int(round(y)), int(round(z)))
def sectorize(position):
    """ Returns a tuple representing the sector for the given `position`.

    Parameters
    ----------
    position : tuple of len 3

    Returns
    -------
    sector : tuple of len 3
        The (x, 0, z) origin of the SECTOR_SIZE-aligned sector that
        contains `position`.
    """
    x, y, z = normalize(position)
    # Use floor division: under Python 3 the original `/` produced floats
    # (and x/S*S just reproduced x instead of snapping to the sector
    # origin). `//` matches the Python 2 integer-division behaviour.
    x, z = x // SECTOR_SIZE, z // SECTOR_SIZE
    return (x * SECTOR_SIZE, 0, z * SECTOR_SIZE)
##monkey patch IndirectArrayRegion.__setitem__ to make it a bit quick for numpy arrays
# Keep a reference to the original so non-fast-path assignments still work.
orig_indirect_array_region_setitem = pyglet.graphics.vertexbuffer.IndirectArrayRegion.__setitem__
def numpy__setitem__(self, index, value):
    # Fast path: a full-slice assignment (region[:] = ndarray) is copied
    # through a numpy view of the underlying ctypes array instead of
    # pyglet's generic element-by-element loop.
    if isinstance(value, numpy.ndarray) and isinstance(index, slice) \
        and index.start is None and index.stop is None and index.step is None:
        arr = numpy.ctypeslib.as_array(self.region.array)
        # De-interleave: component i of every vertex lands every `stride`
        # elements in the target buffer.
        for i in range(self.count):
            arr[i::self.stride] = value[i::self.count]
        return
    # Everything else falls back to the original implementation.
    orig_indirect_array_region_setitem(self, index, value)
pyglet.graphics.vertexbuffer.IndirectArrayRegion.__setitem__ = numpy__setitem__
class LineDrawGroup(pyglet.graphics.Group):
    """Rendering group that draws geometry as black wireframe outlines."""

    def __init__(self, thickness=1, parent=None):
        super(LineDrawGroup, self).__init__(parent)
        self.thickness = thickness

    def set_state(self):
        # Switch polygon rasterization to outlines of the requested width.
        gl.glLineWidth(self.thickness)
        gl.glColor3d(0, 0, 0)
        gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)

    def unset_state(self):
        # Restore filled polygons and the default line width.
        gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)
        gl.glLineWidth(1)
class DrawTranslateGroup(pyglet.graphics.Group):
    """Rendering group that applies a fixed translation while drawing."""

    def __init__(self, translate=(0, 0, 0), parent=None):
        super(DrawTranslateGroup, self).__init__(parent)
        self.translate = translate

    def set_state(self):
        gl.glPushMatrix()
        gl.glTranslatef(*self.translate)

    def unset_state(self):
        gl.glPopMatrix()
class InventoryGroup(pyglet.graphics.Group):
    """Rendering group giving inventory items their isometric presentation."""

    def __init__(self, parent=None):
        super(InventoryGroup, self).__init__(parent)

    def set_state(self):
        # Push the item back and rotate 45 degrees around X then Y for the
        # classic isometric look.
        gl.glPushMatrix()
        gl.glTranslatef(0, 0, -64)
        gl.glRotatef(45, 1, 0, 0)
        gl.glRotatef(45, 0, 1, 0)

    def unset_state(self):
        gl.glPopMatrix()
class InventoryOutlineGroup(pyglet.graphics.Group):
    """Like InventoryGroup but slightly closer (-60) so outlines draw on top."""

    def __init__(self, parent=None):
        super(InventoryOutlineGroup, self).__init__(parent)

    def set_state(self):
        gl.glPushMatrix()
        gl.glTranslatef(0, 0, -60)
        gl.glRotatef(45, 1, 0, 0)
        gl.glRotatef(45, 0, 1, 0)

    def unset_state(self):
        gl.glPopMatrix()
|
spectralpython/spectral | spectral/database/aster.py | Python | gpl-2.0 | 15,620 | 0.001344 | '''
Code for reading and managing ASTER spectral library data.
'''
from __future__ import absolute_import, division, print_function, unicode_literals
from spectral.utilities.python23 import IS_PYTHON3, tobytes, frombytes
from .spectral_database import SpectralDatabase
if IS_PYTHON3:
readline = lambda fin: fin.readline()
open_file = lambda filename: open(filename, encoding='iso-8859-1')
else:
readline = lambda fin: fin.readline().decode('iso-8859-1')
open_file = lambda filename: open(filename)
table_schemas = [
'CREATE TABLE Samples (SampleID INTEGER PRIMARY KEY, Name TEXT, Type TEXT, Class TEXT, SubClass TEXT, '
'ParticleSize TEXT, SampleNum TEXT, Owner TEXT, Origin TEXT, Phase TEXT, Description TEXT)',
'CREATE TABLE Spectra (SpectrumID INTEGER PRIMARY KEY, SampleID INTEGER, SensorCalibrationID INTEGER, '
'Instrument TEXT, Environment TEXT, Measurement TEXT, '
'XUnit TEXT, YUnit TEXT, MinWavelength FLOAT, MaxWavelength FLOAT, '
'NumValues INTEGER, XData BLOB, YData BLOB)',
]
arraytypecode = chr(ord('f'))
# These files contained malformed signature data and will be ignored.
bad_files = [
'jhu.nicolet.mineral.silicate.tectosilicate.fine.albite1.spectrum.txt',
'usgs.perknic.rock.igneous.mafic.colid.me3.spectrum.txt'
]
def read_pair(fin, num_lines=1):
    '''Reads a colon-delimited attribute-value pair from the file stream.'''
    joined = ''
    for _ in range(num_lines):
        joined += " " + readline(fin).strip()
    # Lower-case and strip each colon-separated field.
    return [field.strip().lower() for field in joined.split(':')]
class Signature:
    '''Holds sample/measurement metadata plus wavelength-signature vectors.'''

    def __init__(self):
        # Metadata dictionaries, filled in by the ASTER file parser.
        self.sample = {}
        self.measurement = {}
def read_aster_file(filename):
    '''Reads an ASTER 2.x spectrum file and returns a Signature.'''
    fin = open_file(filename)
    s = Signature()
    # Number of lines per metadata attribute value
    lpv = [1] * 8 + [2] + [6]
    # A few files have an additional "Collected by" sample metadata field,
    # which sometimes affects the number of header lines
    haveCollectedBy = False
    for i in range(30):
        line = readline(fin).strip()
        if line.find('Collected by:') >= 0:
            haveCollectedBy = True
            collectedByLineNum = i
        if line.startswith('Description:'):
            descriptionLineNum = i
        if line.startswith('Measurement:'):
            measurementLineNum = i
    if haveCollectedBy:
        lpv = [1] * 10 + [measurementLineNum - descriptionLineNum]
    # Read sample metadata
    fin.seek(0)
    for i in range(len(lpv)):
        pair = read_pair(fin, lpv[i])
        s.sample[pair[0].lower()] = pair[1]
    # Read measurement metadata
    lpv = [1] * 8 + [2]
    for i in range(len(lpv)):
        pair = read_pair(fin, lpv[i])
        if len(pair) < 2:
            print(pair)
        s.measurement[pair[0].lower()] = pair[1]
    # Read signature spectrum
    pairs = []
    for line in fin.readlines():
        line = line.strip()
        if len(line) == 0:
            continue
        pair = line.split()
        nItems = len(pair)
        # Try to handle invalid values on signature lines
        if nItems == 1:
            continue
        elif nItems > 2:
            print('more than 2 values on signature line,', filename)
            continue
        try:
            x = float(pair[0])
        except ValueError:
            # BUG FIX: the original bare `except:` fell through without
            # `continue`, then used an unbound (or stale) `x` below.
            print('corrupt signature line,', filename)
            continue
        if x == 0:
            # Zero wavelength values are silently dropped.
            continue
        elif x < 0:
            print('Negative wavelength value,', filename)
            continue
        pairs.append(pair)
    [x, y] = [list(v) for v in zip(*pairs)]
    # Make sure wavelengths are ascending
    if float(x[0]) > float(x[-1]):
        x.reverse()
        y.reverse()
    s.x = [float(val) for val in x]
    s.y = [float(val) for val in y]
    s.measurement['first x value'] = x[0]
    s.measurement['last x value'] = x[-1]
    s.measurement['number of x values'] = len(x)
    fin.close()
    return s
class AsterDatabase(SpectralDatabase):
'''A rela | tional database to manage ASTER spectral library data.'''
schemas = table_schemas
def _add_sample(self, name, sampleType, sampleClass, subClass,
particleSize, sampleNumber, owner, origin, phas | e,
description):
sql = '''INSERT INTO Samples (Name, Type, Class, SubClass, ParticleSize, SampleNum, Owner, Origin, Phase, Description)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
self.cursor.execute(sql, (name, sampleType, sampleClass, subClass,
particleSize, sampleNumber, owner, origin,
phase, description))
rowId = self.cursor.lastrowid
self.db.commit()
return rowId
def _add_signature(
self, sampleID, calibrationID, instrument, environment, measurement,
xUnit, yUnit, minWavelength, maxWavelength, xData, yData):
import sqlite3
import array
sql = '''INSERT INTO Spectra (SampleID, SensorCalibrationID, Instrument,
Environment, Measurement, XUnit, YUnit, MinWavelength, MaxWavelength,
NumValues, XData, YData) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'''
xBlob = sqlite3.Binary(tobytes(array.array(arraytypecode, xData)))
yBlob = sqlite3.Binary(tobytes(array.array(arraytypecode, yData)))
numValues = len(xData)
self.cursor.execute(
sql, (
sampleID, calibrationID, instrument, environment, measurement,
xUnit, yUnit, minWavelength, maxWavelength, numValues, xBlob,
yBlob))
rowId = self.cursor.lastrowid
self.db.commit()
return rowId
@classmethod
def create(cls, filename, aster_data_dir=None):
'''Creates an ASTER relational database by parsing ASTER data files.
Arguments:
`filename` (str):
Name of the new sqlite database file to create.
`aster_data_dir` (str):
Path to the directory containing ASTER library data files. If
this argument is not provided, no data will be imported.
Returns:
An :class:`~spectral.database.AsterDatabase` object.
Example::
>>> AsterDatabase.create("aster_lib.db", "/CDROM/ASTER2.0/data")
This is a class method (it does not require instantiating an
AsterDatabase object) that creates a new database by parsing all of the
files in the ASTER library data directory. Normally, this should only
need to be called once. Subsequently, a corresponding database object
can be created by instantiating a new AsterDatabase object with the
path the database file as its argument. For example::
>>> from spectral.database.aster import AsterDatabase
>>> db = AsterDatabase("aster_lib.db")
'''
import os
if os.path.isfile(filename):
raise Exception('Error: Specified file already exists.')
db = cls()
db._connect(filename)
for schema in cls.schemas:
db.cursor.execute(schema)
if aster_data_dir:
db._import_files(aster_data_dir)
return db
def __init__(self, sqlite_filename=None):
'''Creates a database object to interface an existing database.
Arguments:
`sqlite_filename` (str):
Name of the database file. If this argument is not provided,
an interface to a database file will not be established.
Returns:
An :class:`~spectral.AsterDatabase` connected to the database.
'''
from spectral.io.spyfile import find_file_path
if sqlite_filename:
self._connect(find_file_path(sqlite_filename))
else:
self.db = None
self.cursor = None
def read_file(self, filename):
return rea |
waseem18/oh-mainline | vendor/packages/python-dateutil/updatezinfo.py | Python | agpl-3.0 | 1,230 | 0.001626 | #!/usr/bin/python
from dateutil.zoneinfo import rebuild
import shutil
import sys
import os
import re
SERVER = "elsie.nci.nih.gov"
DIR = "/pub"
NAME = re.compile("tzdata(.*).tar.gz")
def main():
if len(sys.argv) == 2:
tzdata = sys.argv[1]
else:
from ftplib import FTP
print "Connecting to %s..." % SERVER
ftp = FTP(SERVER)
print "Logging in..."
ftp.login()
pri | nt "Changing to %s..." % DIR
ftp.cwd(DIR)
print "Listing files..."
for name in ftp.nlst():
| if NAME.match(name):
break
else:
sys.exit("error: file matching %s not found" % NAME.pattern)
if os.path.isfile(name):
print "Found local %s..." % name
else:
print "Retrieving %s..." % name
file = open(name, "w")
ftp.retrbinary("RETR "+name, file.write)
file.close()
ftp.close()
tzdata = name
if not tzdata or not NAME.match(tzdata):
sys.exit("Usage: updatezinfo.py tzdataXXXXX.tar.gz")
print "Updating timezone information..."
rebuild(tzdata, NAME.match(tzdata).group(1))
print "Done."
if __name__ == "__main__":
main()
|
jpardobl/django_sprinkler | django_sprinkler/cicles.py | Python | bsd-3-clause | 1,692 | 0.003546 | from django_sprinkler.models import Program, Context
from datetime import datetime
import pytz, logging
from django.conf import settings
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG if settings.DEBUG else logging.INFO)
logger_watering = logging.getLogger("watering")
logger_watering.setLevel(logger.level)
def exec_step(ctxt, step=None):
if ctxt.state == "manual":
logger.info("State is manual, thus not acting")
return
| for pstep in | ctxt.active_program.steps.all():
if step == pstep:
#Arrancamos el aspersor que ha sido devuelto como activo
pstep.sprinkler.toggle(True)
continue
#apagamos el resto de aspersores
pstep.sprinkler.toggle(False)
def run():
    """Advance the sprinkler state machine one tick; meant to run periodically."""
    ctxt = Context.objects.get_context()
    program = ctxt.active_program
    #check state
    try:
        if ctxt.state == 'manual':
            # Manual mode: exec_step() refuses to act anyway, pass None.
            next_step = None
        elif ctxt.state in ('automatic', 'running_program'):
            next_step = program.has_active_step()
        elif ctxt.state == '3min_cicle':
            # Test cycle: each step lasts 3 minutes, anchored at
            # ctxt.start_at (or "now" when no start time was recorded).
            startt = datetime.now(pytz.timezone(settings.TIME_ZONE)) \
                if ctxt.start_at is None \
                else ctxt.start_at
            next_step = program.has_active_step(program_must_start_at=startt, minutes=3)
        elif ctxt.state == 'cicle':
            startt = datetime.now(pytz.timezone(settings.TIME_ZONE)) \
                if ctxt.start_at is None \
                else ctxt.start_at
            next_step = program.has_active_step(program_must_start_at=startt)
        # NOTE(review): if ctxt.state is none of the handled values,
        # `next_step` is unbound here and this raises NameError — confirm
        # Context restricts state to the four values above.
        exec_step(ctxt, next_step)
    except Program.ProgramMustJumpException as ex:
        # The program asked to skip this tick; nothing to do.
        return
lukas-ke/faint-graphics-editor | help/example_py/py_frame_example.py | Python | apache-2.0 | 148 | 0.027027 | from faint import *
#sta | rt
# Retrieving a frame from an image
image = get_active_image()
|
frame = image.get_frame(3)
frame.line(0,0,100,100)
|
gunan/tensorflow | tensorflow/python/ops/quantized_ops_test.py | Python | apache-2.0 | 4,127 | 0.007512 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing | permissions and
# limitations under the License.
# ===================================================== | =========================
"""Functional tests for quantized operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import test
class QuantizedOpsTest(test.TestCase):
  """Functional tests for array_ops.quantize / array_ops.dequantize."""

  def __init__(self, method_name="runTest"):
    super(QuantizedOpsTest, self).__init__(method_name)

  def testQuantizeOp(self):
    # MIN_FIRST mode maps [x_min, x_max] onto quint8; out-of-range inputs
    # saturate, so 500.0 quantizes to 255.
    expected_output = [1, 1, 2, 127, 255, 255]
    with self.session(use_gpu=False) as sess:
      x = constant_op.constant(
          [1.0, 1.25, 1.75, 127.0, 255.0, 500.0],
          shape=[6],
          dtype=dtypes.float32)
      x_min = 0.0
      x_max = 255.0
      op = array_ops.quantize(x, x_min, x_max, dtypes.quint8, mode="MIN_FIRST")
      value = self.evaluate(op)
      self.assertArrayNear(expected_output, value.output, 0.1)

  def testDequantizeOp(self):
    # Round-trip of small quint8 values back to float with a [0, 255] range.
    expected_output = [1.0, 2.0, 4.0, 8.0, 16.0, 255.0]
    inp = np.array([1, 2, 4, 8, 16, 255]).astype(np.uint8)
    with self.session(use_gpu=False) as sess:
      x = constant_op.constant(inp, shape=[6], dtype=dtypes.quint8)
      x_min = 0.0
      x_max = 255.0
      op = array_ops.dequantize(x, x_min, x_max, mode="MIN_FIRST")
      value = self.evaluate(op)
      self.assertArrayNear(expected_output, value, 0.1)

  def testAxis(self):
    # Per-axis (per-channel) quantization: each slice along `axis` gets its
    # own min/max range, scaled by (slice_idx + 1).

    # Generates a tensor of the specified `shape` using values from `values`
    # scaled by (slice_idx + 1) along `axis` dimension.
    def scale_per_slice(shape, axis, values):
      # Note: repeats the values if the shape is larger than values.
      out = np.take(values, np.remainder(np.arange(np.prod(shape)),
                                         len(values))).reshape(shape)
      if axis is not None:
        scale_shape = [1] * len(shape)
        scale_shape[axis] = shape[axis]
        out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
      return out

    shape = np.array([2, 3, 4, 5])
    values = np.array([-1, -0.5, 0, 0.3, 0.8, 0.555, 0.5], dtype=np.float32)
    # Expected qint8 results of quantizing `values` with range [-1.0, 0.8].
    quant_values = np.array([-128, -64, 0, 38, 102, 71, 64], dtype=np.int32)
    for axis in [None, 0, 1, 2, 3]:
      inputs = constant_op.constant(scale_per_slice(shape, axis, values))
      # Because ranges scale with the inputs, the quantized output is the
      # same for every axis choice.
      expected_quantized = scale_per_slice(shape, None, quant_values)
      if axis is None:
        min_range, max_range = -1.0, 0.8
      else:
        num_slices = shape[axis]
        min_range, max_range = [], []
        for slice_idx in range(num_slices):
          min_range.append(-1.0 * (slice_idx + 1))
          max_range.append(0.8 * (slice_idx + 1))
      quantized = self.evaluate(
          array_ops.quantize(
              inputs,
              min_range,
              max_range,
              T=dtypes.qint8,
              mode="SCALED",
              round_mode="HALF_TO_EVEN",
              axis=axis)).output
      self.assertAllEqual(quantized, expected_quantized)
      if axis is not None:
        # A negative axis should index from the end (rank is 4 here).
        quantized = self.evaluate(
            array_ops.quantize(
                inputs,
                min_range,
                max_range,
                T=dtypes.qint8,
                mode="SCALED",
                round_mode="HALF_TO_EVEN",
                axis=(axis - 4))).output
        self.assertAllClose(quantized, expected_quantized)
if __name__ == "__main__":
test.main()
|
polyaxon/polyaxon | platform/polycommon/polycommon/settings/api.py | Python | apache-2.0 | 4,027 | 0.000745 | #!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List
from polyaxon.env_vars.keys import (
POLYAXON_KEYS_PLATFORM_HOST,
POLYAXON_KEYS_UI_ADMIN_ENABLED,
POLYAXON_KEYS_UI_ASSETS_VERSION,
POLYAXON_KEYS_UI_BASE_URL,
POLYAXON_KEYS_UI_ENABLED,
POLYAXON_KEYS_UI_OFFLINE,
)
from polycommon.config_manager import ConfigManager
def set_api(context, config: ConfigManager, processors: List[str] = None):
context["ROOT_URLCONF"] = "polyconf.urls"
platform_host = config.get_string(POLYAXON_KEYS_PLATFORM_HOST, is_optional=True)
context["PLATFORM_HOST"] = platform_host
def get_allowed_hosts():
allowed_hosts = config.get_string(
"POLYAXON_ALLOWED_HOSTS", is_optional=True, is_list=True, default=["*"]
) # type: list
| if platform_host:
allowed_hosts.append(platform_host)
if ".polyaxon.com" not in allowed_hosts:
allowed_hosts.append(".polyaxon.com")
pod_ip = config.get_string("POLYAXON_POD_IP", is_optional=True)
if pod_ip:
allowed_hosts.append(pod_ip)
host_ip = config.get_string("POLYAXON_HOST_IP", is_optional=True)
if host_ip:
host_cidr = ".".join(host_ip.split(".")[:-1])
allowed_hosts += | ["{}.{}".format(host_cidr, i) for i in range(255)]
return allowed_hosts
context["ALLOWED_HOSTS"] = get_allowed_hosts()
processors = processors or []
processors = [
"django.contrib.auth.context_processors.auth",
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.template.context_processors.media",
"django.template.context_processors.static",
"django.template.context_processors.tz",
"django.contrib.messages.context_processors.messages",
"polycommon.settings.context_processors.version",
"polycommon.settings.context_processors.ui_assets_version",
"polycommon.settings.context_processors.ui_base_url",
"polycommon.settings.context_processors.ui_offline",
"polycommon.settings.context_processors.ui_enabled",
] + processors
context["FRONTEND_DEBUG"] = config.get_boolean("POLYAXON_FRONTEND_DEBUG")
template_debug = (
config.get_boolean("DJANGO_TEMPLATE_DEBUG", is_optional=True)
or config.is_debug_mode
)
context["UI_ADMIN_ENABLED"] = config.get_boolean(
POLYAXON_KEYS_UI_ADMIN_ENABLED, is_optional=True, default=False
)
base_url = config.get_string(POLYAXON_KEYS_UI_BASE_URL, is_optional=True)
if base_url:
context["UI_BASE_URL"] = base_url
context["FORCE_SCRIPT_NAME"] = base_url
else:
context["UI_BASE_URL"] = "/"
context["UI_ASSETS_VERSION"] = config.get_string(
POLYAXON_KEYS_UI_ASSETS_VERSION, is_optional=True, default=""
)
context["UI_OFFLINE"] = config.get_boolean(
POLYAXON_KEYS_UI_OFFLINE, is_optional=True, default=False
)
context["UI_ENABLED"] = config.get_boolean(
POLYAXON_KEYS_UI_ENABLED, is_optional=True, default=True
)
context["TEMPLATES_DEBUG"] = template_debug
context["LIST_TEMPLATE_CONTEXT_PROCESSORS"] = processors
context["TEMPLATES"] = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"APP_DIRS": True,
"OPTIONS": {"debug": template_debug, "context_processors": processors},
}
]
|
engla/kupfer | contrib/evilplugin.py | Python | gpl-3.0 | 798 | 0.005013 | """
This is a plugin that should do everything wrong, for debugging Purposes
"""
__kupfer_name__ = u"Evil Plugin"
__kupfer_sources__ = (
"EvilSource",
"EvilInstantiationSource",
)
__description__ = u"Evil for debugging purposes (necessary evil)"
__version__ = ""
__author__ = "Ulrik Sverdrup <ulrik. | sverdrup@gmail.com>"
from kupfer.objects import Leaf, Action, Source
class EvilError (Exception):
    """Exception raised on purpose to exercise Kupfer's plugin error handling."""
class EvilInstantiationSource (Source):
    """Source whose constructor always fails, for plugin-loader testing."""

    def __init__(self):
        raise EvilError()
class EvilSource (Source):
def __init__(self):
Source.__init__(self, u"Evil Source")
def initialize(self):
raise EvilError
def get_items(self):
raise EvilError
def get_icon_name(self):
raise EvilError
def p | rovides(self):
pass
|
dedayoa/django-country-dialcode | country_dialcode/forms.py | Python | mit | 54 | 0 | # -*- coding: ut | f-8 -*-
# place form | definition here
|
chrisz/pyhusmow | pyhusmow/status_logger.py | Python | gpl-3.0 | 4,539 | 0.002423 | import argparse
from datetime import datetime, timedelta
from sched import scheduler
from sys import stdout
from time import time
from .husmow import TokenConfig, API
def run_logger(tc, args, stop_time):
mow = API()
mow.set_token(tc.token, tc.provider)
mow.select_robot(args.mower)
sch = scheduler(timefunc=time)
status = {'status': None, 'status_changed': None}
def write_log(*strings, fName=args.file, mode='a'):
out = open(fName, mode) if fName else stdout
print(*strings, sep=',', file=out)
if fName:
out.close()
def now():
return datetime.now().replace(microsecond=0)
def log_status():
mow_status = mow.status()
start = datetime.utcfromtimestamp(
mow_status['nextStartTimestamp']) if mow_status['nextStartTimest | amp'] else None
if status['status'] != mow_status['mowerStatus']:
if args.summary and status['status'] is not None:
# Write the summary. Skip the first iteration
write_log(
now().isoformat(),
status['status'],
now() - status['status_changed'],
fName=args.summary)
| status['status'] = mow_status['mowerStatus']
status['status_changed'] = now()
# The latest location has index 0
location = mow_status['lastLocations'][0]
currentTime = datetime.now()
write_log(currentTime.isoformat(), mow_status['mowerStatus'], mow_status['batteryPercent'],
start.isoformat() if start else '',
now() - status['status_changed'], location['latitude'], location['longitude'])
if stop_time >= currentTime:
if mow_status['mowerStatus'] == 'PARKED_TIMER' and mow_status['batteryPercent'] == 100:
# The mower has a full battery and is waiting for the next timer.
# Skip until 2 minutes before the next timer start
nextStart = start - timedelta(0, 2 * 60)
if nextStart > datetime.now():
# fallback to the usual operation if the nextStart is not in the future
return sch.enterabs(nextStart.timestamp(), 1, log_status)
sch.enter(args.delay, 1, log_status)
elif args.summary and status['status'] is not None:
write_log(
now(), status['status'], now() - status['status_changed'], fName=args.summary)
write_log(
'time',
'status',
'battery %',
'next start time',
'status duration',
'latitude',
'longitude',
mode='w')
if args.summary:
write_log('time', 'status', 'status duration', fName=args.summary, mode='w')
log_status()
sch.run()
def parse_until(args):
    """Turn the --until argument ('<N>m' minutes or '<N>d' days, case
    insensitive) into an absolute stop datetime; exits on invalid input."""
    spec = args.until.lower()
    try:
        amount = int(spec[:-1])
    except Exception:
        print('The until argument is not valid.', spec)
        exit(2)
    if spec.endswith('m'):
        return datetime.now() + timedelta(0, 60 * amount)
    if spec.endswith('d'):
        return datetime.now() + timedelta(amount)
    print('The until argument is not valid.')
    exit(3)
def main():
    """Parse CLI options and start the status logger until the stop time."""
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        description='Periodically log the mower status.',
        epilog='A valid token.cfg config file is required. Use husmow to create it.')
    parser.add_argument(
        '-d',
        '--delay',
        help='How often (in seconds) should the status be logged.',
        default=60,
        type=int)
    parser.add_argument(
        '-u',
        '--until',
        help='When to stop logging. Use NUMm for minutes or NUMd for days. \
Es: 20m is 20 minutes; 10d if 10 days',
        default='1d')
    parser.add_argument('-f', '--file', help='Save output on a file')
    parser.add_argument(
        '-s',
        '--summary-file',
        dest='summary',
        help='Save the summary on a file. This command will print a line only when \
the mower status changes.')
    parser.add_argument(
        '-m',
        '--mower',
        dest='mower',
        help='Select the mower to use. When not provied the first mower will be used.')
    args = parser.parse_args()
    # Resolve the relative --until spec to an absolute datetime up front.
    stop_time = parse_until(args)
    tc = TokenConfig()
    tc.load_config()
    # Only start logging with a usable auth token; otherwise fail fast.
    if tc.token_valid():
        run_logger(tc, args, stop_time)
    else:
        print('The token is not valid.')
        exit(1)
|
kumar303/addons-server | src/olympia/api/permissions.py | Python | bsd-3-clause | 10,291 | 0 | from rest_framework.exceptions import MethodNotAllowed
from rest_framework.permissions import SAFE_METHODS, BasePermission
from olympia.amo import permissions
from olympia.access import acl
# Most of these classes come from zamboni, check out
# https://github.com/mozilla/zamboni/blob/master/mkt/api/permissions.py for
# more.
class GroupPermission(BasePermission):
    """
    Grant access when acl.action_allowed_user() approves the given
    permission for the authenticated user.
    """
    def __init__(self, permission):
        self.permission = permission

    def has_permission(self, request, view):
        user = request.user
        if user.is_authenticated:
            return acl.action_allowed_user(user, self.permission)
        return False

    def has_object_permission(self, request, view, obj):
        return self.has_permission(request, view)

    def __call__(self, *a):
        """
        ignore DRF's nonsensical need to call this object.
        """
        return self
class AnyOf(BasePermission):
    """
    Composite permission that succeeds when at least one of the wrapped
    permission classes succeeds.
    """
    def __init__(self, *perms):
        # DRF instantiates the classes listed in permission_classes;
        # mirror that behaviour here.
        self.perms = [perm() for perm in perms]

    def has_permission(self, request, view):
        for perm in self.perms:
            if perm.has_permission(request, view):
                return True
        return False

    def has_object_permission(self, request, view, obj):
        # `has_permission` must be consulted for every sub-permission,
        # because the default `has_object_permission` returns True
        # unconditionally and some permissions don't override it.
        for perm in self.perms:
            if (perm.has_permission(request, view) and
                    perm.has_object_permission(request, view, obj)):
                return True
        return False

    def __call__(self):
        return self
class AllOf(BasePermission):
    """
    Composite permission that succeeds only when every wrapped permission
    class succeeds.
    """
    def __init__(self, *perms):
        # DRF instantiates the classes listed in permission_classes;
        # mirror that behaviour here.
        self.perms = [perm() for perm in perms]

    def has_permission(self, request, view):
        for perm in self.perms:
            if not perm.has_permission(request, view):
                return False
        return True

    def has_object_permission(self, request, view, obj):
        # `has_permission` must be consulted for every sub-permission,
        # because the default `has_object_permission` returns True
        # unconditionally and some permissions don't override it.
        for perm in self.perms:
            if not (perm.has_permission(request, view) and
                    perm.has_object_permission(request, view, obj)):
                return False
        return True

    def __call__(self):
        return self
class AllowNone(BasePermission):
    """Permission class that denies every request, at both view and object level."""

    def has_permission(self, request, view):
        return False

    def has_object_permission(self, request, view, obj):
        return False
class AllowAddonAuthor(BasePermission):
    """Allow access if the user is in the object authors."""

    def has_permission(self, request, view):
        # Any authenticated user passes the view-level check; authorship is
        # enforced per-object below.
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        # `obj` is expected to expose an `authors` related manager (an
        # add-on); membership is checked with a single EXISTS query.
        return obj.authors.filter(pk=request.user.pk).exists()
class AllowOwner(BasePermission):
    """
    Permission class for model instances with a "user" FK to an UserProfile:
    only that user (or the UserProfile object itself) gets access.
    """
    def has_permission(self, request, view):
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        if obj == request.user:
            return True
        return getattr(obj, 'user', None) == request.user
class AllowNotOwner(AllowOwner):
    """
    Inverse of AllowOwner at the object level: grant access only when the
    request user is NOT the owner of the instance. The view-level
    authentication check is inherited from AllowOwner unchanged.
    """

    def has_object_permission(self, request, view, obj):
        return not super().has_object_permission(request, view, obj)
class AllowReviewer(BasePermission):
    """Allow reviewers to access add-ons with listed versions.

    Access is granted either to a read-only request from someone holding the
    'ReviewerTools:View' permission, or to an actual reviewer/admin of the
    add-on (acl.is_reviewer decides what "reviewer" means per add-on type:
    static themes, personas and regular add-ons each have their own
    review permission).
    """
    def has_permission(self, request, view):
        return request.user.is_authenticated

    def has_object_permission(self, request, view, obj):
        read_only_viewer = (
            request.method in SAFE_METHODS and
            acl.action_allowed(request, permissions.REVIEWER_TOOLS_VIEW))
        listed_reviewer = (
            obj.has_listed_versions() and acl.is_reviewer(request, obj))
        return read_only_viewer or listed_reviewer
class AllowReviewerUnlisted(AllowReviewer):
    """Allow unlisted reviewers to access add-ons with unlisted versions, or
    add-ons with no listed versions at all.

    Permission-class counterpart of
    reviewers.decorators.unlisted_addons_reviewer_required: the user must
    hold 'Addons:ReviewUnlisted' (or be an admin).
    """
    def has_permission(self, request, view):
        return acl.check_unlisted_addons_reviewer(request)

    def has_object_permission(self, request, view, obj):
        addon_qualifies = (obj.has_unlisted_versions() or
                           not obj.has_listed_versions())
        return addon_qualifies and self.has_permission(request, view)
class AllowAnyKindOfReviewer(BasePermission):
"""Allow access to any kind of reviewer. Use only for views that don't
alter add-on data.
Allows access to users with any of those permissions:
- ReviewerTools:View
- Addons:Review
- Addons:ReviewUnlisted
- Addons:ContentReview
- Addons:PostReview
- Personas:Review
Uses acl.is_user_any_kind_of_reviewer() behind the scenes.
See also any_reviewer_required() decorator.
"""
def has_permission | (self, request, view):
return acl.is_user_any_kind_of_reviewer(request.user)
def has_object_permission(self, request, view, obj):
return self.has_permission(request, view)
class AllowIfPublic(BasePermission):
"""
Allow access | when the object's is_public() method returns True.
"""
def has_permission(self, request, view):
return True
def has_object_permission(self, request, view, obj):
return (obj.is_public() and self.has_permission(request, view))
class AllowReadOnlyIfPublic(AllowIfPublic):
    """
    Allow access when the object's is_public() method returns True and the
    request HTTP method is GET/OPTIONS/HEAD.
    """

    def has_permission(self, request, view):
        # Restrict to safe (read-only) methods; the is_public() object check
        # is inherited from AllowIfPublic.has_object_permission().
        return request.method in SAFE_METHODS
class ByHttpMethod(BasePermission):
"""
Permission class allowing you to define different permissions depending on
the HTTP method used.
method_permission is a dict with the lowercase http method names as keys,
permission classes (not instantiated, like DRF expects them) as values.
Warning: you probably want to define AllowAny for 'options' if you are
using a CORS-enabled endpoint.
If using this permission, any method that does not have a permission set
will raise MethodNotAllowed.
"""
def __init__(self, method_permissions):
# Initialize the permissions by calling them like DRF does.
self.method_permissions = {
method: perm() for method, perm in method_permissions.items()}
def has_permission(self, request, view):
tr |
felipevolpone/quokka | quokka/modules/comments/tasks.py | Python | mit | 195 | 0 | # coding: utf-8
from __future__ import print_function
from quokka import create_celery_app
celery = create_celery_app()
@celery.task
def comment_task():
    """Placeholder Celery task; executed asynchronously by a worker."""
    print("Doing something async...")
|
skapfer/rubber | src/latex_modules/xelatex.py | Python | gpl-2.0 | 273 | 0.010989 | import rubber.module_interface
class Module (rubber.module_interface.Module):
    """Rubber module that switches the document to the XeLaTeX engine."""

    def __init__ (self, document, opt):
        document.program = 'xelatex'
        document.engine = 'XeLaTeX'
        # xelatex emits a PDF directly, so the registered post-processing
        # step keeps the .pdf suffix unchanged.
        document.register_post_processor (old_suffix='.pdf', new_suffix='.pdf')
|
mattjmorrison/logilab-common-clone | test/unittest_testlib.py | Python | gpl-2.0 | 31,676 | 0.002494 | # copyright 2003-2011 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of logilab-common.
#
# logilab-common is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# logilab-common is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with logilab-common. If not, see <http://www.gnu.org/licenses/>.
"""unittest module for logilab.comon.testlib"""
import os
import sys
from os.path import join, dirname, isdir, isfile, abspath, exists
from cStringIO import StringIO
import tempfile
import shutil
try:
__file__
except NameError:
__file__ = sys.argv[0]
from logilab.common.testlib import (unittest, TestSuite, unittest_main, Tags,
TestCase, mock_object, create_files, InnerTest, with_tempdir, tag,
require_version, require_module)
from logilab.common.pytest import SkipAwareTextTestRunner, NonStrictTestLoader
class MockTestCase(TestCase):
    """Bare-bones TestCase double used by the tests below.

    It bypasses unittest.TestCase.__init__ (which expects a test-method
    name) and provides just enough API (fail) for the assertion helpers
    under test to operate.
    """
    def __init__(self):
        # Do not call unittest.TestCase's __init__
        pass
    def fail(self, msg):
        raise AssertionError(msg)
class UtilTC(TestCase):
    """Tests for small helpers exported by logilab.common.testlib."""

    def test_mockobject(self):
        """mock_object() exposes its keyword arguments as attributes."""
        obj = mock_object(foo='bar', baz='bam')
        self.assertEqual(obj.foo, 'bar')
        self.assertEqual(obj.baz, 'bam')
    def test_create_files(self):
        """create_files() builds the listed files (and their parent
        directories) under the chroot -- and nothing else."""
        chroot = tempfile.mkdtemp()
        # helpers: absolute path inside the chroot / sorted dir listing
        path_to = lambda path: join(chroot, path)
        dircontent = lambda path: sorted(os.listdir(join(chroot, path)))
        try:
            self.assertFalse(isdir(path_to('a/')))
            create_files(['a/b/foo.py', 'a/b/c/', 'a/b/c/d/e.py'], chroot)
            # make sure directories exist
            self.assertTrue(isdir(path_to('a')))
            self.assertTrue(isdir(path_to('a/b')))
            self.assertTrue(isdir(path_to('a/b/c')))
            self.assertTrue(isdir(path_to('a/b/c/d')))
            # make sure files exist
            self.assertTrue(isfile(path_to('a/b/foo.py')))
            self.assertTrue(isfile(path_to('a/b/c/d/e.py')))
            # make sure only asked files were created
            self.assertEqual(dircontent('a'), ['b'])
            self.assertEqual(dircontent('a/b'), ['c', 'foo.py'])
            self.assertEqual(dircontent('a/b/c'), ['d'])
            self.assertEqual(dircontent('a/b/c/d'), ['e.py'])
        finally:
            shutil.rmtree(chroot)
class TestlibTC(TestCase):
def mkdir(self, path):
if not exists(path):
self._dirs.add(path)
os.mkdir(path)
def setUp(self):
self.tc = MockTestCase()
self._dirs = set()
def tearDown(self):
while(self._dirs):
shutil.rmtree(self._dirs.pop(), ignore_errors=True)
def test_dict_equals(self):
"""tests TestCase.assertDictEqual"""
d1 = {'a' : 1, 'b' : 2}
d2 = {'a' : 1, 'b' : 3}
d3 = dict(d1)
self.assertRaises(AssertionError, self.tc.assertDictEqual, d1, d2)
self.tc.assertDictEqual(d1, d3)
self.tc.assertDictEqual(d3, d1)
self.tc.assertDictEqual(d1, d1)
def test_list_equals(self):
"""tests TestCase.assertListEqual"""
l1 = range(10)
l2 = range(5)
l3 = range(10)
self.assertRaises(AssertionError, self.tc.assertListEqual, l1, l2)
self.tc.assertListEqual(l1, l1)
self.tc.assertListEqual(l1, l3)
self.tc.assertListEqual(l3, l1)
def test_xml_valid(self):
"""tests xml is valid"""
valid = """<root>
<hello />
<world>Logilab</world>
</root>"""
invalid = """<root><h2> </root>"""
self.tc.assertXMLStringWellFormed(valid)
self.assertRaises(AssertionError, self.tc.assertXMLStringWellFormed, invalid)
invalid = """<root><h2 </h2> </root>"""
self.assertRaises(AssertionError, self.tc.assertXMLStringWellFormed, invalid)
def test_unordered_equality_for_lists(self):
l1 = [0, 1, 2]
l2 = [1, 2, 3]
self.assertRaises(AssertionError, self.tc.assertItemsEqual, l1, l2)
self.assertRaises(AssertionError, self.tc.assertItemsEqual, l1, l2)
self.tc.assertItemsEqual(l1, l1)
self.tc.assertItemsEqual(l1, l1)
self.tc.assertItemsEqual([], [])
self.tc.assertItemsEqual([], [])
l1 = [0, 1, 1]
l2 = [0, 1]
self.assertRaises(AssertionError, self.tc.assertItemsEqual, l1, l2)
self.assertRaises(AssertionError, self.tc.assertItemsEqual, l1, l2)
self.tc.assertItemsEqual(l1, l1)
self.tc.assertItemsEqual(l1, l1)
def test_unordered_equality_for_dicts(self):
d1 = {'a' : 1, 'b' : 2}
d2 = {'a' : 1}
self.assertRaises(AssertionError, self.tc.assertItemsEqual, d1, d2)
self.tc.assertItemsEqual(d1, d1)
self.tc.assertItemsEqual({}, {})
def test_equality_for_sets(self):
s1 = set('ab')
s2 = set('a')
self.assertRaises(AssertionError, self.tc.assertSetEqual, s1, s2)
self.tc.assertSetEqual(s1, s1)
self.tc.assertSetEqual(set(), set())
def test_unordered_equality_for_iterables(self):
self.assertRaises(AssertionError, self.tc.assertItemsEqual, xrange(5), xrange(6))
self.assertRaises(AssertionError, self.tc.assertItemsEqual, xrange(5), xrange(6))
self.tc.assertItemsEqual(xrange(5), range(5))
self.tc.assertItemsEqual(xrange(5), range(5))
self.tc.assertItemsEqual([], ())
self.tc.assertItemsEqual([], ())
def test_file_equality(self):
foo = join(dirname(__file__), 'data', 'foo.txt')
spam = join(dirname(__file__), 'data', 'spam.txt')
self.assertRaises( | AssertionError, self.tc.assertFileEqual, foo, spam)
self.tc.assertFileEqual(foo, foo)
def test_dir_equality(self):
ref = join(dirname(__file__), 'data', 'reference_dir')
same = join(dirname(__file__), ' | data', 'same_dir')
subdir_differ = join(dirname(__file__), 'data', 'subdir_differ_dir')
file_differ = join(dirname(__file__), 'data', 'file_differ_dir')
content_differ = join(dirname(__file__), 'data', 'content_differ_dir')
ed1 = join(dirname(__file__), 'data', 'empty_dir_1')
ed2 = join(dirname(__file__), 'data', 'empty_dir_2')
for path in (ed1, ed2, join(subdir_differ, 'unexpected')):
self.mkdir(path)
self.assertDirEqual(ed1, ed2)
self.assertDirEqual(ref, ref)
self.assertDirEqual( ref, same)
self.assertRaises(AssertionError, self.assertDirEqual, ed1, ref)
self.assertRaises(AssertionError, self.assertDirEqual, ref, ed2)
self.assertRaises(AssertionError, self.assertDirEqual, subdir_differ, ref)
self.assertRaises(AssertionError, self.assertDirEqual, file_differ, ref)
self.assertRaises(AssertionError, self.assertDirEqual, ref, content_differ)
def test_stream_equality(self):
foo = join(dirname(__file__), 'data', 'foo.txt')
spam = join(dirname(__file__), 'data', 'spam.txt')
stream1 = open(foo)
self.tc.assertStreamEqual(stream1, stream1)
stream1 = open(foo)
stream2 = open(spam)
self.assertRaises(AssertionError, self.tc.assertStreamEqual, stream1, stream2)
def test_text_equality(self):
self.assertRaises(AssertionError, self.tc.assertMultiLineEqual, "toto", 12)
self.assertRaises(AssertionError, self.tc.assertMultiLineEqual, "toto", 12)
self.assertRaises(AssertionError, self.tc.assertMultiLineEqual, "toto", None)
self.assertRaises(AssertionError, self.tc.assertMultiLineEqual, "toto", None)
self.assertRaises(AssertionError, self.tc.assertMultiLineEqual, 3.12, u"toto")
|
scheib/chromium | third_party/blink/web_tests/external/wpt/webdriver/tests/element_send_keys/conftest.py | Python | bsd-3-clause | 376 | 0 | import pytest
@pytest.fixture
def create_files(tmpdir_factory):
    """Fixture returning a helper that materialises the given file names
    inside a fresh temporary directory.

    Each file is written with its own name as content; the created path
    objects are returned in input order.
    """
    def inner(filenames):
        tmpdir = tmpdir_factory.mktemp("tmp")
        created = []
        for name in filenames:
            entry = tmpdir.join(name)
            entry.write(name)
            created.append(entry)
        return created

    # keep a meaningful name for pytest introspection
    inner.__name__ = "create_files"
    return inner
Comunitea/CMNT_00098_2017_JIM_addons | custom_sale_order_variant_mgmt/__init__.py | Python | agpl-3.0 | 150 | 0 | # -*- c | oding: utf-8 -*-
# © 2017 Comunitea
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from . import models
fr | om . import wizard
|
qPCR4vir/orange | Orange/testing/regression/tests_20/reference_linear-learner.py | Python | gpl-3.0 | 353 | 0.005666 | import orange
# Train a one-vs-rest linear classifier on the iris data set and print the
# learned per-attribute weights for each class (Python 2 script).
data = orange.ExampleTable("iris")
classifier = orange.LinearLearner(data)
for i, cls_name in enumerate(data.domain.classVar.values):
    # trailing comma on the print statement suppresses the newline
    print "Attribute weights for %s vs. rest classification:\n\t" % cls_name,
    for attr, w in zip(data.domain.attributes, classifier.weights[i]):
        print "%s: %.3f " % (attr.name, w),
    print
charleshong/cs3240-labdemo | quack.py | Python | mit | 129 | 0.007752 | # Charles | Hong (csh6cw)
# 09/11/17
# quack.py
| __author__ = 'Charles Hong'
__emailID__ = 'csh6cw'
def quack(msg):
    """Print *msg* to standard output."""
    print(msg)
alby128/syncplay | syncplay/__init__.py | Python | apache-2.0 | 123 | 0 | version = ' | 1.6.8'
# Syncplay release metadata.
revision = ' development'
milestone = 'Yoitsu'
release_number = '95'
projectURL = 'https://syncplay.pl/'
|
haihabi/simpy | simpy/core/result/base_function.py | Python | mit | 391 | 0 | import numpy as np
def data_concat(result_a):
    """Concatenate a sequence of arrays along axis 0 into one array."""
    return np.concatenate(result_a, axis=0)
def data_mean(result_a):
    """Return the mean over every value in the collected results."""
    mean_value = np.mean(result_a)
    return mean_value
def data_identity(result_a):
    """Return the collected results unchanged (no aggregation)."""
    out = result_a
    return out
def data_stack(result_a):
    """Stack the collected arrays along a new leading axis."""
    stacked = np.stack(result_a)
    return stacked
def data_single(result_a):
    """Return only the first collected result."""
    first = result_a[0]
    return first
def data_stack_mean(result_a):
    """Stack the collected arrays along a new leading axis and average
    them element-wise across that axis.

    Equivalent to np.mean(data_stack(result_a), axis=0); np.stack is
    inlined so the function is self-contained.
    """
    return np.mean(np.stack(result_a), axis=0)
|
AfricaChess/lichesshub | tournament/migrations/0002_auto_20171202_0508.py | Python | mit | 550 | 0.001818 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.6 on 2017-12-02 05:08
from __future__ import unicode_literals
from django.db import migrations, models
impor | t django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make Game.match optional (blank/null)."""

    dependencies = [
        ('tournament', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='game',
            name='match',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='tournament.Match'),
        ),
    ]
|
uncled1023/pygments | Pygments/pygments-lib/pygments/lexers/make.py | Python | bsd-2-clause | 7,332 | 0.000546 | # -*- coding: utf-8 -*-
"""
pygments.lexers.make
~~~~~~~~~~~~~~~~~~~~
Lexers for Makefiles and similar.
:copyright: Copyright 2006-2015 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
from pygments.lexer import Lexer, RegexLexer, include, bygroups, \
do_insertions, using
from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
Punctuation
from pygments.lexers.shell import BashLexer
__all__ = ['MakefileLexer', 'BaseMakefileLexer', 'CMakeLexer']
class MakefileLexer(Lexer):
    """
    Lexer for BSD and GNU make extensions (lenient enough to handle both in
    the same file even).
    *Rewritten in Pygments 0.10.*
    """
    name = 'Makefile'
    aliases = ['make', 'makefile', 'mf', 'bsdmake']
    filenames = ['*.mak', '*.mk', 'Makefile', 'makefile', 'Makefile.*', 'GNUmakefile']
    mimetypes = ['text/x-makefile']
    # Lines that look like make "preprocessor" directives.
    r_special = re.compile(
        r'^(?:'
        # BSD Make
        r'\.\s*(include|undef|error|warning|if|else|elif|endif|for|endfor)|'
        # GNU Make
        r'\s*(ifeq|ifneq|ifdef|ifndef|else|endif|-?include|define|endef|:|vpath)|'
        # GNU Automake
        r'\s*(if|else|endif))(?=\s)')
    # Whole-line comments (possibly preceded by '@').
    r_comment = re.compile(r'^\s*@?#')
    def get_tokens_unprocessed(self, text):
        """Lex plain lines with BaseMakefileLexer, then re-insert the
        directive/comment lines (pulled out up front) at their original
        offsets via do_insertions()."""
        ins = []
        lines = text.splitlines(True)
        done = ''
        lex = BaseMakefileLexer(**self.options)
        backslashflag = False
        for line in lines:
            if self.r_special.match(line) or backslashflag:
                # directive line, or continuation of one ending in '\'
                ins.append((len(done), [(0, Comment.Preproc, line)]))
                backslashflag = line.strip().endswith('\\')
            elif self.r_comment.match(line):
                ins.append((len(done), [(0, Comment, line)]))
            else:
                done += line
        for item in do_insertions(ins, lex.get_tokens_unprocessed(done)):
            yield item
    def analyse_text(text):
        # Many makefiles have $(BIG_CAPS) style variables
        if re.search(r'\$\([A-Z_]+\)', text):
            return 0.1
class BaseMakefileLexer(RegexLexer):
    """
    Lexer for simple Makefiles (no preprocessing).
    .. versionadded:: 0.10
    """
    name = 'Base Makefile'
    aliases = ['basemake']
    filenames = []
    mimetypes = []
    tokens = {
        'root': [
            # recipes (need to allow spaces because of expandtabs)
            (r'^(?:[\t ]+.*\n|\n)+', using(BashLexer)),
            # special variables
            (r'\$[<@$+%?|*]', Keyword),
            (r'\s+', Text),
            (r'#.*?\n', Comment),
            (r'(export)(\s+)(?=[\w${}\t -]+\n)',
             bygroups(Keyword, Text), 'export'),
            (r'export\s+', Keyword),
            # assignment
            (r'([\w${}().-]+)(\s*)([!?:+]?=)([ \t]*)((?:.*\\\n)+|.*\n)',
             bygroups(Name.Variable, Text, Operator, Text, using(BashLexer))),
            # strings
            (r'(?s)"(\\\\|\\.|[^"\\])*"', String.Double),
            (r"(?s)'(\\\\|\\.|[^'\\])*'", String.Single),
            # targets
            (r'([^\n:]+)(:+)([ \t]*)', bygroups(Name.Function, Operator, Text),
             'block-header'),
            # expansions
            (r'\$\(', Keyword, 'expansion'),
        ],
        # inside a $(...) expansion; '#push'/'#pop' handle nesting
        'expansion': [
            (r'[^$a-zA-Z_()]+', Text),
            (r'[a-zA-Z_]+', Name.Variable),
            (r'\$', Keyword),
            (r'\(', Keyword, '#push'),
            (r'\)', Keyword, '#pop'),
        ],
        # names following an 'export' keyword, until end of line
        'export': [
            (r'[\w${}-]+', Name.Variable),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
        ],
        # prerequisites part of a target line
        'block-header': [
            (r'[,|]', Punctuation),
            (r'#.*?\n', Comment, '#pop'),
            (r'\\\n', Text), # line continuation
            (r'\$\(', Keyword, 'expansion'),
            (r'[a-zA-Z_]+', Name),
            (r'\n', Text, '#pop'),
            (r'.', Text),
        ],
    }
class CMakeLexer(RegexLexer):
    """
    Lexer for `CMake <http://cmake.org/Wiki/CMake>`_ files.
    .. versionadded:: 1.2
    """
    name = 'CMake'
    aliases = ['cmake']
    filenames = ['*.cmake', 'CMakeLists.txt']
    mimetypes = ['text/x-cmake']
    tokens = {
        'root': [
            # Any identifier followed by '(' is treated as a command call;
            # its argument list is lexed in the 'args' state.
            (r'\b(\w+)([ \t]*)(\()', bygroups(Name.Builtin, Text,
                                              Punctuation), 'args'),
            include('keywords'),
            include('ws')
        ],
        'args': [
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            # ${VAR}, $ENV{VAR} and $<generator-expression>
            (r'(\$\{)(.+?)(\})', bygroups(Operator, Name.Variable, Operator)),
            (r'(\$ENV\{)(.+?)(\})', bygroups(Operator, Name.Variable, Operator)),
            (r'(\$<)(.+?)(>)', bygroups(Operator, Name.Variable, Operator)),
            (r'(?s)".*?"', String.Double),
            (r'\\\S+', String),
            (r'[^)$"# \t\n]+', String),
            (r'\n', Text),  # explicitly legal
            include('keywords'),
            include('ws')
        ],
        'string': [
            # currently unused
        ],
        'keywords': [
            (r'\b(WIN32|UNIX|APPLE|CYGWIN|BORLAND|MINGW|MSVC|MSVC_IDE|MSVC60|'
             r'MSVC70|MSVC71|MSVC80|MSVC90)\b', Keyword),
        ],
        'ws': [
            (r'[ \t]+', Text),
            (r'#.*\n', Comment),
        ]
    }

    def analyse_text(text):
        # Strong signal: a cmake_minimum_required(VERSION x.y) line.
        exp = r'^ *CMAKE_MINIMUM_REQUIRED *\( *VERSION *\d(\.\d)* *( FATAL_ERROR)? *\) *$'
        if re.search(exp, text, flags=re.MULTILINE | re.IGNORECASE):
            return 0.8
        return 0.0
|
charliequinn/python-litmos-api | src/litmos/api.py | Python | bsd-2-clause | 6,221 | 0.002572 | import html
import json
import time
import requests
class API(object):
    """Thin classmethod-based wrapper around the Litmos REST API.

    Set `api_key` and `app_name` on the class before issuing requests.
    List endpoints are paginated in chunks of PAGINATION_OFFSET.
    """

    ROOT_URL = 'https://api.litmos.com/v1.svc'
    PAGINATION_OFFSET = 200
    api_key = None
    app_name = None

    @classmethod
    def _base_url(cls, resource, **kwargs):
        """Build a request URL; optional kwargs extend the path/query."""
        return cls.ROOT_URL + "/" + \
               resource + \
               ("/" + kwargs['resource_id'] if kwargs.get('resource_id', None) else "") + \
               ("/" + kwargs['sub_resource'] if kwargs.get('sub_resource', None) else "") + \
               ("/" + kwargs['sub_resource_id'] if kwargs.get('sub_resource_id', None) else "") + \
               '?source=' + cls.app_name + \
               '&format=json' + \
               ("&search=" + str(kwargs['search_param']) if kwargs.get('search_param', None) else "") + \
               ("&limit=" + str(kwargs['limit']) if kwargs.get('limit', None) else "") + \
               ("&start=" + str(kwargs['start']) if kwargs.get('start', None) else "")

    @classmethod
    def _perform_request(cls, method, url, **kwargs):
        """Issue an HTTP request with the API key header.

        A 503 (request rate limit exceeded) is retried once after 60s.
        Raises requests.HTTPError on error responses.
        """
        kwargs['headers'] = {'apikey': cls.api_key}
        response = requests.request(method, url, **kwargs)
        if response.status_code == 503:  # request rate limit exceeded
            time.sleep(60)
            response = requests.request(method, url, **kwargs)
        response.raise_for_status()
        return response

    @staticmethod
    def _parse_response(response):
        # Litmos HTML-escapes payloads; unescape before JSON decoding.
        return json.loads(html.unescape(response.text))

    @classmethod
    def find(cls, resource, resource_id):
        """Fetch a single resource by id."""
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, resource_id=resource_id)
        )
        return cls._parse_response(response)

    @classmethod
    def delete(cls, resource, resource_id):
        """Delete a resource by id; returns True on success."""
        cls._perform_request(
            'DELETE',
            cls._base_url(resource,
                          resource_id=resource_id
                          )
        )
        return True

    @classmethod
    def create(cls, resource, attributes):
        """Create a resource from *attributes* and return the parsed reply."""
        response = cls._perform_request(
            'POST',
            cls._base_url(resource),
            json=attributes
        )
        return cls._parse_response(response)

    @classmethod
    def update(cls, resource, resource_id, attributes):
        """Update a resource; returns the parsed reply or {} if empty."""
        response = cls._perform_request(
            'PUT',
            cls._base_url(resource, resource_id=resource_id),
            json=attributes
        )
        if response.text:
            return cls._parse_response(response)
        return {}

    @classmethod
    def search(cls, resource, search_param):
        """Search a resource collection."""
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, search_param=search_param)
        )
        return cls._parse_response(response)

    @classmethod
    def _get_all(cls, resource, results, start_pos):
        # Recursively page through the collection until an empty page.
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, limit=cls.PAGINATION_OFFSET, start=start_pos)
        )
        response_list = cls._parse_response(response)
        results += response_list
        if not response_list:
            return results
        else:
            return cls._get_all(resource, results, start_pos + cls.PAGINATION_OFFSET)

    @classmethod
    def all(cls, resource):
        """Fetch every item of a paginated collection."""
        return cls._get_all(resource, [], 0)

    @classmethod
    def get_children(cls, resource, resource_id):
        """Fetch the same-typed children of a resource (e.g. team/teams)."""
        response = cls._perform_request(
            'GET',
            cls._base_url(resource, resource_id=resource_id, sub_resource=resource)
        )
        return cls._parse_response(response)

    @classmethod
    def get_sub_resource(cls, resource, resource_id, sub_resource):
        """Fetch every item of a paginated sub-resource collection."""
        return cls._get_sub_resource(resource, resource_id, sub_resource, [], 0)

    @classmethod
    def _get_sub_resource(cls, resource, resource_id, sub_resource, results, start_pos):
        # Recursively page through the sub-resource until an empty page.
        response = cls._perform_request(
            'GET',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource,
                limit=cls.PAGINATION_OFFSET, start=start_pos
            )
        )
        response_list = cls._parse_response(response)
        results += response_list
        if not response_list:
            return results
        else:
            return cls._get_sub_resource(resource, resource_id, sub_resource, results, start_pos + cls.PAGINATION_OFFSET)

    @classmethod
    def add_sub_resource(cls, resource, resource_id, sub_resource, attributes):
        """POST *attributes* to a sub-resource; parsed reply or True."""
        response = cls._perform_request(
            'POST',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource
            ),
            json=attributes
        )
        if response.text:
            return cls._parse_response(response)
        return True

    @classmethod
    def update_sub_resource(cls, resource, resource_id, sub_resource, sub_resource_id, attributes=None):
        """PUT *attributes* to one sub-resource item; parsed reply or True."""
        response = cls._perform_request(
            'PUT',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource,
                sub_resource_id=sub_resource_id
            ),
            json=attributes
        )
        if response.text:
            return cls._parse_response(response)
        return True

    @classmethod
    def remove_sub_resource(cls, resource, resource_id, sub_resource, sub_resource_id):
        """DELETE one sub-resource item; returns True on success."""
        cls._perform_request(
            'DELETE',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource,
                sub_resource_id=sub_resource_id)
        )
        return True

    @classmethod
    def remove_sub_resources(cls, resource, resource_id, sub_resource, attributes):
        """DELETE several sub-resource items at once; parsed reply or True."""
        response = cls._perform_request(
            'DELETE',
            cls._base_url(
                resource,
                resource_id=resource_id,
                sub_resource=sub_resource
            ),
            json=attributes
        )
        if response.text:
            return cls._parse_response(response)
        return True
|
Martin09/E-BeamPatterns | 100 Wafers - 1cm Squares/Multi-Use Pattern/v1.4/MembraneDesign_100Wafer_v1.4.py | Python | gpl-3.0 | 20,307 | 0.003102 | # -*- coding: utf-8 -*-
"""
Created on Fri Dec 18 14:11:31 2015
@author: Martin Friedl
"""
import itertools
from datetime import date
from random import choice as random_choice
import numpy as np
from Patterns.GrowthTheoryCell import make_theory_cell
from Patterns.GrowthTheoryCell_100_3BranchDevices import make_theory_cell_3br
from Patterns.GrowthTheoryCell_100_4BranchDevices import make_theory_cell_4br
from Patterns.QuantumPlayground_100_v1 import make_qp
from gdsCAD_py3.core import Cell, Boundary, CellArray, Layout, Path
from gdsCAD_py3.shapes import Box, Rectangle, Label
from gdsCAD_py3.templates100 import Wafer_GridStyle, dashed_line
WAFER_ID = '000050254318SL'  # CHANGE THIS FOR EACH DIFFERENT WAFER
PATTERN = 'SQ1.4'
putOnWafer = True  # Output full wafer or just a single pattern?
HighDensity = False  # High density of triangles?
glbAlignmentMarks = False  # add global alignment marks?
tDicingMarks = 10.  # Dicing mark line thickness (um)
rotAngle = 0.  # Rotation angle of the membranes
wafer_r = 25e3  # wafer radius in drawing units (presumably um -- verify)
# NOTE(review): the format string below has no placeholders, so the
# int(wafer_r / 1000) argument is discarded -- confirm intended label text.
waferVer = '100 Membranes Multi-Use v1.4'.format(int(wafer_r / 1000))
waferLabel = waferVer + '\n' + date.today().strftime("%d%m%Y")
# Layers
l_smBeam = 0
l_lgBeam = 1
l_drawing = 100
# %% Wafer template for MBE growth
class MBE100Wafer(Wafer_GridStyle):
"""
A 2" wafer divided into square cells
"""
def __init__(self, name, cells=None):
Wafer_GridStyle.__init__(self, name=name, cells=cells, block_gap=1200.)
# The placement of the wafer alignment markers
am_x = 1.5e4
am_y = 1.5e4
self.align_pts = np.array([am_x, am_y])
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(-1, 1))) # Reflect about y-axis
self.align_pts = np.vstack((self.align_pts, self.align_pts *
(1, -1))) # Reflect about x-axis
self.wafer_r = 25e3
self.block_size = np.array([10e3, 10e3])
self._place_blocks(radius=self.wafer_r + 5e3)
# if glbAlignmentMarks:
# self.add_aligment_marks(l_lgBeam)
# self.add_orientation_text(l_lgBeam)
# self.add_dicing_marks() # l_lgBeam, mkWidth=mkWidth Width of dicing marks
self.add_blocks()
self.add_wafer_outline(layers=l_drawing)
self.add_dashed_dicing_marks(layers=[l_lgBeam])
self.add_subdicing_marks(200, 5, layers=[l_lgBeam])
self.add_block_labels(l_lgBeam, quasi_unique_labels=True)
self.add_prealignment_markers(layers=[l_lgBeam])
self.add_tem_membranes([0.02, 0.04, 0.06, 0.08], 500, 1, l_smBeam)
self.add_theory_cells()
self.add_chip_labels()
# self.add_blockLabels(l_lgBeam)
# self.add_cellLabels(l_lgBeam)
bottom = np.array([0, -self.wafer_r * 0.9])
# top = np.array([0, -1]) * bottom
self.add_waferLabel(waferLabel, l_drawing, pos=bottom)
def add_block_labels(self, layers, quasi_unique_labels=False):
if type(layers) is not list:
layers = [layers]
txtSize = 800
if quasi_unique_labels:
unique_label_string = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'
possible_labels = ["".join(x) for x in itertools.product(unique_label_string, repeat=2)]
blockids_set = set()
while len(blockids_set) < len(self.blocks):
blockids_set.add(random_choice(possible_labels))
blockids = list(blockids_set)
for i, block in enumerate(self.blocks):
blocklabel = Cell('LBL_B_' + blockids[i])
for l in layers:
txt = Label(blockids[i], txtSize, layer=l)
bbox = txt.bounding_box
offset = (0, 0)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
txt.translate(offset) # Translate it to bottom of wafer
blocklabel.add(txt)
block.add(blocklabel, origin=(self.block_size[0] / 2., self.block_size[1] / 2.))
else:
for (i, pt) in enumerate(self.block_pts):
origin = (pt + np.array([0.5, 0.5])) * self.block_size
blk_lbl = self.blockcols[pt[0]] + self.blockrows[pt[1]]
for l in layers:
txt = Label(blk_lbl, txtSize, layer=l_lgBeam)
bbox = txt.bounding_box
offset = np.array(pt)
txt.translate(-np.mean(bbox, 0)) # Center text around origin
lbl_cell = Cell("lbl_" + blk_lbl)
lbl_cell.add(txt)
origin += np.array([0, 2000]) # Translate it up by 2mm
self.add(lbl_cell, origin=origin)
def add_dashed_dicing_marks(self, layers):
if type(layers) is not list:
layers = [layers]
width = 10. / 2
dashlength = 2000
r = self.wafer_r
rng = np.floor(self.wafer_r / self.block_size).astype(int)
dmarks = Cell('DIC_MRKS')
for l in layers:
for x in np.arange(-rng[0], rng[0] + 1) * self.block_size[0]:
y = np.sqrt(r ** 2 - x ** 2)
vm = dashed_line([x, y], [x, -y], dashlength, width, layer=l)
dmarks.add(vm)
for y in np.arange(-rng[1], rng[1] + 1) * self.block_size[1]:
x = np.sqrt(r ** 2 - y ** 2)
hm = dashed_line([x, y], [-x, y], dashlength, width, layer=l)
dmarks.add(hm)
self.add(dmarks)
def add_subdicing_marks(self, length, width, layers):
if type(layers) is not list:
layers = [layers]
for l in layers:
mark_cell = Cell("SubdicingMark")
line = Path([[0, 0], [0, length]], width=width, layer=l)
mark_cell.add(line)
for block in self.blocks:
block.add(mark_cell, origin=(self.block_size[0] / 2., 0), rotation=0)
block.add(mark_cell, origin=(0, self.block_size[1] / 2.), rotation=-90)
block.add(mark_cell, origin=(self.block_size[0], self.block_size[1] / 2.), rotation=90)
block.add(mark_cell, origin=(self.block_size[0] / 2., self.block_size[1]), rotation=180)
def add_prealignment_markers(self, layers, mrkr_size=7):
if mrkr_size % 2 == 0: # Number is even, but we need odd numbers
mrkr_size += 1
if type(layers) is not list:
layers = [layers]
for l in layers:
rect_size = 10. # 10 um large PAMM rectangles
marker_rect = Rectangle([-rect_size / 2., -rect_size / 2.], [rect_size / 2., rect_size / 2.], layer=l)
marker = Cell('10umMarker')
marker.add(marker_rect)
# Make one arm of the PAMM array
marker_arm = Cell('PAMM_Arm')
# Define the positions of the markers, they increase in spacing by 1 um eac | h time:
mrkr_positions = [75 * n + (n - 1) * n // 2 for n in range(1, (mrkr_size - 1) // 2 + 1)]
for pos in mrkr_positions:
marker_arm.add(marker, origin=[pos, 0])
# Build the final PAMM Mark | er
pamm_cell = Cell('PAMM_Marker')
pamm_cell.add(marker) # Center marker
pamm_cell.add(marker_arm) # Right arm
pamm_cell.add(marker_arm, rotation=180) # Left arm
pamm_cell.add(marker_arm, rotation=90) # Top arm
pamm_cell.add(marker_arm, rotation=-90) # Bottom arm
for pos in mrkr_positions:
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=90) # Top arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=90)
pamm_cell.add(marker_arm, origin=[pos, 0], rotation=-90) # Bottom arms
pamm_cell.add(marker_arm, origin=[-pos, 0], rotation=-90)
# Make the 4 tick marks that mark the center of the array
h = 30.
w = 100.
tick_mrk = Rectangle([-w / 2., -h / 2.], [w / 2, h / 2.], layer=l)
tick_mrk_cell = Cell("TickMark")
tick_mrk_cell |
gevaerts/bup | cmd/bloom-cmd.py | Python | lgpl-2.1 | 5,247 | 0.002287 | #!/bin/sh
"""": # -*-python-*-
bup_python="$(dirname "$0")/bup-python" || exit $?
exec "$bup_python" "$0" ${1+"$@"}
"""
# end of bup preamble
import glob, os, sys, tempfile
from bup import options, git, bloom
from bup.helpers import (add_error, debug1, handle_ctrl_c, log, progress, qprogress,
saved_errors)
optspec = """
bup bloom [options...]
--
ruin ruin the specified bloom file (clearing the bitfield)
f,force ignore existing bloom file and regenerate it from scratch
o,output= output bloom filename (default: auto)
d,dir= input directory to look for idx files (default: auto)
k,hashes= number of hash functions to use (4 or 5) (default: auto)
c,check= check the given .idx file against the bloom filter
"""
def ruin_bloom(bloomfilename):
    """Ruin the given bloom file by clearing its bitfield in place.

    A missing file is reported via add_error() instead of raising.
    """
    rel_name = git.repo_rel(bloomfilename)
    if not os.path.exists(bloomfilename):
        log("%s\n" % bloomfilename)
        add_error("bloom: %s not found to ruin\n" % rel_name)
        return
    sb = bloom.ShaBloom(bloomfilename, readwrite=True, expected=1)
    sb.map[16:16+2**sb.bits] = '\0' * 2**sb.bits
def check_bloom(path, bloomfilename, idx):
    """Verify that every object in *idx* is matched by the bloom filter.

    Problems are reported through log()/add_error(); nothing is returned.
    """
    rbloomfilename = git.repo_rel(bloomfilename)
    ridx = git.repo_rel(idx)
    if not os.path.exists(bloomfilename):
        log("bloom: %s: does not exist.\n" % rbloomfilename)
        return
    b = bloom.ShaBloom(bloomfilename)
    if not b.valid():
        add_error("bloom: %r is invalid.\n" % rbloomfilename)
        return
    base = os.path.basename(idx)
    if base not in b.idxnames:
        log("bloom: %s does not contain the idx.\n" % rbloomfilename)
        return
    if base == idx:
        # bare idx name given: resolve it relative to the pack dir
        idx = os.path.join(path, idx)
    log("bloom: bloom file: %s\n" % rbloomfilename)
    log("bloom: checking %s\n" % ridx)
    for objsha in git.open_idx(idx):
        if not b.exists(objsha):
            add_error("bloom: ERROR: object %s missing"
                      % str(objsha).encode('hex'))
# Path of the first pack dir processed; do_bloom() uses it to decide
# whether progress messages need a repo-relative dir prefix.
_first = None
def do_bloom(path, outfilename):
    """Create or update the bloom filter for the .idx files in *path*.

    An existing valid bloom file is extended in place when its size and
    false-positive rate allow; otherwise a new one is built from scratch
    in a temp file and renamed over *outfilename*.
    """
    global _first
    b = None
    if os.path.exists(outfilename) and not opt.force:
        b = bloom.ShaBloom(outfilename)
        if not b.valid():
            debug1("bloom: Existing invalid bloom found, regenerating.\n")
            b = None
    # Counting pass: split idx files into those already covered by the
    # existing bloom ('rest') and those still to be added ('add').
    add = []
    rest = []
    add_count = 0
    rest_count = 0
    for i,name in enumerate(glob.glob('%s/*.idx' % path)):
        progress('bloom: counting: %d\r' % i)
        ix = git.open_idx(name)
        ixbase = os.path.basename(name)
        if b and (ixbase in b.idxnames):
            rest.append(name)
            rest_count += len(ix)
        else:
            add.append(name)
            add_count += len(ix)
    # NOTE(review): 'total' is computed but never used.
    total = add_count + rest_count
    if not add:
        debug1("bloom: nothing to do.\n")
        return
    # Decide whether the existing bloom can be extended or must be rebuilt.
    if b:
        if len(b) != rest_count:
            debug1("bloom: size %d != idx total %d, regenerating\n"
                   % (len(b), rest_count))
            b = None
        elif (b.bits < bloom.MAX_BLOOM_BITS and
              b.pfalse_positive(add_count) > bloom.MAX_PFALSE_POSITIVE):
            debug1("bloom: regenerating: adding %d entries gives "
                   "%.2f%% false positives.\n"
                   % (add_count, b.pfalse_positive(add_count)))
            b = None
        else:
            b = bloom.ShaBloom(outfilename, readwrite=True, expected=add_count)
    if not b: # Need all idxs to build from scratch
        add += rest
        add_count += rest_count
    del rest
    del rest_count
    msg = b is None and 'creating from' or 'adding'
    if not _first: _first = path
    dirprefix = (_first != path) and git.repo_rel(path)+': ' or ''
    progress('bloom: %s%s %d file%s (%d object%s).\n'
             % (dirprefix, msg,
                len(add), len(add)!=1 and 's' or '',
                add_count, add_count!=1 and 's' or ''))
    tfname = None
    if b is None:
        # Build into a temp file; renamed into place after a clean finish.
        tfname = os.path.join(path, 'bup.tmp.bloom')
        b = bloom.create(tfname, expected=add_count, k=opt.k)
    count = 0
    icount = 0
    for name in add:
        ix = git.open_idx(name)
        qprogress('bloom: writing %.2f%% (%d/%d objects)\r'
                  % (icount*100.0/add_count, icount, add_count))
        b.add_idx(ix)
        count += 1
        icount += len(ix)
    # Currently, there's an open file object for tfname inside b.
    # Make sure it's closed before rename.
    b.close()
    if tfname:
        os.rename(tfname, outfilename)
handle_ctrl_c()

# Parse command-line options according to optspec.
o = options.Options(optspec)
(opt, flags, extra) = o.parse(sys.argv[1:])
if extra:
    o.fatal('no positional parameters expected')
git.check_repo_or_die()
# -k only applies when (re)building a bloom file, not when checking.
if not opt.check and opt.k and opt.k not in (4,5):
    o.fatal('only k values of 4 and 5 are supported')
# Operate on the given pack dir, or on every pack dir of the repo.
paths = opt.dir and [opt.dir] or git.all_packdirs()
for path in paths:
    debug1('bloom: scanning %s\n' % path)
    outfilename = opt.output or os.path.join(path, 'bup.bloom')
    if opt.check:
        check_bloom(path, outfilename, opt.check)
    elif opt.ruin:
        ruin_bloom(outfilename)
    else:
        do_bloom(path, outfilename)
if saved_errors:
    log('WARNING: %d errors encountered during bloom.\n' % len(saved_errors))
    sys.exit(1)
elif opt.check:
    log('All tests passed.\n')
raidixlab/insane_striping | parse.py | Python | gpl-2.0 | 10,189 | 0.004762 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
"""This script automates testing of insane_striping algorithms.
You should fill configuration file with your own values,then start this script
as 'python parse.py <config>'. If you test LRC, make sure that stripe-searcher
is in './searcher' catalogue.
Results of tests will be contained in the file "results.csv"
"""
__author__ = "Mariya Podpirova, Evgeny Anastasiev"
__copyright__ = "Copyright (C) 2015, Raidix"
__credits__ = ["Mariya Podpirova","Evgeny Anastasiev"]
__license__ = "GPL"
__version__ = "1.0"
__maintainer__ = "Evgeny Anastasiev"
__status__ = "Production"
import subprocess
import os
pathtobrute = 'searcher'
def read_section(section):
    """Return the content lines of the named INI-style section of the
    configuration file 'pattern2'.

    A header line '[name]' starts a section; subsequent lines belong to it
    until the next header.  Comment lines (starting with '#'), blank lines
    and the headers themselves are never returned.  Raises IOError when
    'pattern2' is missing (unchanged behaviour).

    Fixes over the original: the file handle is now closed, blank lines
    can no longer raise on line[0], and content appearing before any
    section header is skipped instead of raising NameError.
    """
    current = None  # name of the section we are currently inside
    result = []
    with open('pattern2', 'r') as cfg:
        for line in cfg:
            stripped = line.strip()
            if not stripped:
                continue  # blank lines can never be section content
            if line[0] == '[':
                current = stripped[1:-1]
                continue  # headers are never returned as content
            if current == section and line[0] != '#':
                result.append(stripped)
    return result
# Load the test configuration sections from the 'pattern2' file.
devices = read_section('devices')
volume = read_section('volume')
block_sizes = read_section('block_sizes')
tests = read_section('tests')
# First devices line is a space-separated list of partitions.
partitions = devices[0].split(' ')
# Volume spec like '10G': numeric size plus a one-character unit suffix.
size_disk = int(volume[0][:-1])
byte = (volume[0][-1])
sizes = block_sizes[0].split(' ')
################################################################
# CSV cache of previously discovered schemes, and its column layout.
schemes = 'schemes.csv'
columns = {'groups': 0, 'length': 1, 'disks': 2, 'global_s': 3, 'scheme': 4}
def dict2list(dct):
    """Lay out *dct*'s values into a list ordered by the global `columns`
    map (each key's value lands at its column index)."""
    ordered = [''] * len(dct)
    for key, value in dct.items():
        ordered[columns[key]] = value
    return ordered
def add_scheme(dct):
    """Append *dct* as one CSV row to the schemes cache file and return 0."""
    cells = [str(cell) for cell in dict2list(dct)]
    with open(schemes, "a") as out:
        out.write(','.join(cells) + '\n')
    return 0
def search_scheme(params):
    """Search the schemes CSV cache for a row whose numeric fields match
    every entry of *params* (keyed through the global `columns` layout).

    Returns the stored scheme string on a hit, or 0 on a miss (0 rather
    than None is kept for caller compatibility).

    Fixes over the original: the file handle is closed deterministically,
    the bare `except` is narrowed to the exceptions that can actually
    occur, and the inner loop stops at the first mismatch.
    """
    with open(schemes) as f:
        for line in f:
            fields = line.split(',')
            match = True
            for key, wanted in params.items():
                try:
                    value = int(fields[columns[key]])
                except (KeyError, IndexError, ValueError):
                    # missing or non-numeric cell counts as 0, as before
                    value = 0
                if value != wanted:
                    match = False
                    break  # remaining fields cannot change the verdict
            if match:
                return fields[columns['scheme']].strip()
    return 0
################################################################
def defines(scheme):
    """Derive the C '#define' lines for lrc_config from a scheme string.

    Character counts drive the values: digits '1' mark data blocks (minus
    one header '1'), 's'/'S' local syndromes, 'e'/'E' empty blocks, and
    'g'/'G' global syndromes.  Returns a list of four define lines.
    """
    values = {
        'SUBSTRIPES': scheme.count('s') + scheme.count('S'),
        'SUBSTRIPE_DATA': scheme.count('1') - 1,
        'E_BLOCKS': scheme.count('e') + scheme.count('E'),
        'GLOBAL_S': scheme.count('g') + scheme.count('G'),
    }
    order = ('SUBSTRIPES', 'SUBSTRIPE_DATA', 'E_BLOCKS', 'GLOBAL_S')
    return ['#define %s %d\n' % (name, values[name]) for name in order]
# Helper: render a Python sequence as a C array initializer line.
def print_array(array):
    """Format *array* as a C initializer list, e.g. '{1, 2, 3};' plus a
    trailing newline.

    Note: for an empty sequence this yields '};' with no opening brace,
    matching the original behaviour.
    """
    pieces = ['{']
    for item in array:
        pieces.append(str(item))
        pieces.append(', ')
    # drop the trailing ', ' (and the '{' itself when array is empty)
    return ''.join(pieces)[:-2] + '};\n'
# Translate the user-entered scheme string into the form understood by
# the C kernel module.
def get_hex_scheme(scheme):
    """Return the scheme as a list of hex() strings.

    Digit N becomes hex(N - 1); 's'/'S' followed by a digit becomes a
    local-syndrome code 191 + digit (0xc0 for 's1'), consuming two
    characters; 'e'/'E' becomes 0xee (empty block); anything else becomes
    0xff (global syndrome).
    """
    out = []
    pos = 0
    length = len(scheme)
    # Manual index because a local syndrome consumes two characters.
    while pos < length:
        ch = scheme[pos]
        if ch.isdigit():
            out.append(hex(int(ch) - 1))
        elif ch in ('s', 'S'):
            # the digit after 's' selects the local-syndrome code
            out.append(hex(191 + int(scheme[pos + 1])))
            pos += 1  # the digit was consumed too
        elif ch in ('e', 'E'):
            out.append(hex(0xee))
        else:
            out.append(hex(0xff))
        pos += 1
    return out
# The next few helpers derive individual variables for lrc_config.c
# from the hex-encoded scheme.
def get_data_scheme(hex_scheme):
    """Return only the data-block codes: drop local syndromes (0xcN),
    empty blocks (0xee) and global syndromes (0xff)."""
    return [code for code in hex_scheme if code[2] not in ('c', 'e', 'f')]
def get_ls_places(hex_scheme):
    """Indices of the local-syndrome blocks (codes whose third character
    is 'c', i.e. 0xcN)."""
    return [idx for idx, code in enumerate(hex_scheme) if code[2] == 'c']
def get_gs(hex_scheme):
    """Indices of the global-syndrome blocks (codes whose third character
    is 'f', i.e. 0xff)."""
    return [idx for idx, code in enumerate(hex_scheme) if code[2] == 'f']
def ordered_offset(hex_scheme):
    """All non-data block positions -- local syndromes, global syndromes
    and the (first) empty block -- sorted in increasing order."""
    offsets = get_ls_places(hex_scheme) + get_gs(hex_scheme)
    offsets.append(hex_scheme.index(hex(0xee)))  # first empty block
    offsets.sort()
    return offsets
def get_ldb(hex_scheme):
    """Index of the last data block, scanning from the tail; -1 when the
    scheme contains no data blocks at all."""
    for pos in range(len(hex_scheme) - 1, -1, -1):
        if hex_scheme[pos][2] not in ('c', 'e', 'f'):
            return pos
    return -1
# This function builds the main list of output lines for lrc_config.c.
def constants(scheme):
    """Return the C source lines (as a list of strings) declaring the
    lrc_* constant arrays derived from the given scheme string.

    The '// ...' fragments below are C comments emitted into the
    generated file, not Python comments.
    """
    cnstns = []
    cnstns.append('\n')
    cnstns.append('const unsigned char lrc_scheme[(SUBSTRIPE_DATA + 1) * SUBSTRIPES + E_BLOCKS + GLOBAL_S] =\n')
    hex_scheme = get_hex_scheme(scheme)
    cnstns.append(print_array(hex_scheme))
    cnstns.append('\n')
    cnstns.append('// it is just lrc_scheme without 0xee, 0xff and 0xcN\n')
    cnstns.append('const unsigned char lrc_data[SUBSTRIPE_DATA * SUBSTRIPES] =\n')
    data_scheme = get_data_scheme(hex_scheme)
    cnstns.append(print_array(data_scheme))
    cnstns.append('\n')
    cnstns.append('// it is place of global syndrome\n')
    gs_array = get_gs(hex_scheme)
    cnstns.append('const int lrc_gs[GLOBAL_S] = ')
    cnstns.append(print_array(gs_array))
    cnstns.append('\n')
    cnstns.append('// places of all local syndromes\n')
    cnstns.append('const int lrc_ls[SUBSTRIPES] = ')
    ls = get_ls_places(hex_scheme)
    cnstns.append(print_array(ls))
    cnstns.append('// empty place\n')
    cnstns.append('const int lrc_eb = ' + str(hex_scheme.index(hex(0xee))) + ';\n')
    cnstns.append('// not-data blocks, ordered by increasing\n')
    cnstns.append('const int lrc_offset[SUBSTRIPES + E_BLOCKS + GLOBAL_S] = ')
    oo = ordered_offset(hex_scheme)
    cnstns.append(print_array(oo))
    cnstns.append('// number of the last data block\n')
    cnstns.append('const int lrc_ldb = ' + str(get_ldb(hex_scheme)) + ';\n')
    cnstns.append('\n')
    return cnstns
###############################################################
line1 = ','.join(sizes)
with open('results.csv', 'a') as first:
first.write(',%s\n' % (line1))
for i in xrange(len(tests)):
test1 = tests[i].split(' ')
disks_count = int(test1[0])
if len(partitions) < disks_count:
print 'Error, partiti | ons < disks'
exit()
if len(test1) > 2:
if test1[2][:6] == 'scheme':
get_scheme = test1[2][7:]
if test1[2][:6] == 'groups':
groups = int(test1[ | 2][7:])
length = int(test1[3][7:])
if len(test1) > 4:
global_s = int(test1[4][9:])
else:
global_s = 1
param = {'groups': groups, 'length': length, 'disks': disks_count, 'global_s': global_s}
if search_scheme(param) == 0:
with open(os.path.join('%s/' % (pathtobrute), 'defines.c'),'w') as defnes:
defnes.write('#define disks_count %s\n#define groups_count %s\n#define group_len %s\n' % (disks_count, groups, length))
command1 ="cd %s; make all;timeout -s INT 20 ./main 1 1 > tmp.res ; cat tmp.res | grep G | sed -e 's/^.*[\t]//g; s/[ \t]*//g; q' | tr -d '\n'; rm -f tmp.res" % (pathtobrute)
process1 = subprocess.Popen(command1, stdout=subprocess.PIPE, shell=True)
out = process1.stdout.read()
# запуск искалки
# поиск схемы
dct={'groups': groups, 'length': length, 'disks': disks_count, 'global_s': global_s, 'scheme': out}
add_scheme(dct)
get_scheme = out
# get_scheme = #схем |
dbservice/dbservice | dbservice/apps/homes/admin.py | Python | mit | 1,248 | 0 | from django.contrib import admin
from dbservice.apps.utils import MEASUREMENT_UNIT_CHOICES
from . import models
admin.site.register(models.FixedValueMeterPort)
@admin.register(models.VirtualEnergyPo | rt)
class VirtualPortEnergyAdmin(admin.ModelAdmin):
fields = ('name', 'consumption', 'current', 'voltage', 'power | _factor')
view_on_site = True
    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        """Restrict each FK dropdown to MeterPorts with the matching unit.

        Each virtual-energy-port field may only reference ports measured
        in the appropriate unit, picked positionally from
        MEASUREMENT_UNIT_CHOICES: index 0 -> consumption, 3 -> current,
        2 -> voltage, 6 -> power_factor.
        """
        # NOTE(review): the indices into MEASUREMENT_UNIT_CHOICES are
        # positional; confirm they still line up if that choices list in
        # dbservice.apps.utils is ever reordered.
        if db_field.name == "consumption":
            kwargs["queryset"] = models.MeterPort.objects.filter(
                unit=MEASUREMENT_UNIT_CHOICES[0][0]
            )
        if db_field.name == "current":
            kwargs["queryset"] = models.MeterPort.objects.filter(
                unit=MEASUREMENT_UNIT_CHOICES[3][0]
            )
        if db_field.name == "voltage":
            kwargs["queryset"] = models.MeterPort.objects.filter(
                unit=MEASUREMENT_UNIT_CHOICES[2][0]
            )
        if db_field.name == "power_factor":
            kwargs["queryset"] = models.MeterPort.objects.filter(
                unit=MEASUREMENT_UNIT_CHOICES[6][0]
            )
        return super(VirtualPortEnergyAdmin, self).formfield_for_foreignkey(
            db_field, request, **kwargs)
|
sutartmelson/girder | girder/utility/webroot.py | Python | apache-2.0 | 3,397 | 0.000589 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###############################################################################
# Copyright Kitware Inc.
#
# Licensed under the Apache License, Version 2.0 ( the "License" );
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
###############################################################################
import cherrypy
import mako
import os
from girder import constants
from girder.utility import config
class WebrootBase(object):
    """
    Serves a template file in response to GET requests.

    This will typically be the base class of any non-API endpoints.
    """
    exposed = True

    def __init__(self, templatePath):
        # An unreadable template is unrecoverable, so let IOError propagate.
        with open(templatePath) as templateFile:
            self.template = templateFile.read()
        self.indexHtml = None  # rendered lazily on the first GET
        self.vars = {}
        self.config = config.getConfig()

    def updateHtmlVars(self, vars):
        """
        If any of the variables in the index html need to change, call this
        with the updated set of variables to render the template with.
        """
        self.vars.update(vars)
        self.indexHtml = None  # invalidate the cached rendering

    def _renderHTML(self):
        # Render the mako template against the current variable set.
        template = mako.template.Template(self.template)
        return template.render(**self.vars)

    def GET(self, **params):
        # Re-render on every request in development mode; otherwise cache.
        if self.indexHtml is None or self.config['server']['mode'] == 'development':
            self.indexHtml = self._renderHTML()
        return self.indexHtml

    def DELETE(self, **params):
        raise cherrypy.HTTPError(405)

    def PATCH(self, **params):
        raise cherrypy.HTTPError(405)

    def POST(self, **params):
        raise cherrypy.HTTPError(405)

    def PUT(self, **params):
        raise cherrypy.HTTPError(405)
class Webroot(WebrootBase):
    """
    The webroot endpoint simply serves the main index HTML file.
    """
    def __init__(self, templatePath=None):
        if not templatePath:
            templatePath = os.path.join(constants.PACKAGE_DIR,
                                        'utility', 'webroot.mako')
        super(Webroot, self).__init__(templatePath)

        self.vars = {
            'plugins': [],
            'apiRoot': '',
            'staticRoot': '',
            'title': 'Girder'
        }

    def _renderHTML(self):
        # Recompute which enabled plugins ship built CSS/JS assets, so the
        # template can emit the matching <link>/<script> tags.
        # (The pluginCss line was garbled in the source; restored by
        # symmetry with the pluginJs line below.)
        self.vars['pluginCss'] = []
        self.vars['pluginJs'] = []
        builtDir = os.path.join(constants.STATIC_ROOT_DIR, 'clients', 'web',
                                'static', 'built', 'plugins')
        for plugin in self.vars['plugins']:
            if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.css')):
                self.vars['pluginCss'].append(plugin)
            if os.path.exists(os.path.join(builtDir, plugin, 'plugin.min.js')):
                self.vars['pluginJs'].append(plugin)

        return super(Webroot, self)._renderHTML()
|
SymbiFlow/python-fpga-interchange | fpga_interchange/route_stitching.py | Python | isc | 16,298 | 0.00043 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 The SymbiFlow Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
""" This file defines the RoutingTree class which can be used for constructing
routing trees for route segments from the fpga_interchange.physical_netlist
class PhysicalBelPin/PhysicalSitePin/PhysicalSitePip/PhysicalPip.
Use of the RoutingTree requires having the DeviceResources class loaded for
the relevant part for the design. Use
interchange_capnp.Interchange.read_device_resources to load a device resource
file.
"""
def create_id_map(id_to_segment, segments):
    """Create or update a dict mapping object ids of segments to segments.

    Recurses through each segment's branches; raises AssertionError if the
    same segment object is encountered twice.
    """
    for seg in segments:
        key = id(seg)
        assert key not in id_to_segment
        id_to_segment[key] = seg
        create_id_map(id_to_segment, seg.branches)
def check_tree(routing_tree, segment):
    """Recursively validate the routing tree rooted at *segment*.

    Checks for:
    - Circular routing trees (yield_branches asserts on a revisited node)
    - Child segments whose device resource is not connected to the
      parent's device resource
    """
    # Walking every branch trips yield_branches' assertion on any cycle.
    for _ in yield_branches(segment):
        pass
    parent_resource = routing_tree.get_device_resource(segment)
    for child in segment.branches:
        child_resource = routing_tree.get_device_resource(child)
        assert parent_resource.is_connected(child_resource), (str(segment),
                                                             str(child),
                                                             parent_resource,
                                                             child_resource)
        check_tree(routing_tree, child)
def yield_branches(routing_branch):
    """Yield the given route segment and all of its descendants, pre-order.

    Raises AssertionError when a circular route (or any segment reachable
    twice) is detected.
    """
    seen = set()
    stack = [routing_branch]
    while stack:
        node = stack.pop()
        node_id = id(node)
        assert node_id not in seen
        seen.add(node_id)
        yield node
        # reversed() keeps children in their original left-to-right order.
        stack.extend(reversed(node.branches))
def sort_branches(branches):
    """ Sort branches in place by the branch tuple.

    The branch tuple is:
        ('bel_pin'/'site_pin'/'site_pip'/'pip', <site>/<tile>, ...)
    so sorting in this way ensures that BEL pins are grouped, etc.

    This also canonicalizes the branch order, which makes comparing two
    trees easy: just normalize both trees and compare the results.
    """
    branches.sort(key=lambda item: item.to_tuple())
def get_tuple_tree(root_branch):
    """Convert a route branch into a nested (tuple, children) pair."""
    child_trees = tuple(get_tuple_tree(child) for child in root_branch.branches)
    return root_branch.to_tuple(), child_trees
class RoutingTree():
""" Utility class for manag | ing stitching of a routing tree. """
    def __init__(self, device_resources, site_types, stubs, sources):
        """Index the given route segments and validate the initial trees.

        device_resources -- device database used to resolve segment resources
        site_types -- site type map passed to segment.get_device_resource
        stubs -- route segments whose root status is not yet known
        sources -- route segments expected to be tree roots
        """
        # Check that no duplicate routing resources are present.
        tuple_to_id = {}
        for stub in stubs:
            for branch in yield_branches(stub):
                tup = branch.to_tuple()
                assert tup not in tuple_to_id, tup
                tuple_to_id[tup] = id(branch)
        for source in sources:
            for branch in yield_branches(source):
                tup = branch.to_tuple()
                assert tup not in tuple_to_id, tup
                tuple_to_id[tup] = id(branch)
        self.id_to_segment = {}
        self.id_to_device_resource = {}
        self.stubs = stubs
        self.sources = sources
        # Lazily-built map (see build_connections): connection -> segment ids.
        self.connections = None
        # Populate id_to_segment and id_to_device_resource maps.
        create_id_map(self.id_to_segment, self.stubs)
        create_id_map(self.id_to_segment, self.sources)
        for segment_id, segment in self.id_to_segment.items():
            self.id_to_device_resource[
                segment_id] = segment.get_device_resource(
                    site_types, device_resources)
        # Verify initial input makes sense.
        self.check_trees()
    def segment_for_id(self, segment_id):
        """ Get routing segment based on the object id of the routing segment.
        Raises KeyError for ids that were never registered. """
        return self.id_to_segment[segment_id]
def normalize_tree(self):
""" Normalize the routing tree by sorted element. """
sort_branches(self.stubs)
sort_branches(self.sources)
for stub in self.stubs:
for branch in yield_branches(stub):
sort_branches(branch.branches)
for source in self.sources:
for branch in yield_branches(source):
sort_branches(branch.branches)
    def get_tuple_tree(self):
        """ Get tuple tree representation of the current routing tree.

        Delegates per-branch conversion to the module-level get_tuple_tree.
        This is suitable for equality checking if normalized with
        normalize_tree.
        """
        return (tuple(get_tuple_tree(stub) for stub in self.stubs),
                tuple(get_tuple_tree(source) for source in self.sources))
    def get_device_resource_for_id(self, segment_id):
        """ Get the device resource that corresponds to the segment id given.
        Raises KeyError for ids not registered at construction time. """
        return self.id_to_device_resource[segment_id]
    def get_device_resource(self, segment):
        """ Get the device resource that corresponds to the segment given.
        Looks the segment up by its object id. """
        return self.id_to_device_resource[id(segment)]
    def check_trees(self):
        """ Check that all stub and source trees are valid.

        Every source must additionally report is_root() on its device
        resource.  This method should be called after all route segments
        have been added to the node cache.
        """
        for stub in self.stubs:
            check_tree(self, stub)
        for source in self.sources:
            assert self.get_device_resource(source).is_root(), source
            check_tree(self, source)
def connections_for_segment_id(self, segment_id):
""" Yield all connection resources connected to segment id given. """
resource = self.id_to_device_resource[segment_id]
for site_wire in resource.site_wires():
yield site_wire
for node in resource.nodes():
yield node
def build_connections(self):
""" Create a dictionary of connection resources to segment ids. """
self.connections = {}
for segment_id in self.id_to_segment.keys():
for connection in self.connections_for_segment_id(segment_id):
if connection not in self.connections:
self.connections[connection] = set()
self.connections[connection].add(segment_id)
    def get_connection(self, connection_resource):
        """ Get the set of segment ids connected to connection_resource.

        Builds the connection map lazily on first use; raises KeyError for
        a resource that no registered segment touches.
        """
        if self.connections is None:
            self.build_connections()
        return self.connections[connection_resource]
def reroot(self):
""" Determine which routing segments are roots and non-roots.
Repopulates stubs and sources list with new roots and non-root
segments.
"""
if self.connections is None:
self.build_connections()
segments = self.stubs + self.sources
self.stubs.clear()
self.sources.clear()
source_segment_ids = set()
# Example each connection and find the best root.
for segment_ids in self.connections.values():
root_priority = None
root = None
root_count = 0
for segment_id in segment_ids:
resource = self.get_device_resource_for_id(segment_id)
if resource.is_root():
possible_root_priority = resource.root_priority()
if root is None:
root_priority = possible_root_priority
root = segment_id
root_count = 1
elif possible_root_priority < ro |
googleapis/python-pubsublite | samples/generated_samples/pubsublite_v1_generated_topic_stats_service_compute_time_cursor_async.py | Python | apache-2.0 | 1,540 | 0.000649 | # -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ComputeTimeCursor
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-pubsublite
# [START pubsublite_v1_generated_TopicStatsService_ComputeTimeCursor_async]
from google.cloud import pubsublite_v1
async def sample_compute_time_cursor():
    """Call TopicStatsService.ComputeTimeCursor with placeholder values
    and print the response (auto-generated illustrative sample)."""
    # Create a client
    client = pubsublite_v1.TopicStatsServiceAsyncClient()
    # Initialize request argument(s)
    request = pubsublite_v1.ComputeTimeCursorRequest(
        topic="topic_value",
        partition=986,
    )
    # Make the request
    response = await client.compute_time_cursor(request=request)
    # Handle the response
    print(response)
# [END pubsublite_v1_generated_TopicStatsService_ComputeTimeCursor_async]
|
thinkopensolutions/tko-addons | tko_br_delivery_sale_stock/models/sale.py | Python | agpl-3.0 | 1,287 | 0.004662 |
from odoo import models, api, _
from odoo.exceptions import UserError
class SaleOrder(models.Model):
    _inherit = 'sale.order'

    @api.multi
    def set_delivery_line(self):
        """Compute the carrier shipping price for each quotation and store
        it on the order (total_frete / delivery_price) instead of adding a
        delivery order line, then recompute the order totals.

        Raises UserError when the order is already confirmed, has no
        carrier, or the carrier rating has not succeeded yet.

        (Two lines in the source were garbled by dataset separators --
        `delivery_rating_success` and the TODO comment -- and are restored
        here.)
        """
        # Remove delivery products from the sales order
        self._remove_delivery_line()
        for order in self:
            if order.state not in ('draft', 'sent'):
                raise UserError(_('You can add delivery price only on unconfirmed quotations.'))
            elif not order.carrier_id:
                raise UserError(_('No carrier set for this order.'))
            elif not order.delivery_rating_success:
                raise UserError(_('Please use "Check price" in order to compute a shipping price for this quotation.'))
            else:
                price_unit = order.carrier_id.rate_shipment(order)['price']
                # TODO check whether it is safe to use delivery_price here
                # NOTE(review): `self.carrier_id.margin` (not
                # `order.carrier_id.margin`) is read here -- looks
                # suspicious for multi-record sets; confirm before changing.
                final_price = price_unit * (1.0 + (float(self.carrier_id.margin) / 100.0))
                # order._create_delivery_line(carrier, final_price)
                # set price in total_frete field and compute total again
                order.total_frete = final_price
                order.delivery_price = final_price
                order._amount_all()
        return True
iEngage/python-sdk | iengage_client/models/user.py | Python | apache-2.0 | 9,891 | 0.000303 | # coding: utf-8
"""
Stakeholder engagement API
This API enables Intelligent Engagement for your Business. iEngage is a platform that combines process, augmented intelligence and rewards to help you intelligently engage customers.
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class User(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self, user_id=None, first_name=None, last_name=None, email_id=None, profile_image=None, has_interest_updated=False, birth_date=None, access_token=None, current_user_following=False, current_user_friend=False, equity_score=None, extra_data=None):
"""
User - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and t | he value is json key in definition.
"""
self.swagger_types = {
'user_id': 'int',
'first_name': 'str',
'last_name': 'str',
'email_id': 'str',
| 'profile_image': 'str',
'has_interest_updated': 'bool',
'birth_date': 'datetime',
'access_token': 'str',
'current_user_following': 'bool',
'current_user_friend': 'bool',
'equity_score': 'int',
'extra_data': 'str'
}
self.attribute_map = {
'user_id': 'userId',
'first_name': 'firstName',
'last_name': 'lastName',
'email_id': 'emailId',
'profile_image': 'profileImage',
'has_interest_updated': 'hasInterestUpdated',
'birth_date': 'birthDate',
'access_token': 'accessToken',
'current_user_following': 'currentUserFollowing',
'current_user_friend': 'currentUserFriend',
'equity_score': 'equityScore',
'extra_data': 'extraData'
}
self._user_id = user_id
self._first_name = first_name
self._last_name = last_name
self._email_id = email_id
self._profile_image = profile_image
self._has_interest_updated = has_interest_updated
self._birth_date = birth_date
self._access_token = access_token
self._current_user_following = current_user_following
self._current_user_friend = current_user_friend
self._equity_score = equity_score
self._extra_data = extra_data
@property
def user_id(self):
"""
Gets the user_id of this User.
:return: The user_id of this User.
:rtype: int
"""
return self._user_id
@user_id.setter
def user_id(self, user_id):
"""
Sets the user_id of this User.
:param user_id: The user_id of this User.
:type: int
"""
self._user_id = user_id
@property
def first_name(self):
"""
Gets the first_name of this User.
:return: The first_name of this User.
:rtype: str
"""
return self._first_name
@first_name.setter
def first_name(self, first_name):
"""
Sets the first_name of this User.
:param first_name: The first_name of this User.
:type: str
"""
self._first_name = first_name
@property
def last_name(self):
"""
Gets the last_name of this User.
:return: The last_name of this User.
:rtype: str
"""
return self._last_name
@last_name.setter
def last_name(self, last_name):
"""
Sets the last_name of this User.
:param last_name: The last_name of this User.
:type: str
"""
self._last_name = last_name
@property
def email_id(self):
"""
Gets the email_id of this User.
:return: The email_id of this User.
:rtype: str
"""
return self._email_id
@email_id.setter
def email_id(self, email_id):
"""
Sets the email_id of this User.
:param email_id: The email_id of this User.
:type: str
"""
self._email_id = email_id
@property
def profile_image(self):
"""
Gets the profile_image of this User.
:return: The profile_image of this User.
:rtype: str
"""
return self._profile_image
@profile_image.setter
def profile_image(self, profile_image):
"""
Sets the profile_image of this User.
:param profile_image: The profile_image of this User.
:type: str
"""
self._profile_image = profile_image
@property
def has_interest_updated(self):
"""
Gets the has_interest_updated of this User.
:return: The has_interest_updated of this User.
:rtype: bool
"""
return self._has_interest_updated
@has_interest_updated.setter
def has_interest_updated(self, has_interest_updated):
"""
Sets the has_interest_updated of this User.
:param has_interest_updated: The has_interest_updated of this User.
:type: bool
"""
self._has_interest_updated = has_interest_updated
@property
def birth_date(self):
"""
Gets the birth_date of this User.
:return: The birth_date of this User.
:rtype: datetime
"""
return self._birth_date
@birth_date.setter
def birth_date(self, birth_date):
"""
Sets the birth_date of this User.
:param birth_date: The birth_date of this User.
:type: datetime
"""
self._birth_date = birth_date
@property
def access_token(self):
"""
Gets the access_token of this User.
:return: The access_token of this User.
:rtype: str
"""
return self._access_token
@access_token.setter
def access_token(self, access_token):
"""
Sets the access_token of this User.
:param access_token: The access_token of this User.
:type: str
"""
self._access_token = access_token
@property
def current_user_following(self):
"""
Gets the current_user_following of this User.
:return: The current_user_following of this User.
:rtype: bool
"""
return self._current_user_following
@current_user_following.setter
def current_user_following(self, current_user_following):
"""
Sets the current_user_following of this User.
:param current_user_following: The current_user_following of this User.
:type: bool
"""
self._current_user_following = current_user_following
@property
def current_user_friend(self):
"""
Gets the current_user_friend of this User.
:return: The current_user_friend of this User.
:rtype: bool
"""
return self._current_user_friend
@current_user_friend.setter
def current_user_friend(self, current_user_friend):
"""
Sets the current_user_friend of this User.
:param current_user_friend: The current_user_friend of this User.
:type: bool
"""
self._current_user_friend = current_user_friend
@property
def equity_score(self):
"""
Gets the equity_score of this User.
:return: The equity_score of this User.
:rtype: int
"""
return self._equity_score
@equity_score.setter
def equity_score(self, equity_score):
"""
Sets the equity_score of this User.
:param equity_score: The equity_score of this User.
:type: int
"""
self._equity_score = equity_score
@property
def extra_data(self):
"""
Gets the extra_data of this User.
:return: The |
imoan1983/medlist | src/downloadFiles.py | Python | apache-2.0 | 1,924 | 0.010915 | # -*- coding: utf-8 -*-
import datetime
import os
import urllib.request
import re
def getUrlList(url):
    """Return absolute URLs of pdf/xls/xlsx/zip links found on the page
    at *url*.

    The page is fetched with urllib and scanned for href attributes
    ending in a supported extension; each href is resolved against the
    site root derived from *url*.

    Fix over the original: the HTTP response is now closed.
    """
    found = []
    baseUrl = re.findall('(http://.+?)/', url)[0]
    httpRequest = urllib.request.Request(url)
    with urllib.request.urlopen(httpRequest) as httpResponse:
        fragments = httpResponse.read().decode('utf-8').split('>')
    for fragment in fragments:
        # find links like <a href="*.pdf"> (also .xls, .xlsx, .zip)
        # NOTE(review): an already-absolute href would get baseUrl
        # prepended twice -- preserved as-is; confirm before changing.
        match = re.findall('<a href="(.+\.)(pdf|xls|xlsx|zip)"', fragment)
        if match:
            found.append(baseUrl + match[0][0] + match[0][1])
    return found
def downloadFiles_exe(url, downloadPath):
    """Download one file from *url* to *downloadPath*, best-effort.

    Success or failure is only logged to stdout; the bare except is
    deliberate so a single failed download never aborts the batch.
    """
    try:
        urllib.request.urlretrieve(url,downloadPath)
        print('[ok] %s' % url)
    except:
        print('[ng] %s' % url)
    return
def downloadFiles(url, downloadDir):
    """Download every supported document linked from *url* into
    *downloadDir*, keeping each file's original name and extension."""
    print('[start] %s' % url)
    for fileUrl in getUrlList(url):
        name, ext = re.findall('http://.+/(.+\.)(pdf|xls|xlsx|zip)', fileUrl)[0]
        downloadFiles_exe(fileUrl, os.path.join(downloadDir, name + ext))
    return
def main(argv):
    """Entry point: read the URL-list path and download directory either
    from argv or from the two-line config file 'win.ini', then download
    every listed page's documents into a per-day, per-tag directory tree
    and print the day directory.

    Fixes over the original: restores two lines garbled by dataset
    separators and closes both config and URL-list file handles.
    """
    if len(argv) == 3:
        urlList = argv[1]
        downloadDir = argv[2]
    else:
        # fallback config: line 1 = url list path, line 2 = download dir
        with open('win.ini', 'r') as cfg:
            s = cfg.read().split('\n')
        urlList = s[0]
        downloadDir = s[1]
    today = datetime.datetime.today().strftime('%Y%m%d')
    # ./dl/yyyyMMdd/
    os.mkdir(os.path.join(downloadDir, today))
    with open(urlList, 'r') as listFile:
        for line in listFile:
            # each line: tag \t url
            l = line.split('\t')
            os.mkdir(os.path.join(downloadDir, today, l[0]))
            downloadFiles(l[1], os.path.join(downloadDir, today, l[0]))
    print(os.path.join(downloadDir, today))
    return
if __name__ == "__main__":
import sys
#python3 donwloadFiles.py ../etc/urlList.tsv ../dl/
main(sys.argv)
|
twisted/mantissa | xmantissa/test/historic/stub_organizer2to3.py | Python | mit | 258 | 0.007752 | from axiom.test.historic.stubloader import saveStub
from | axiom.dependency import installOn
from xmantissa.people import Organizer
def createDatabase(s):
    """Populate store *s* with an installed Organizer for the upgrade stub."""
    installOn(Organizer(store=s), s)
# NOTE(review): 13142 is passed straight to saveStub -- presumably a
# revision/version marker for the stub database; confirm against saveStub.
if __name__ == '__main__':
    saveStub(createDatabase, 13142)
|
london-escience/libhpc-cf | libhpc/component/bio.py | Python | bsd-3-clause | 13,873 | 0.008073 | # Copyright (c) 2015, Imperial College London
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the names of the copyright holders nor the names of their
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This file is part of the libhpc-cf Coordination Forms library that has been
# developed as part of the libhpc projects
# (http://www.imperial.ac.uk/lesc/projects/libhpc).
#
# We gratefully acknowledge the Engineering and Physical Sciences Research
# Council (EPSRC) for their support of the projects:
# - libhpc: Intelligent Component-based Development of HPC Applications
# (EP/I030239/1).
# - libhpc Stage II: A Long-term Solution for the Usability, Maintainability
# and Sustainability of HPC Software (EP/K038788/1).
from libhpc.cf.params import Parameter
from libhpc.cf.component import Component
# COMPONENT PARAMETER DEFINITIONS
#
# Every Parameter below is declared as Parameter(name, type, direction, flag):
#   name      - key used to wire values between pipeline components
#   type      - 'string', 'int' or 'list'
#   direction - 'input', 'output' or 'inout'
#   flag      - True only for the *_status/*_result parameters below;
#               presumably marks a component-status parameter — confirm
#               against the libhpc.cf.params.Parameter documentation.

# fastq split
fastq_split_input = Parameter('fastq_split_input', 'string', 'input', False)
fastq_split_output1 = Parameter('fastq_split_output1', 'string', 'inout', False)
fastq_split_output2 = Parameter('fastq_split_output2', 'string', 'inout', False)

# bwa index
bwa_index_param1 = Parameter('ref_genome_param', 'string', 'input', False)
# NOTE(review): identical name/direction to bwa_index_param1 above — looks
# like a copy-paste slip (an output-file parameter was probably intended).
# Left as-is to preserve behaviour; confirm before changing.
bwa_index_output_file = Parameter('ref_genome_param', 'string', 'input', False)
bwa_index_result = Parameter('ref_genome_status', 'int', 'output', True)

# bwa aln
bwa_aln_ref_genome = Parameter('ref_genome_param', 'string', 'input', False)
bwa_aln_short_read = Parameter('short_read_file', 'string', 'input', False)
bwa_aln_output_file = Parameter('output_file', 'string', 'inout', False)
bwa_aln_result = Parameter('bwa_aln_status', 'int', 'output', True)

# bwa sampe
bwa_sampe_param1 = Parameter('ref_genome_param', 'string', 'input', False)
bwa_sampe_param2 = Parameter('short_read_indexes_param', 'list', 'input', False)
bwa_sampe_param3 = Parameter('short_read_file_param', 'list', 'input', False)
bwa_sampe_output_file = Parameter('sam_output_file', 'string', 'output', False)
bwa_sampe_result = Parameter('bwa_sample_status', 'int', 'output', True)

# samtools import
samtools_import_param1 = Parameter('ref_genome_param', 'string', 'input', False)
samtools_import_param2 = Parameter('sam_file_param', 'string', 'input', False)
samtools_import_output = Parameter('bam_file_param', 'string', 'inout', False)
samtools_import_result = Parameter('samtools_import_status', 'int', 'output', True)

# samtools sort
samtools_sort_baminput = Parameter('samtools_bam_input_param', 'string', 'input', False)
samtools_sort_sortedoutput = Parameter('samtools_sorted_output_param', 'string', 'output', False)
samtools_sort_result = Parameter('samtools_sort_status', 'int', 'output', True)

# samtools index
# NOTE(review): the samtools/gatk *_status parameters below use type
# 'string' where the earlier ones use 'int' — possibly intentional,
# possibly drift; confirm before normalising.
samtools_index_input = Parameter('samtools_index_input_param', 'string', 'input', False)
samtools_index_output = Parameter('samtools_index_output_param', 'string', 'output', False)
samtools_index_result = Parameter('samtools_index_status', 'string', 'output', True)

# samtools faidx
# (garbled " | " extraction artifact removed from the name literal below)
samtools_faidx_input = Parameter('samtools_faidx_input_param', 'string', 'input', False)
samtools_faidx_output = Parameter('samtools_faidx_output_param', 'string', 'output', False)
samtools_faidx_result = Parameter('samtools_faidx_status', 'string', 'output', True)

# samtools mpileup
# (garbled " | " extraction artifact removed from the variable name below)
samtools_mpileup_input = Parameter('samtools_mpileup_input_param', 'string', 'input', False)
samtools_mpileup_output = Parameter('samtools_mpileup_output_param', 'string', 'inout', False)
samtools_mpileup_result = Parameter('samtools_mpileup_status', 'string', 'output', True)

# samtools bcf2vcf
samtools_bcf2vcf_input = Parameter('samtools_bcf2vcf_input_param', 'string', 'input', False)
samtools_bcf2vcf_output = Parameter('samtools_bcf2vcf_output_param', 'string', 'inout', False)
samtools_bcf2vcf_result = Parameter('samtools_bcf2vcf_status', 'string', 'output', True)

# picard remove duplicates
picard_remove_duplicates_input = Parameter('picard_remove_duplicates_input', 'string', 'input', False)
picard_remove_duplicates_output = Parameter('picard_remove_duplicates_output', 'string', 'inout', False)
picard_remove_duplicates_metrics = Parameter('picard_remove_duplicates_metrics', 'string', 'inout', False)
picard_remove_duplicates_result = Parameter('picard_remove_duplicates_result', 'int', 'output', True)

# picard add read groups
picard_add_read_groups_baminput = Parameter('add_read_groups_input_param', 'string', 'input', False)
picard_add_read_groups_rgidinput = Parameter('add_read_groups_rgid_param', 'string', 'input', False)
picard_add_read_groups_rglbinput = Parameter('add_read_groups_rglb_param', 'string', 'input', False)
picard_add_read_groups_rgplinput = Parameter('add_read_groups_rgpl_param', 'string', 'input', False)
picard_add_read_groups_rgpuinput = Parameter('add_read_groups_rgpu_param', 'string', 'input', False)
picard_add_read_groups_rgsminput = Parameter('add_read_groups_rgsm_param', 'string', 'input', False)
#picard_add_read_groups_result = Parameter('add_read_groups_status', 'int', 'output', False)
picard_add_read_groups_output = Parameter('add_read_groups_output_param', 'string', 'output', False)

# picard merge sam
picard_merge_sam_input = Parameter('merge_sam_input_file_list', 'list', 'input', False)
picard_merge_sam_output = Parameter('merge_sam_output_file', 'string', 'output', False)
picard_merge_sam_status = Parameter('merge_sam_status', 'int', 'output', True)

# picard create sequence dictionary
picard_create_dictionary_ref_input = Parameter('picard_create_dict_ref_input', 'string', 'input', False)
picard_create_dictionary_output = Parameter('picard_create_dict_file', 'string', 'inout', False)
picard_create_dictionary_status = Parameter('picard_create_dict_status', 'int', 'output', True)

# picard build bam index
picard_build_bam_index_input = Parameter('picard_build_bam_index_input_param', 'string', 'input', False)
picard_build_bam_index_output = Parameter('picard_build_bam_index_output_param', 'string', 'output', False)
picard_build_bam_index_result = Parameter('picard_build_bam_index_status', 'string', 'output', True)

# picard check reference
picard_check_reference_input = Parameter('ref_genome_param', 'string', 'input', False)
picard_check_reference_output = Parameter('new_ref_genome_param', 'string', 'inout', False)

# gatk indel realigner target creation
gatk_indel_targets_ref_input = Parameter('gatk_indel_targets_ref_input', 'string', 'input', False)
gatk_indel_targets_bam_input = Parameter('gatk_indel_targets_bam_input', 'string', 'input', False)
gatk_indel_targets_output = Parameter('gatk_indel_targets_output', 'string', 'inout', False)
gatk_indel_targets_status = Parameter('gatk_indel_targets_status', 'string', 'output', True)

# gatk indel realigner
gatk_indel_realigner_ref_input = Parameter('gatk_realigner_ref_input', 'string', 'input', False)
gatk_indel_realigner_bam_input = Parameter('gatk_realigner_bam_input', 'string', 'input', False)
gatk_indel_realigner_interval_input = Parameter('gatk_realigner_interval_input', 'string', 'input', False)
gatk_indel_realigner_output = Parameter('gatk_realigner_output', 'string', 'inout', |
elainenaomi/sciwonc-dataflow-examples | sbbd2016/experiments/1-postgres/3_workflow_full_10files_primary_nosh_nors_annot_with_proj_3s/pegasus.bDkvI/pegasus-4.6.0/lib/python2.7/dist-packages/Pegasus/service/dashboard/dashboard.py | Python | gpl-3.0 | 20,107 | 0.005719 | # Copyright 2007-2012 University Of Southern California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = "Rajiv Mayani"
from time import localtime, strftime
from sqlalchemy.orm.exc import NoResultFound
from Pegasus.tools import utils
from Pegasus.plots_stats import utils as stats_utils
from Pegasus.db.workflow import stampede_statistics
from Pegasus.service.dashboard import queries
class NoWorkflowsFoundError(Exception):
    """Raised when a workflow query matches no workflows.

    Attributes:
        count:    total number of workflows known (0 when not supplied;
                  callers may also pass None explicitly to mean "unknown").
        filtered: number of workflows remaining after table filtering, or
                  None when not supplied.
    """

    def __init__(self, **args):
        # Properly initialise the Exception base class (the original
        # skipped this).
        super(NoWorkflowsFoundError, self).__init__()
        # dict.get with defaults replaces the original test-then-assign
        # dance; 'count' keeps its original fallback of 0.  'filtered' is
        # now always defined (None when absent) so attribute access never
        # raises AttributeError.
        self.count = args.get('count', 0)
        self.filtered = args.get('filtered')
class Dashboard(object):
def __init__(self, main_db_url, root_wf_id=None, wf_id=None):
self._main_db_url = main_db_url
# If the ID is specified, it means that the query is specific to a workflow.
# So we will now query the main database to get the connection URL for the workflow.
if root_wf_id or wf_id:
self.initialize(root_wf_id, wf_id)
def initialize(self, root_wf_id, wf_id):
try:
workflow = queries.MainDatabase(self._main_db_url)
self._db_id, self._root_wf_uuid, self._wf_db_url = workflow.get_wf_id_url(root_wf_id)
self._wf_id = wf_id
finally:
Dashboard.close(workflow)
@staticmethod
def close(conn):
if conn:
conn.close()
def __get_wf_db_url(self):
if not self._wf_db_url:
raise ValueError, 'workflow database URL is not set'
return self._wf_db_url
def get_root_workflow_list(self, counts_only=False, **table_args):
"""
Get basic information about all workflows running, on all databases. This is for the index page.
Returns a list of workflows.
"""
self._workflows = []
# Now, let's try to access the database
try:
all_workflows = None
all_workflows = queries.MainDatabase(self._main_db_url)
counts = all_workflows.get_workflow_counts()
if counts_only:
if counts[0] == 0:
raise NoWorkflowsFoundError(count=None, filtered=None)
return counts
count, filtered, workflows = all_workflows.get_all_workflows(**table_args)
if workflows:
self._workflows.extend(workflows)
if len(self._workflows) == 0:
# Throw no workflows found error.
raise NoWorkflowsFoundError(count=count, filtered=filtered)
return(count, filtered, self._workflows, counts)
finally:
D | ashboard.close(all_workflows)
def workflow_stats(self):
try:
workflow = stampede_statistics.StampedeStatistics(self.__get_wf_db_url(), False)
workflow.initialize(root_wf_id = self._wf_id)
individual_stats = self._workflow_stat | s(workflow)
workflow2 = stampede_statistics.StampedeStatistics(self.__get_wf_db_url())
workflow2.initialize(self._root_wf_uuid)
all_stats = self._workflow_stats(workflow2)
return { 'individual' : individual_stats, 'all' : all_stats }
finally:
Dashboard.close(workflow)
Dashboard.close(workflow2)
def _workflow_stats(self, workflow):
# tasks
tasks = {}
workflow.set_job_filter('nonsub')
tasks['total_tasks'] = int(workflow.get_total_tasks_status())
tasks['total_succeeded_tasks'] = int(workflow.get_total_succeeded_tasks_status(False))
tasks['total_failed_tasks'] = int(workflow.get_total_failed_tasks_status())
tasks['total_unsubmitted_tasks'] = tasks['total_tasks'] -(tasks['total_succeeded_tasks'] + tasks['total_failed_tasks'])
tasks['total_task_retries'] = int(workflow.get_total_tasks_retries())
tasks['total_task_invocations'] = tasks['total_succeeded_tasks'] + tasks['total_failed_tasks'] + tasks['total_task_retries']
# job status
jobs = {}
workflow.set_job_filter('nonsub')
jobs['total_jobs'] = int(workflow.get_total_jobs_status())
jobs['total_succeeded_jobs'] = int(workflow.get_total_succeeded_jobs_status())
jobs['total_failed_jobs'] = int(workflow.get_total_failed_jobs_status())
jobs['total_unsubmitted_jobs'] = jobs['total_jobs'] -(jobs['total_succeeded_jobs'] + jobs['total_failed_jobs'])
jobs['total_job_retries'] = int(workflow.get_total_jobs_retries())
jobs['total_job_invocations'] = jobs['total_succeeded_jobs'] + jobs['total_failed_jobs'] + jobs['total_job_retries']
# sub workflow
wfs = {}
workflow.set_job_filter('subwf')
wfs['total_sub_wfs'] = int(workflow.get_total_jobs_status())
wfs['total_succeeded_sub_wfs'] = int(workflow.get_total_succeeded_jobs_status())
wfs['total_failed_sub_wfs'] = int(workflow.get_total_failed_jobs_status())
wfs['total_unsubmitted_sub_wfs'] = wfs['total_sub_wfs'] -(wfs['total_succeeded_sub_wfs'] + wfs['total_failed_sub_wfs'])
wfs['total_sub_wfs_retries'] = int(workflow.get_total_jobs_retries())
wfs['total_sub_wfs_invocations'] = wfs['total_succeeded_sub_wfs'] + wfs['total_failed_sub_wfs'] + wfs['total_sub_wfs_retries']
return [tasks, jobs, wfs]
def job_breakdown_stats(self):
try:
workflow = stampede_statistics.StampedeStatistics(self.__get_wf_db_url(), True)
workflow.initialize(root_wf_id = self._wf_id)
content = []
for t in workflow.get_transformation_statistics():
content.append([t.transformation, int(t.count), int(t.success),
int(t.failure), float(t.min), float(t.max), float(t.avg), float(t.sum)])
return content
finally:
Dashboard.close(workflow)
def job_stats(self):
try:
workflow = stampede_statistics.StampedeStatistics(self.__get_wf_db_url(), False)
workflow.initialize(root_wf_id = self._wf_id)
workflow.set_job_filter('all')
job_retry_count_dict = {}
content = []
for job in workflow.get_job_statistics():
kickstart = '0' if job.kickstart == None else float(job.kickstart)
multiplier_factor = '0' if job.multiplier_factor == None else int(job.multiplier_factor)
kickstart_multi = '0' if job.kickstart_multi == None else float(job.kickstart_multi)
remote_cpu_time = '0' if job.remote_cpu_time == None else float(job.remote_cpu_time)
post_time = '0' if job.post_time == None else float(job.post_time)
condor_q_time = '0' if job.condor_q_time == None else float(job.condor_q_time)
resource_delay = '0' if job.resource_delay == None else float(job.resource_delay)
runtime = '0' if job.runtime == None else float(job.runtime)
seqexec = '-' if job.seqexec == None else float(job.seqexec)
seqexec_delay = '-'
if job.seqexec is not None and job.kickstart is not None:
seqexec_delay =(float(job.seqexec) - float(job.kickstart))
if job_retry_count_dict.has_key(job.job_name):
job_retry_count_dict[job.job_name] += 1
else:
job_retry_count_dict[job.job_name] = 1
retry_count = job_retry_count_dict[job.job |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.