repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
looooo/paraBEM | examples/tests/test_parallel.py | Python | gpl-3.0 | 359 | 0 | import paraBEM
from paraBEM im | port | pan3d
from paraBEM.mesh import mesh_object
mesh = mesh_object.from_OBJ("../mesh/box_minimal.obj")
case = pan3d.DirichletDoublet0Case3(mesh.panels)
case.v_inf = paraBEM.Vector3(1, 0, 0)
a = case.panels[0]
b = case.panels[1]
print(a.center, " ", a.n)
print(b.center, " ", b.n)
print(pan3d.doublet_3_0_vsaero(a.center, b))
|
jpinsonault/learning_machine | tests.py | Python | mit | 9,652 | 0.001554 | import unittest
from Computer import Computer
class TestInstructions(unittest.TestCase):
def setUp(self):
self.computer = Computer()
def test_load(self):
self.computer.load_program([['load', [2, '1']]])
self.computer.run_program()
self.failUnless(self.computer.memory['1'].value == 2)
def test_move(self):
self.computer.load_program([
['load', [2, '1']],
['move', ['1', '2']]
])
self.computer.run_program()
self.failUnless(self.computer.memory['2'].value == 2)
def test_dec(self):
self.computer.load_program([
['load', [2, '1']],
['dec', ['1']],
['dec', ['1']],
['dec', ['1']],
])
self.computer.run_program()
self.failUnless(self.computer.memory['1'].value == -1)
def test_inc(self):
self.computer.load_program([
['load', [2, '1']],
['inc', ['1']],
['inc', ['1']],
['inc', ['1']],
])
self.computer.run_program()
self.failUnless(self.computer.memory['1'].value == 5)
def test_clear(self):
self.computer.load_program([
['load', [2, '1']],
['clear', ['1']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, 0)
def test_add(self):
self.computer.load_program([
['load', [2, '1']],
['add', [1, '1', '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['2'].value, 3)
self.failUnlessEqual(self.computer.memory['1'].value, 2)
def test_multiply(self):
self.computer.load_program([
['load', [2, '1']],
['multiply', [3, '1', '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['2'].value, 6)
self.failUnlessEqual(self.computer.memory['1'].value, 2)
def test_divide_no_remainder(self):
self.computer.load_program([
['load', [6, '1']],
['divide', ['1', 2, '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['2'].value, 3)
self.failUnlessEqual(self.computer.memory['1'].value, 6)
def test_divide_with_remainder(self):
self.computer.load_program([
['load', [7, '1']],
['divide', ['1', 2, '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['2'].value, 3)
self.failUnlessEqual(self.computer.memory['1'].value, 7)
def test_shift_left(self):
self.computer.load_program([
['load', [6, '1']],
['shift_left', ['1']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, 12)
def test_shift_right(self):
self.computer.load_program([
['load', [6, '1']],
['shift_right', ['1']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, 3)
def test_bit_or(self):
self.computer.load_program([
['load', [0b010101, '1']],
['load', [0b011111, '2']],
['bit_or', ['1', '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, 0b011111)
def test_bit_and(self):
self.computer.load_program([
['load', [0b010101, '1']],
['load', [0b011111, '2']],
['bit_and', ['1', '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, 0b010101)
def test_complement(self):
self.computer.load_program([
['load', [0b010101, '1']],
['complement', ['1']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, ~0b010101)
def test_bit_xor(self):
self.computer.load_program([
['load', [0b010101, '1']],
['load', [0b011111, '2']],
['bit_xor', ['1', '2']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['1'].value, 0b010101 ^ 0b011111)
def test_jump(self):
self.computer.load_program([
['jump', [2]],
['load', [1, '2']], # Should never run
['load', [2, '3']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['3'].value, 2)
self.failIfEqual(self.computer.memory['2'].value, 1)
def test_jump_if_neg_on_negative(self):
self.computer.load_program([
['load', [-1, '1']],
['jump_if_neg', ['1', 2]],
['load', [1, '2']], # Should never run
['load', [2, '3']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['3'].value, 2)
self.failIfEqual(self.computer.memory['2'].value, 1)
def test_jump_if_neg_on_positive(self):
self.computer.load_program([
['load', [5, '1']],
['jump_if_neg', ['1', 2]],
['load', [1, '2']], # Should run
['load', [2, '3']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['3'].value, 2)
self.failUnlessEqual(self.computer.memory['2'].value, 1)
def test_jump_i | f_pos_on_positive(self):
self.computer.load_program([
['load', [5, '1']],
['jump_if_pos', ['1', 2]],
['load', [1, '2']], # Should never | run
['load', [2, '3']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['3'].value, 2)
self.failIfEqual(self.computer.memory['2'].value, 1)
def test_jump_if_pos_on_negative(self):
self.computer.load_program([
['load', [-1, '1']],
['jump_if_pos', ['1', 2]],
['load', [1, '2']], # Should run
['load', [2, '3']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['3'].value, 2)
self.failUnlessEqual(self.computer.memory['2'].value, 1)
def test_jump_if_eq_on_equal(self):
self.computer.load_program([
['load', [5, '1']],
['load', [5, '2']],
['jump_if_eq', ['1', '2', 2]],
['load', [1, '3']], # Should never run
['load', [2, '4']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['4'].value, 2)
self.failIfEqual(self.computer.memory['3'].value, 1)
def test_jump_if_eq_on_not_equal(self):
self.computer.load_program([
['load', [5, '1']],
['load', [4, '2']],
['jump_if_eq', ['1', '2', 2]],
['load', [1, '3']], # Should run
['load', [2, '4']],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['4'].value, 2)
self.failUnlessEqual(self.computer.memory['3'].value, 1)
def test_push(self):
self.computer.load_program([
['load', [5, '1']],
['push', ['1']],
['push', [1]],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.stack[0], 5)
self.failUnlessEqual(self.computer.stack[1], 1)
def test_pop_with_non_empty_stack(self):
self.computer.load_program([
['load', [5, '1']],
['push', ['1']],
['push', [1]],
['pop', "3"],
['pop', "4"],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['4'].value, 5)
self.failUnlessEqual(self.computer.memory['3'].value, 1)
def test_pop_with_empty_stack(self):
self.computer.load_program([
['pop', "3"],
['pop', "4"],
])
self.computer.run_program()
self.failUnlessEqual(self.computer.memory['4'].value, 0)
|
bmispelon/django-formtags | tests/forms.py | Python | mit | 388 | 0 | from django import forms
def dummy_validator(value):
if value == 'invalid':
r | aise forms.ValidationError('invalid')
return value
class TestForm(forms.Form):
foo = forms.CharField(required=False, validators=[dummy_validator])
bar = forms.CharField(help_text='help bar', required=False)
baz = forms.C | harField(label='<baz>', help_text='<baz>', required=False)
|
tschalch/pyTray | src/dataStructures/tray_item.py | Python | bsd-3-clause | 847 | 0.005903 | import dataStructures
import logging, os
log = logging.getLogger("tray_item")
log.setLevel(logging.WARN)
class TrayItem:
"""
Parent Class for all items in a tray.
"""
def __init__(self):
self.selected = False
self.changed = False
dataStructures.changingItems.append(self)
self.fiel | ds = []
def SetSelected(self, value):
self.selected = value
def SetChanged(self, state):
self.changed = state
if state:
#import traceback
#traceback.print_stack()
log.debug("TrayItem change registered for %s", self.eleme | nt)
def Clone(self):
clone = TrayItem()
clone.selected = self.selected
clone.data = self.data.copy()
clone.fields = self.fields
return clone
|
wuxue/altanalyze | genmappDBs.py | Python | apache-2.0 | 3,609 | 0.030202 | ###genmapp
#Copyright 2005-2008 J. David Gladstone Institutes, San Francisco California
#Author Nathan Salomonis - nsalomonis@gmail.com
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#THE SOFTWARE IS | PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
#INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
#PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
#HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHE | THER IN AN ACTION
#OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
#SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import sys, string
import math
import os.path
import unique
import copy
import time
import export
def filepath(filename):
fn = unique.filepath(filename)
return fn
def read_directory(sub_dir):
dir_list = unique.read_directory(sub_dir)
return dir_list
def makeUnique(item):
db1={}; list1=[]; k=0
for i in item:
try: db1[i]=[]
except TypeError: db1[tuple(i)]=[]; k=1
for i in db1:
if k==0: list1.append(i)
else: list1.append(list(i))
list1.sort()
return list1
def cleanUpLine(line):
line = string.replace(line,'\n','')
line = string.replace(line,'\c','')
data = string.replace(line,'\r','')
data = string.replace(data,'"','')
return data
def getAnnotations(species):
exportChromosomeStrandCoordinates(species)
def exportChromosomeStrandCoordinates(species):
import EnsemblImport
gene_location_db = EnsemblImport.getEnsemblGeneLocations(species,'RNASeq','key_by_array')
import ExpressionBuilder
gene_biotype_db = ExpressionBuilder.importTranscriptBiotypeAnnotations(species)
export_path = 'GenMAPPDBs/'+species+'/chr_gene_locations.txt'
export_data = export.ExportFile(export_path)
import ExonAnalyze_module
gene_annotation_file = "AltDatabase/ensembl/"+species+"/"+species+"_Ensembl-annotations.txt"
annotate_db = ExonAnalyze_module.import_annotations(gene_annotation_file,'RNASeq')
print 'Annotations for',len(gene_location_db),'genes imported'
sorted_list=[]; protein_coding=0
for gene in gene_location_db:
chr,strand,start,end = gene_location_db[gene]
if gene in gene_biotype_db:
biotype = gene_biotype_db[gene][-1]
if biotype == 'protein_coding': protein_coding+=1
else: biotype = 'NA'
if len(chr)<7:
sorted_list.append([chr,strand,int(start),int(end),gene,biotype])
#else: print chr;sys.exit()
print len(sorted_list),'genes for typical chromosomes present'
print protein_coding, 'protein coding genes present'
sorted_list.sort()
for values in sorted_list:
chr,strand,start,end,gene,biotype=values
try: symbol = annotate_db[gene].Symbol()
except Exception: symbol = ''
values = [gene,symbol,chr,strand,str(start),str(end),biotype]
export_data.write(string.join(values,'\t')+'\n')
export_data.close()
print species, 'chromosome locations exported to:\n',export_path
if __name__ == '__main__':
getAnnotations('Sc')
|
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/ipython-0.8.2-py2.5.egg/IPython/ColorANSI.py | Python | bsd-3-clause | 6,449 | 0.010699 | # -*- coding: utf-8 -*-
"""Tools for coloring text in ANSI terminals.
$Id: ColorANSI.py 2167 2007-03-21 06:57:50Z fperez $"""
#*****************************************************************************
# Copyright (C) 2002-2006 Fernando Perez. <fperez@colorado.edu>
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#*****************************************************************************
from IPython import Release
__author__ = '%s <%s>' % Release.authors['Fernando']
__license__ = Release.license
__all__ = ['TermColors','InputTermColors','ColorScheme','ColorSchemeTable']
import os
from IPython.ipstruct import Struct
def make_color_table(in_class):
"""Build a set of color attributes in a class.
Helper function for building the *TermColors classes."""
color_templates = (
("Black" , "0;30"),
("Red" , "0;31"),
("Green" , "0;32"),
("Brown" , "0;33"),
("Blue" , "0;34"),
("Purple" , "0;35"),
("Cyan" , "0;36"),
("LightGray" , "0;37"),
("DarkGray" , "1;30"),
("LightRed" , "1;31"),
("LightGreen" , "1;32"),
("Yellow" , "1;33"),
("LightBlue" , "1;34"),
("LightPurple" , "1;35"),
("LightCyan" , "1;36"),
("White" , "1;37"), )
for name,value in color_templates:
setattr(in_class,name,in_class._base % value)
class TermColors:
"""Color escape sequences.
This class defines the escape sequences for all the standard (ANSI?)
colors in terminals. Also defines a NoColor escape which is just the null
string, suitable for defining 'dummy' color schemes in terminals which get
confused by color escapes.
This class should be used as a mixin for building color schemes."""
NoColor = '' # for color schemes in color-less terminals.
Normal = '\033[0m' # Reset normal coloring
_base = '\033[%sm' # Template for all other colors
# Build the actual color table as a set of class attributes:
make_color_table(TermColors)
class InputTermColors:
"""Color escape sequences for input prompts.
This class is similar to TermColors, but the escapes are wrapped in \001
and \002 so that readline can properly know the length of each line and
can wrap lines accordingly. Use this class for any colored text which
needs to be used in input prompts, such as in calls to raw_input().
This class defines the escape sequences for all the standard (ANSI?)
colors in terminals. Also defines a NoColor escape which is just the null
string, suitable for defining 'dummy' color schemes in terminals which get
confused by color escapes.
This class should be used as a mixin for building color schemes."""
NoColor = '' # for color schemes in color-less terminals.
if os.name == 'nt' and os.environ.get('TERM','dumb') == 'emacs':
# (X)emacs on W32 gets confused with \001 and \002 so we remove them
Normal = '\033[0m' # Reset normal coloring
_base = '\033[%sm' # Template for all other colors
else:
Normal = '\001\033[0m\002' # Reset normal coloring
_base = '\001\033[%sm\002' # Template for all other colors
# Build the actual color table as a set of class attributes:
make_color_table(InputTermColors)
class ColorScheme:
"""Generic color scheme class. Just a name and a Struct."""
def __init__(self,__scheme_name_,colordict=None,**colormap):
self.name = __scheme_name_
if colordict is None:
self.colors = Struct(**colormap)
else:
self.colors = Struct(colordict)
def copy(self,name=None):
"""Return a full copy of the object, optionally renaming it."""
if name is None:
name = self.name
return ColorScheme(name,self.colors.__dict__)
class ColorSchemeTable(dict):
"""General class to handle tables of color schemes.
It's basically a dict of color schemes with a couple of shorthand
attributes and some convenient methods.
active_scheme_name -> obvious
active_colors -> actual color table of the active scheme"""
def __init__(self,scheme_list=None,default_scheme=''):
"""Create a table of color schemes.
The table can | be created empty and manually filled or it can be
created with a list of valid color schemes AND the | specification for
the default active scheme.
"""
# create object attributes to be set later
self.active_scheme_name = ''
self.active_colors = None
if scheme_list:
if default_scheme == '':
raise ValueError,'you must specify the default color scheme'
for scheme in scheme_list:
self.add_scheme(scheme)
self.set_active_scheme(default_scheme)
def copy(self):
"""Return full copy of object"""
return ColorSchemeTable(self.values(),self.active_scheme_name)
def add_scheme(self,new_scheme):
"""Add a new color scheme to the table."""
if not isinstance(new_scheme,ColorScheme):
raise ValueError,'ColorSchemeTable only accepts ColorScheme instances'
self[new_scheme.name] = new_scheme
def set_active_scheme(self,scheme,case_sensitive=0):
"""Set the currently active scheme.
Names are by default compared in a case-insensitive way, but this can
be changed by setting the parameter case_sensitive to true."""
scheme_names = self.keys()
if case_sensitive:
valid_schemes = scheme_names
scheme_test = scheme
else:
valid_schemes = [s.lower() for s in scheme_names]
scheme_test = scheme.lower()
try:
scheme_idx = valid_schemes.index(scheme_test)
except ValueError:
raise ValueError,'Unrecognized color scheme: ' + scheme + \
'\nValid schemes: '+str(scheme_names).replace("'', ",'')
else:
active = scheme_names[scheme_idx]
self.active_scheme_name = active
self.active_colors = self[active].colors
# Now allow using '' as an index for the current active scheme
self[''] = self[active]
|
tdyas/pants | src/python/pants/auth/cookies.py | Python | apache-2.0 | 2,261 | 0.001769 | # Copyright 2018 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
import os
from http.cookiejar import LWPCookieJar
from pants.process.lock import OwnerPrintingInterProcessFileLock
from pants.subsystem.subsystem import Subsystem
from pants.util.dirutil import safe_mkdir_for
from pants.util.memo import memoized_property
class Cookies(Subsystem):
options_scope = "cookies"
@classmethod
def register_options(cls, register):
super().register_options(register | )
register(
"--path",
advanced=True,
fingerprint=True,
default=os.path.join(register.bootstrap.pants_bootstrapdir, "auth", "cookies"),
help="Path to file that stores persistent cookies. "
"Defaults to <pants bootstrap dir>/auth/cookies.",
)
def update(self, cookies):
"""Add specified cookies to our cookie jar, and persists it.
:param cookies: Any iterable that yields http.cookiejar.Cookie in | stances, such as a CookieJar.
"""
cookie_jar = self.get_cookie_jar()
for cookie in cookies:
cookie_jar.set_cookie(cookie)
with self._lock:
cookie_jar.save()
def get_cookie_jar(self):
"""Returns our cookie jar."""
cookie_file = self._get_cookie_file()
cookie_jar = LWPCookieJar(cookie_file)
if os.path.exists(cookie_file):
cookie_jar.load()
else:
safe_mkdir_for(cookie_file)
# Save an empty cookie jar so we can change the file perms on it before writing data to it.
with self._lock:
cookie_jar.save()
os.chmod(cookie_file, 0o600)
return cookie_jar
def _get_cookie_file(self):
# We expanduser to make it easy for the user to config the cookies into their homedir.
return os.path.realpath(os.path.expanduser(self.get_options().path))
@memoized_property
def _lock(self):
"""An identity-keyed inter-process lock around the cookie file."""
lockfile = "{}.lock".format(self._get_cookie_file())
safe_mkdir_for(lockfile)
return OwnerPrintingInterProcessFileLock(lockfile)
|
ObsidianBlk/GemRB--Unofficial- | gemrb/GUIScripts/bg2/Start2.py | Python | gpl-2.0 | 9,946 | 0.044842 | # GemRB - Infinity Engine Emulator
# Copyright (C) 2003 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
#this is essentially Start.py from the SoA game, except for a very small change
import GemRB
import GUICommon
StartWindow = 0
TutorialWindow = 0
QuitWindow = 0
ExitButton = 0
SinglePlayerButton = 0
OptionsButton = 0
MultiPlayerButton = 0
MoviesButton = 0
BackButton = 0
def OnLoad():
global StartWindow, TutorialWindow, QuitWindow
global ExitButton, OptionsButton, MultiPlayerButton, MoviesButton, SinglePlayerButton, BackButton
global SinglePlayerButton
skip_videos = GemRB.GetVar ("SkipIntroVideos")
GemRB.LoadWindowPack("START", 640, 480)
#tutorial subwindow
if not GUICommon.GameIsBG2Demo():
TutorialWindow = GemRB.LoadWindow (5)
TextAreaControl = TutorialWindow.GetControl (1)
CancelButton = TutorialWindow.GetControl (11)
PlayButton = TutorialWindow.GetControl (10)
TextAreaControl.SetText (44200)
CancelButton.SetText (13727)
PlayButton.SetText (33093)
PlayButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, PlayPress)
CancelButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, CancelTut)
PlayButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
#quit subwindow
QuitWindow = GemRB.LoadWindow (3)
QuitTextArea = QuitWindow.GetControl (0)
CancelButton = QuitWindow.GetControl (2)
ConfirmButton = QuitWindow.GetControl (1)
QuitTextArea.SetText (19532)
CancelButton.SetText (13727)
ConfirmButton.SetText (15417)
ConfirmButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ExitConfirmed)
CancelButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ExitCancelled)
ConfirmButton.SetFlags (IE_GUI_BUTTON_DEFAULT, OP_OR)
CancelButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
#main window
StartWindow = GemRB.LoadWindow (0)
StartWindow.SetFrame ()
#this is the ToB specific part of Start.py
if GemRB.GetVar("oldgame")==1:
if GUICommon.HasTOB():
StartWindow.SetPicture("STARTOLD")
if not skip_videos:
GemRB.PlayMovie ("INTRO15F", 1)
else:
if not skip_videos:
GemRB.PlayMovie ("INTRO", 1)
#end ToB specific part
SinglePlayerButton = StartWindow.GetControl (0)
ExitButton = StartWindow.GetControl (3)
OptionsButton = StartWindow.GetControl (4)
MultiPlayerButton = StartWindow.GetControl (1)
MoviesButton = StartWindow.GetControl (2)
BackButton = StartWindow.GetControl (5)
StartWindow.CreateLabel(0x0fff0000, 0,450,640,30, "REALMS", "", 1)
Label=StartWindow.GetControl (0x0fff0000)
Label.SetText (GEMRB_VERSION)
if GUICommon.HasTOB():
BackButton.SetState (IE_GUI_BUTTON_ENABLED)
BackButton.SetText (15416)
else:
BackButton.SetState (IE_GUI_BUTTON_DISABLED)
BackButton.SetText ("")
SinglePlayerButton.SetState (IE_GUI_BUTTON_ENABLED)
ExitButton.SetState (IE_GUI_BUTTON_ENABLED)
OptionsButton.SetState (IE_GUI_BUTTON_ENABLED)
if GUICommon.GameIsBG2Demo():
MultiPlayerButton.SetState (IE_GUI_BUTTON_DISABLED)
MoviesButton.SetState (IE_GUI_BUTTON_DISABLED)
else:
MultiPlayerButton.SetState (IE_GUI_BUTTON_ENABLED)
MultiPlayerButton.SetText (15414)
MultiPlayerButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, MultiPlayerPress)
MoviesButton.SetState (IE_GUI_BUTTON_ENABLED)
MoviesButton.SetText (15415)
MoviesButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, MoviesPress)
SinglePlayerButton.SetText (15413)
ExitButton.SetText (15417)
OptionsButton.SetText (13905)
SinglePlayerButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, SinglePlayerPress)
ExitButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ExitPress)
OptionsButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, OptionsPress)
BackButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, Restart)
ExitButton.SetFlags (IE_GUI_BUTTON_CANCEL, OP_OR)
QuitWindow.SetVisible (WINDOW_INVISIBLE)
if not GUICommon.GameIsBG2Demo():
TutorialWindow.SetVisible (WINDOW_INVISIBLE)
StartWindow.SetVisible (WINDOW_VISIBLE)
MusicTable | = GemRB.LoadTable ("songlist")
# the table has useless rownames, so we can't search for BG2Theme
theme = MusicTable.GetValue ("33", "RESOURCE")
GemRB.LoadMusicPL (theme, 1)
return
def SinglePlayerPress():
SinglePlayerButton.SetText (13728)
SinglePlayerButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, NewSingle)
MultiPlayerButton.SetText (13729)
MultiPlayerButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, LoadSingle)
MultiPlayerButton.SetState (IE | _GUI_BUTTON_ENABLED)
if not GUICommon.GameIsBG2Demo():
if GemRB.GetVar("oldgame")==1:
MoviesButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, Tutorial)
MoviesButton.SetText (33093)
else:
MoviesButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ImportGame)
MoviesButton.SetText (71175)
ExitButton.SetText (15416)
ExitButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, BackToMain)
OptionsButton.SetText ("")
OptionsButton.SetState (IE_GUI_BUTTON_DISABLED)
BackButton.SetText ("")
BackButton.SetState (IE_GUI_BUTTON_DISABLED)
return
def MultiPlayerPress():
OptionsButton.SetText ("")
SinglePlayerButton.SetText (20642)
ExitButton.SetText (15416)
MultiPlayerButton.SetText ("")
MoviesButton.SetText (11825)
MultiPlayerButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, None)
SinglePlayerButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, ConnectPress)
MoviesButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, PregenPress)
ExitButton.SetEvent (IE_GUI_BUTTON_ON_PRESS, BackToMain)
MultiPlayerButton.SetState (IE_GUI_BUTTON_DISABLED)
OptionsButton.SetState (IE_GUI_BUTTON_DISABLED)
return
def ConnectPress():
#well...
#GemRB.SetVar("PlayMode",2)
return
def PregenPress():
if StartWindow:
StartWindow.Unload()
if QuitWindow:
QuitWindow.Unload()
if TutorialWindow:
TutorialWindow.Unload()
#do not start game after chargen
GemRB.SetVar("PlayMode",-1) #will allow export
GemRB.SetVar("Slot",1)
GemRB.LoadGame(None)
GemRB.SetNextScript ("CharGen")
return
def LoadSingle():
if StartWindow:
StartWindow.Unload()
if QuitWindow:
QuitWindow.Unload()
if TutorialWindow:
TutorialWindow.Unload()
if GemRB.GetVar ("oldgame") == 0:
GemRB.SetVar ("PlayMode", 2)
GemRB.SetVar ("SaveDir", 1)
else:
GemRB.SetVar ("PlayMode", 0)
GemRB.SetVar ("SaveDir", 0)
GemRB.SetNextScript ("GUILOAD")
return
def NewSingle():
if StartWindow:
StartWindow.Unload()
if QuitWindow:
QuitWindow.Unload()
if TutorialWindow:
TutorialWindow.Unload()
if GemRB.GetVar ("oldgame") == 0:
GemRB.SetVar ("PlayMode", 2)
GemRB.SetVar ("SaveDir", 1)
else:
GemRB.SetVar ("PlayMode", 0)
GemRB.SetVar ("SaveDir", 0)
GemRB.SetVar("Slot",1)
GemRB.LoadGame(None)
GemRB.SetNextScript ("CharGen")
return
def ImportGame():
if StartWindow:
StartWindow.Unload()
if QuitWindow:
QuitWindow.Unload()
if TutorialWindow:
TutorialWindow.Unload()
#now this is tricky, we need to load old games, but set up the expansion
GemRB.SetVar ("PlayMode", 0)
GemRB.SetVar ("SaveDir", 0)
GemRB.SetNextScript ("GUILOAD")
return
def Tutorial():
StartWindow.SetVisible (WINDOW_INVISIBLE)
TutorialWindow.SetVisible (WINDOW_VISIBLE)
return
def PlayPress():
if StartWindow:
StartWindow.Unload()
if QuitWindow:
QuitWindow.Unload()
if TutorialWindow:
TutorialWindow.Unload()
GemRB.SetVar("PlayMode",1) #tutorial
GemRB.SetVar("SaveDir",0)
GemRB.SetVar("Slot",1)
GemRB.LoadGame(None)
GemRB.SetNextScript ("CharGen")
return
def CancelTut():
TutorialWindow.SetVisible (WINDOW_INVISIBLE)
StartWindow.SetVisible (WINDOW_VISIBLE)
return
def ExitPress():
StartWindow.SetVisible (WINDOW_INVISIBLE)
QuitWindow.SetVisible (WINDOW_VISIBLE)
return
def ExitConfirmed():
GemRB.Quit()
return
def OptionsPress():
#apparently the order is important |
Nehoroshiy/urnn | manifolds/__init__.py | Python | mit | 100 | 0.01 | from .unitary import Unitary
from .unitary_kron import UnitaryKron
__all | __ = [Unitary, UnitaryKro | n] |
gdi2290/django | tests/postgres_tests/models.py | Python | bsd-3-clause | 822 | 0 | from django.contrib.postgres.fields import ArrayField, HStoreField
from django.db import models
class IntegerArrayModel(models.Model):
field = ArrayField(models.IntegerField | ())
class NullableIntegerArrayModel(models.Model):
field = ArrayField(models.IntegerField(), blank=True, null=True)
class CharArrayModel(models.Model):
field = ArrayField(models.CharField(max_length=10))
class DateTimeArrayModel(models.Model):
field = ArrayField(models.DateTimeField())
class Nes | tedIntegerArrayModel(models.Model):
field = ArrayField(ArrayField(models.IntegerField()))
class HStoreModel(models.Model):
field = HStoreField(blank=True, null=True)
class CharFieldModel(models.Model):
field = models.CharField(max_length=16)
class TextFieldModel(models.Model):
field = models.TextField()
|
Vagab0nd/SiCKRAGE | sickchill/oldbeard/dailysearcher.py | Python | gpl-3.0 | 3,608 | 0.002494 | import datetime
import threading
import sickchill.oldbeard.search_queue
from sickchill import logger, settings
from sickchill.helper.exceptions import MultipleShowObjectsException
from sickchill.show.Show import Show
from . import common, db, network_timezones
class DailySearcher(object): # pylint:disable=too-few-public-methods
def __init__(self):
self.lock = threading.Lock()
self.amActive = False
def run(self, force=False): # pylint:disable=too-many-branches
"""
Runs the daily searcher, queuing selected episodes for search
:param force: Force search
"""
if self.amActive:
return
self.amActive = True
logger.info(_("Searching for new released episodes ..."))
if not network_timezones.network_dict:
network_timezones.update_network_dict()
if network_timezones.network_dict:
curDate = (datetime.date.today() + datetime.timedelta(days=1)).toordinal()
else:
curDate = (datetime.date.today() + datetime.timedelta(days=2)).toordinal()
curTime = datetime.datetime.now(network_timezones.sb_timezone)
main_db_con = db.DBConnection()
sql_results = main_db_con.select("SELECT showid, airdate, season, episode FROM tv_episodes WHERE status = ? AND (airdate <= ? and airdate > 1)",
[common.UNAIRED, curDate])
sql_l = []
show = None
for sqlEp in sql_results:
try:
if not show or int(sqlEp["showid"]) != show.indexerid:
show = Show.find(settings.showList, int(sqlEp["showid"]))
# for when there is orphaned series in the database but not loaded into our showlist
if not show or show.paused:
continue
except MultipleShowObjectsException:
logger.info("ERROR: expected to find a single show matching " + str(sqlEp['showid']))
continue
if show.airs and show.network:
# This is how you assure it is always converted to local time
air_time = network_timezones.parse_date_time(sqlEp['airdate'], show.airs, show.network).astimezone(network_timezones.sb_timezone)
# filter out any episodes that haven't started airing yet,
# but set them to the default status while they are airing
# so they are snatched faster
if air_time > curTime:
continue
ep = show.getEpisode(sqlEp["season"], sqlEp["episode"])
with ep.lock:
if ep.season == 0:
logger.info("New episode " + ep.pretty_name() + " airs today, setting status to SKIPPED because is a special season")
ep.status = common.SKIPPED
else:
logger.info("New episode {0} airs today, setting to default episode status for this show: {1}".format(ep.pretty_name(), common.statusStrings[ep.show.default_ep_status]))
ep.status = ep.show.default_ep_status
sql_l.append(ep.get_sql())
if sql_l:
main_db_con = db.DBConnection()
main_db_con.mass_action(sql_l)
else:
logg | er.info("No new released episodes found ...")
# queue episode for daily search
dailysearch_queue_item = sickchill.oldbeard.search_queue.DailySearchQueueItem()
settings.searchQu | eueScheduler.action.add_item(dailysearch_queue_item)
self.amActive = False
|
LinuxCircle/tea5767 | hello.py | Python | mit | 114 | 0.008772 | pr | int("TEA5767 FM Radio project")
print("dipto@linuxcircle.com")
print("Excellent codes will be uploaded here!") | |
lmazuel/azure-sdk-for-python | azure-mgmt-resource/azure/mgmt/resource/resources/v2016_02_01/models/resource_group.py | Python | mit | 1,961 | 0 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class ResourceGroup(Model):
"""Resource group information.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The ID of the resource group.
:vartype id: str
:param name: The Name of the resource group.
:type name: str
:param properties:
:type properties:
~azure.mgmt.resource.resources.v2016_02_01.models.ResourceGroupProperties
:param location: The location of the resource group. It cannot be changed
after the resource group has been created. Has to be one of the supported
Azure Locations, such as West US, East US, West Europe, East Asia, etc.
:type location: str
:param tags: The tags attached to the resource group.
:type tags: dict[str, str]
"""
_validation = {
'id': {'readonly': True},
'location': {'required': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'properties': {'key': 'properties', 'type': 'ResourceGroupProperties'},
'location': { | 'key': 'location', 'type': 'str'},
'tags': {'key': 'tags', 'type': '{str}'},
}
def __init__(self, location, name=None, properties=None, tags=None):
super(ResourceGroup, self).__init__()
self.id = None
self.name = name
self.propert | ies = properties
self.location = location
self.tags = tags
|
coblo/pyiscclib | tests/test_iscclib_image.py | Python | bsd-2-clause | 600 | 0 | # -*- coding: utf-8 -*-
import pytest
from os.path import dirname, join
from iscclib.image import ImageID
TEST_IMG = join(dirname(__file__), '4.2.04.jpg')
TEST_CODE = u'CAUD7P6NU73ID | '
TEST_IDENT = 13733935459959803788
def test_image_id_min_max():
max_value = 2 ** 64 - 1
iid = ImageID(ident=max_value)
assert iid.code == ImageID.CODE_MAX
def test_image_id_from_image():
iid = ImageID.from_image(TEST_IMG)
assert iid.code == TEST_CODE
assert iid.ident == TEST_IDENT
def test_image_id_non_64_bits_raises():
with pytest.raises(ValueError):
ImageID(1 | , bits=32)
|
ragavvenkatesan/Convolutional-Neural-Networks | tests/layers/test_conv_pool.py | Python | mit | 23,881 | 0.020225 | import unittest
import numpy
import theano
from yann.layers.conv_pool import conv_pool_layer_2d as cl
from yann.layers.conv_pool import dropout_conv_pool_layer_2d as dcl
from yann.layers.conv_pool import deconv_layer_2d as dl
from yann.layers.conv_pool import dropout_deconv_layer_2d as ddl
try:
from unittest.mock import Mock
except ImportError:
from mock import Mock,patch
class TestConvPool(unittest.TestCase):
def setUp(self):
self.verbose = 3
self.channels = 1
self.mean_subtract = False
self.rng = None
self.borrow = True
self.input_shape = (1,1,10,10)
self.input_ndarray = numpy.random.rand(1,1,10,10)
self.output_dropout_ndarray= numpy.zeros((1,1,10,10))
self.output_test = numpy.zeros((1,1,10,10))
self.output_train = numpy.ones((1,1,10,10))
self.input_tensor = theano.shared(self.input_ndarray)
self.gamma = theano.shared(value=numpy.ones((self.channels,),dtype=theano.config.floatX), name = 'gamma', borrow = self.borrow)
self.beta = theano.shared(value=numpy.zeros((self.channels,),dtype=theano.config.floatX), name = 'beta', borrow=self.borrow)
self.running_mean = theano.shared(value=numpy.zeros((self.channels,), dtype=theano.config.floatX), name = 'population_mean', borrow = self.borrow)
self.running_var = theano.shared(value=numpy.ones((self.channels,),dtype=theano.config.floatX), name = 'population_var', borrow=self.borrow)
self.w = theano.shared(value=
numpy.asarray(0.01 * numpy.random.standard_normal(size=(1,1,3,3)),
dtype=theano.config.floatX), borrow=True,
name='filterbank')
self.b = theano.shared(value=numpy.zeros((10,), dtype=theano.config.floatX),
name='bias', borrow=True)
self.input_params = (self.w, self.b,self.gamma, self.beta, self.running_mean, self.running_var)
self.input_params_none = (None, None,None, None, None, self.running_var)
self.conv_pool_layer_2d_name = "cl"
self.dropout_conv_pool_layer_2d_name = "dcl"
self.deconv_pool_layer_2d_name = "dl"
self.dropout_deconv_pool_layer_2d_name = "ddl"
self.dropout_rate = 1
self.default_param_value = [1.]
self.custom_param_value = [1., 1.,1.]
self.pool_size_mismatch_exception_msg = " Unpool operation not yet supported be deconv layer"
self.activation_tuple_exception_msg = "Deconvolution layer does not support maxout activation"
@patch('theano.tensor.unbroadcast')
@patch('yann.layers.conv_pool._activate')
@patch('yann.layers.conv_pool.batch_normalization_test')
@patch('yann.layers.conv_pool.batch_normalization_train')
def test1_conv_pool_layer_2d(self,mock_batch_normalization_train,mock_batch_normalization_test,mock_activate,mock_unbroadcast):
mock_unbroadcast.return_value = 1
mock_activate.return_value = (self.input_ndarray, self.input_shape)
mock_batch_normalization_train.return_value = (self.output_train,1,1,1,1)
mock_batch_normalization_test.return_value =self.output_test
self.conv_pool_layer_2d = cl(
input = self.input_tensor,
id = self.conv_pool_layer_2d_name,
input_shape = self.input_shape,
nkerns=10,
verbose = self.verbose,
batch_norm = True
)
self.assertEqual(self.conv_pool_layer_2d.id,self.conv_pool_layer_2d_name)
self.assertEqual(self.conv_pool_layer_2d.output_shape,self.input_shape)
self.assertTrue(numpy.allclose(self.conv_pool_layer_2 | d.output,self.input_ndarray))
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.inference,self.input_ndarray))
@patch('theano.tensor.unbroadcast')
@patch('yann.layers.conv_pool. | _activate')
@patch('yann.layers.conv_pool.batch_normalization_test')
@patch('yann.layers.conv_pool.batch_normalization_train')
def test2_conv_pool_layer_2d_ip_none(self,mock_batch_normalization_train,mock_batch_normalization_test,mock_activate,mock_unbroadcast):
mock_unbroadcast.return_value = 1
mock_activate.return_value = (self.input_ndarray, self.input_shape)
mock_batch_normalization_train.return_value = (self.output_train,1,1,1,1)
mock_batch_normalization_test.return_value =self.output_test
self.conv_pool_layer_2d = cl(
input = self.input_tensor,
id = self.conv_pool_layer_2d_name,
input_shape = self.input_shape,
nkerns=10,
verbose = self.verbose,
input_params= self.input_params_none,
batch_norm = True
)
self.assertEqual(self.conv_pool_layer_2d.id,self.conv_pool_layer_2d_name)
self.assertEqual(self.conv_pool_layer_2d.output_shape,self.input_shape)
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.output,self.input_ndarray))
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.inference,self.input_ndarray))
@patch('theano.tensor.unbroadcast')
@patch('yann.layers.conv_pool._activate')
@patch('yann.layers.conv_pool.batch_normalization_test')
@patch('yann.layers.conv_pool.batch_normalization_train')
def test3_conv_pool_layer_2d_ip_vals(self,mock_batch_normalization_train,mock_batch_normalization_test,mock_activate,mock_unbroadcast):
mock_unbroadcast.return_value = 1
mock_activate.return_value = (self.input_ndarray, self.input_shape)
mock_batch_normalization_train.return_value = (self.output_train,1,1,1,1)
mock_batch_normalization_test.return_value =self.output_test
self.conv_pool_layer_2d = cl(
input = self.input_tensor,
id = self.conv_pool_layer_2d_name,
input_shape = self.input_shape,
nkerns=10,
verbose = self.verbose,
input_params= self.input_params,
batch_norm = True
)
self.assertEqual(self.conv_pool_layer_2d.id,self.conv_pool_layer_2d_name)
self.assertEqual(self.conv_pool_layer_2d.output_shape,self.input_shape)
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.output,self.input_ndarray))
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.inference,self.input_ndarray))
@patch('theano.tensor.unbroadcast')
@patch('yann.layers.conv_pool._activate')
@patch('yann.layers.conv_pool.batch_normalization_test')
@patch('yann.layers.conv_pool.batch_normalization_train')
def test4_conv_pool_layer_2d_no_bn(self,mock_batch_normalization_train,mock_batch_normalization_test,mock_activate,mock_unbroadcast):
mock_unbroadcast.return_value = 1
mock_activate.return_value = (self.input_ndarray, self.input_shape)
mock_batch_normalization_train.return_value = (self.output_train,1,1,1,1)
mock_batch_normalization_test.return_value =self.output_test
self.conv_pool_layer_2d = cl(
input = self.input_tensor,
id = self.conv_pool_layer_2d_name,
input_shape = self.input_shape,
nkerns=10,
verbose = self.verbose,
batch_norm = False
)
self.assertEqual(self.conv_pool_layer_2d.id,self.conv_pool_layer_2d_name)
self.assertEqual(self.conv_pool_layer_2d.output_shape,self.input_shape)
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.output,self.input_ndarray))
self.assertTrue(numpy.allclose(self.conv_pool_layer_2d.inference,self.input_ndarray))
@patch('yann.layers.conv_pool._dropout')
def test5_dropout_conv_pool_layer_2d_layer(self,mock_dropout):
mock_dropout.return_value |
anselmobd/fo2 | src/comercial/migrations/0012_metafaturamento_faturamento_integer.py | Python | mit | 464 | 0 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-02-10 21:39
from __future__ import unicode_literals
from django.db import migrations, models
class | Migration(migrations.Migration):
dependencies = [
('comercial', '0011_metafaturamento'),
]
operations = [
migrations.AlterField(
model_name='metafaturamento',
name='faturamento',
field=mod | els.IntegerField(default=0),
),
]
|
2degrees/wsgi-xsendfile | setup.py | Python | bsd-3-clause | 2,005 | 0 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (c) 2010-2015, 2degrees Limited.
# All Rights Reserved.
#
# This file is part of wsgi-xsendfile <http://pythonhosted.org/xsendfile/>,
# which is subject to the provisions of the BSD at
# <http://dev.2degreesnetwork.com/p/2degrees-license.html>. A copy of the
# license should accompany this distribution. THIS SOFTWARE IS PROVIDED "AS IS"
# AND ANY AND ALL EXPRESS OR IMPLIED WARRANTIES ARE DISCLAIMED, INCLUDING, BUT
# NOT LIMITED TO, THE IMPLIED WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST
# INFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE.
#
##############################################################################
import os
from setuptools import setup
_CURRENT_DIR = os.path.abspath(os.path.dirname(__file__))
_README = open(os.path.join(_CURRENT_DIR, "README.rst")).read()
_VERSION = open(os.path.join(_CURRENT_DIR, "VERSION.txt")).readline().rstrip()
setup(
name="xsendfile",
version=_VERSION,
description="X-Sendfile implementation in Python/WSGI",
long_description=_README,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Web Environment",
"Intended Audience :: Developers",
"License :: OSI Approved :: BSD License",
"Natural Language :: English",
| "Operating System :: OS Independent",
"Programming Language :: Python :: 2",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: WSGI",
"Topic :: Security",
],
key | words="x-sendfile xsendfile x-accel authorization token url hot-link",
author="2degrees Limited",
author_email="2degrees-floss@2degreesnetwork.com",
url="http://pythonhosted.org/xsendfile/",
license="BSD (http://dev.2degreesnetwork.com/p/2degrees-license.html)",
py_modules=["xsendfile"],
install_requires=["Paste >= 2.0.2", "six >= 1.10"],
test_suite="nose.collector",
)
|
opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/support/migrations/0005_extend_icon_url_size.py | Python | mit | 437 | 0.002288 | # Generated by Django 2.2.10 on 2020-04-05 11:11
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('support', '0004_templateconfirmationcomment'),
]
operations = [
migrations.AlterField(
model_name='priority',
name='icon_url',
field=models.URLField(blank=True, max_length=500, verbose_name='i | con url'),
| ),
]
|
LowerSilesians/ursa-rest-sqlserver | ursa_rest_sqlserver/manage.py | Python | apache-2.0 | 817 | 0.001224 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "ursa_rest_sqlserver.settings")
try:
from django.core.management import execute_from_command_line
| except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't | import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
spaam/svtplay-dl | lib/svtplay_dl/tests/test_service.py | Python | mit | 2,933 | 0.002387 | import unittest
from svtplay_dl.service import Generic
from svtplay_dl.service import opengraph_get
from svtplay_dl.service import Service
from svtplay_dl.service import service_handler
from svtplay_dl.service.services import sites
from svtplay_dl.utils.parser import setup_defaults
class MockService(Service):
supported_domains = ["example.com", "example.net"]
class ServiceTest(unittest.TestCase):
def test_supports(self):
assert MockService.handles("http://example.com/video.swf?id=1")
assert MockService.handles("http://example.net/video.swf?id=1")
assert MockService.handles("http://www.example.com/video.swf?id=1")
assert MockService.handles("http://www.example.net/video.swf?id=1")
class service_handlerTest(unittest.TestCase):
def test_service_handler(self):
config = setup_defaults()
assert not service_handler(sites, config, "localhost")
class service_handlerTest2(unittest.TestCase):
def test_service_handler(self):
config = setup_defaults()
assert isinstance(service_handler(sites, config, "https://www.svtplay.se"), Service)
class service_opengraphGet(unittest.TestCase):
text = '<html><head><meta name="og:image" property="og:image" content="http://example.com/img3.jpg"><meta'
def test_og_get(self):
assert opengraph_get(self.text, "image") == "http://example.com/img3.jpg"
class service_opengraphGet_none(unittest.TestCase):
text = '<html><head><meta name="og:image" property="og:image" content="http://example.com/img3.jpg"><meta'
def test_og_get(self):
assert not opengraph_get(self.text, "kalle")
class service_opengraphGet2(unittest.TestCase):
text = '<html><head><meta name="og:image" property="og:image" content="http://example.com/img3.jpg">'
def test_og_get(self):
assert opengraph_get(self.text, "image") == "http://example.com/img3.jpg"
class test_generic(unittest.TestCase):
def test_nothing(self):
config = setup_defaults()
generic = Generic(config, "http://example.com")
data = "hejsan"
assert generic._match(data, sites) == ("http://example.com", None)
def test_hls(self):
| config = setup_defaults()
generic = Generic(config, "http://example.com")
data = 'source src="http://example.com/hls.m3u8" type="application/x-mpegURL"'
assert isinstance(generic._match(data, sites)[1], Service)
def test_tv | 4(self):
config = setup_defaults()
generic = Generic(config, "http://example.com")
data = "rc=https://www.tv4play.se/iframe/video/12499319 "
assert isinstance(generic._match(data, sites)[1], Service)
def test_vimeo(self):
config = setup_defaults()
generic = Generic(config, "http://example.com")
data = 'src="https://player.vimeo.com/video/359281775" '
assert isinstance(generic._match(data, sites)[1], Service)
|
vishnu-kumar/PeformanceFramework | rally_os/plugins/openstack/scenarios/tempest/tempest.py | Python | apache-2.0 | 4,269 | 0 | # Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed | under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY | KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally import consts
from rally.plugins.openstack import scenario
from rally.plugins.openstack.scenarios.tempest import utils
from rally.task import validation
class TempestScenario(scenario.OpenStackScenario):
"""Benchmark scenarios that launch Tempest tests."""
@validation.tempest_tests_exists()
@validation.required_openstack(admin=True)
@scenario.configure(context={"tempest": {}})
@utils.tempest_log_wrapper
def single_test(self, test_name, log_file, tempest_conf=None):
"""Launch a single Tempest test by its name.
:param test_name: name of tempest scenario for launching
:param log_file: name of file for junitxml results
:param tempest_conf: User specified tempest.conf location
"""
if (not test_name.startswith("tempest.api.")
and test_name.split(".")[0] in consts.TempestTestsAPI):
test_name = "tempest.api." + test_name
self.context["verifier"].run(test_name, log_file=log_file,
tempest_conf=tempest_conf)
@validation.required_openstack(admin=True)
@scenario.configure(context={"tempest": {}})
@utils.tempest_log_wrapper
def all(self, log_file, tempest_conf=None):
"""Launch all discovered Tempest tests by their names.
:param log_file: name of file for junitxml results
:param tempest_conf: User specified tempest.conf location
"""
self.context["verifier"].run("", log_file=log_file,
tempest_conf=tempest_conf)
@validation.tempest_set_exists()
@validation.required_openstack(admin=True)
@scenario.configure(context={"tempest": {}})
@utils.tempest_log_wrapper
def set(self, set_name, log_file, tempest_conf=None):
"""Launch all Tempest tests from a given set.
:param set_name: set name of tempest scenarios for launching
:param log_file: name of file for junitxml results
:param tempest_conf: User specified tempest.conf location
"""
if set_name == "full":
testr_arg = ""
elif set_name == "smoke":
testr_arg = "smoke"
else:
testr_arg = "tempest.api.%s" % set_name
self.context["verifier"].run(testr_arg, log_file=log_file,
tempest_conf=tempest_conf)
@validation.tempest_tests_exists()
@validation.required_openstack(admin=True)
@scenario.configure(context={"tempest": {}})
@utils.tempest_log_wrapper
def list_of_tests(self, test_names, log_file, tempest_conf=None):
"""Launch all Tempest tests from a given list of their names.
:param test_names: list of tempest scenarios for launching
:param log_file: name of file for junitxml results
:param tempest_conf: User specified tempest.conf location
"""
self.context["verifier"].run(" ".join(test_names), log_file=log_file,
tempest_conf=tempest_conf)
@validation.required_openstack(admin=True)
@scenario.configure(context={"tempest": {}})
@utils.tempest_log_wrapper
def specific_regex(self, regex, log_file, tempest_conf=None):
"""Launch Tempest tests whose names match a given regular expression.
:param regex: regexp to match Tempest test names against
:param log_file: name of file for junitxml results
:param tempest_conf: User specified tempest.conf location
"""
self.context["verifier"].run(regex, log_file=log_file,
tempest_conf=tempest_conf)
|
iamweilee/pylearn | threading-example-1.py | Python | mit | 2,573 | 0.008162 | '''
×¢ÒâÏß³ÌÖ§³ÖÄ£¿éÊÇ¿ÉÑ¡µÄ, ÓпÉÄÜÔÚһЩ Python ½âÊÍÆ÷Öв»¿ÉÓÃ.
Ö´ÐÐ Python ³ÌÐòµÄʱºò, Êǰ´ÕÕ´ÓÖ÷Ä£¿é¶¥¶ËÏòÏÂÖ´ÐеÄ.
Ñ»·ÓÃÓÚÖØ¸´Ö´Ðв¿·Ö´úÂë, º¯ÊýºÍ·½·¨»á½«¿ØÖÆÁÙÊ±ÒÆ½»µ½³ÌÐòµÄÁíÒ»²¿·Ö.
ͨ¹ýÏß³Ì, ÄãµÄ³ÌÐò¿ÉÒÔÔÚͬʱ´¦Àí¶à¸öÈÎÎñ. ÿ¸öÏ̶߳¼ÓÐËü×Ô¼ºµÄ¿ØÖÆÁ÷.
ËùÒÔÄã¿ÉÒÔÔÚÒ»¸öÏß³ÌÀï´ÓÎļþ¶ÁÈ¡Êý¾Ý, Áí¸öÏòÆÁÄ»Êä³öÄÚÈÝ.
ΪÁ˱£Ö¤Á½¸öÏ߳̿ÉÒÔͬʱ·ÃÎÊÏàͬµÄÄÚ²¿Êý¾Ý, Python ʹÓÃÁË globalinterpreter lock (È«¾Ö½âÊÍÆ÷Ëø).
ÔÚͬһʱ¼äÖ»¿ÉÄÜÓÐÒ»¸öÏß³ÌÖ´ÐÐPython ´úÂë;
Python ʵ¼ÊÉÏÊÇ×Ô¶¯µØÔÚÒ»¶ÎºÜ¶ÌµÄʱ¼äºóÇл»µ½Ï¸öÏß³ÌÖ´ÐÐ, »òÕߵȴýÒ»¸öÏß³ÌÖ´ÐÐÒ»ÏîÐèҪʱ¼äµÄ²Ù×÷(ÀýÈçµÈ´ýͨ¹ý socket ´«ÊäµÄÊý¾Ý, »òÊÇ´ÓÎļþÖжÁÈ¡Êý¾Ý).
È«¾ÖËøÊÂʵÉϲ¢²»ÄܱÜÃâÄã³ÌÐòÖеÄÎÊÌâ.
¶à¸öÏ̳߳¢ÊÔ·ÃÎÊÏàͬµÄÊý¾Ý»áµ¼ÖÂÒ쳣״̬.
ÀýÈçÒÔϵĴúÂë:
def getitem(key):
item = cache.get(key)
if item is None:
# not in cache; create a new one
item = create_new_item(key)
cache[key] = item
return item
Èç¹û²»Í¬µÄÏß³ÌÏȺóʹÓÃÏàͬµÄ key µ÷ÓÃÕâÀïµÄ getitem ·½·¨, ÄÇôËüÃǺܿÉÄܻᵼÖÂÏàͬµÄ²ÎÊýµ÷ÓÃÁ½´Î create_new_item.
´ó¶àʱºòÕâÑù×öûÓÐÎÊÌâ, µ«ÔÚijЩʱºò»áµ¼ÖÂÑÏÖØ´íÎó.
²»¹ýÄã¿ÉÒÔʹÓà lock objects À´Í¬²½Ïß³Ì.
Ò»¸öÏß³ÌÖ»ÄÜÓµÓÐÒ»¸ö lock object, ÕâÑù¾Í¿ÉÒÔÈ·±£Ä³¸öʱ¿ÌÖ»ÓÐÒ»¸öÏß³ÌÖ´ÐÐ getitem º¯Êý.
'''
'''
ÔÚ´ó¶àÏÖ´ú²Ù×÷ϵͳÖÐ, ÿ¸ö³ÌÐòÔÚËü×ÔÉíµÄ½ø³Ì( process ) ÄÚÖ´ÐÐ.
ÎÒÃÇͨ¹ýÔÚ shell ÖмüÈëÃüÁî»òÖ±½ÓÔڲ˵¥ÖÐÑ¡ÔñÀ´Ö´ÐÐÒ»¸ö³ÌÐò/½ø³Ì.
Python ÔÊÐíÄãÔÚÒ»¸ö½Å±¾ÄÚÖ´ÐÐÒ»¸öеijÌÐò.
´ó¶à½ø³ÌÏà¹Øº¯Êýͨ¹ý os Ä£¿é¶¨Òå.
'''
'''
(¿ÉÑ¡) threading Ä£¿éΪÏß³ÌÌṩÁËÒ»¸ö¸ß¼¶½Ó¿Ú, Èç ÏÂÀý Ëùʾ.
ËüÔ´×Ô | Java µÄÏß³ÌʵÏÖ.
ºÍµÍ¼¶µÄ thread Ä£¿éÏàͬ, Ö»ÓÐÄãÔÚ±àÒë½âÊÍÆ÷ʱ´ò¿ªÁËÏß³ÌÖ§³Ö²Å¿ÉÒÔʹÓÃËü .
ÄãÖ»ÐèÒª¼Ì³Ð Thread Àà, ¶¨ÒåºÃ run ·½·¨, ¾Í¿ÉÒÔ´´½¨Ò» ¸öеÄÏß³Ì.
ʹÓÃʱÊ×ÏÈ´´½¨¸ÃÀàµÄÒ»¸ö»ò¶à¸öʵÀý, È»ºóµ÷Óà start ·½·¨ | .
ÕâÑùÿ¸öʵÀýµÄ run ·½·¨¶¼»áÔËÐÐÔÚËü×Ô¼ºµÄÏß³ÌÀï.
'''
import threading
import time, random
class Counter:
def __init__(self):
self.lock = threading.Lock()
self.value = 0
def increment(self):
self.lock.acquire() # critical section
self.value = value = self.value + 1
self.lock.release()
return value
counter = Counter()
class Worker(threading.Thread):
#def __init__(self):
# threading.Thread.__init__(self)
def run(self):
for i in range(10):
# pretend we're doing something that takes 10?00 ms
value = counter.increment() # increment global counter
time.sleep(random.randint(10, 100) / 1000.0)
print self.getName(), "-- task", i, "finished", value
#
# try it
for i in range(10):
Worker().start() # start a worker |
coherence-project/Coherence | coherence/extern/et.py | Python | mit | 4,662 | 0.001287 | # -*- coding: utf-8 -*-
#
# Licensed under the MIT li | cense
# http://opensource.org/licenses/mit-license.php
#
# Copyright 2006,2007 Frank Scholz <coherence@beebits.net>
# Copyright 2014 Hartmut Goebel <h.goebel@crazy-compilers.com>
#
"""
little helper to get the proper ElementTree package
"""
import re
import exceptions
try:
import cElementTree as ET
import elementtree
except ImportError:
try:
from elementtree import ElementTree as ET
import elem | enttree
except ImportError:
# this seems to be necessary with the python2.5 on the Maemo platform
try:
from xml.etree import cElementTree as ET
from xml import etree as elementtree
except ImportError:
try:
from xml.etree import ElementTree as ET
from xml import etree as elementtree
except ImportError:
raise ImportError("ElementTree: no ElementTree module found, "
"critical error")
utf8_escape = re.compile(eval(r'u"[&<>\"]+"'))
escape = re.compile(eval(r'u"[&<>\"\u0080-\uffff]+"'))
def new_encode_entity(text, pattern=utf8_escape):
def escape_entities(m, map=elementtree.ElementTree._escape_map):
"""
map reserved and non-ascii characters to numerical entities
"""
out = []
append = out.append
for char in m.group():
t = map.get(char)
if t is None:
t = "&#%d;" % ord(char)
append(t)
if type(text) == unicode:
return ''.join(out)
else:
return u''.encode('utf-8').join(out)
try:
if type(text) == unicode:
return elementtree.ElementTree._encode(
escape.sub(escape_entities, text), 'ascii')
else:
return elementtree.ElementTree._encode(
utf8_escape.sub(escape_entities, text.decode('utf-8')), 'utf-8')
except TypeError:
elementtree.ElementTree._raise_serialization_error(text)
elementtree.ElementTree._encode_entity = new_encode_entity
# it seems there are some ElementTree libs out there
# which have the alias XMLParser and some that haven't.
#
# So we just use the XMLTreeBuilder method for now
# if XMLParser isn't available.
if not hasattr(ET, 'XMLParser'):
def XMLParser(encoding='utf-8'):
return ET.XMLTreeBuilder()
ET.XMLParser = XMLParser
def namespace_map_update(namespaces):
for uri, prefix in namespaces.items():
elementtree.ElementTree.register_namespace(prefix, uri)
class ElementInterface(elementtree.ElementTree._ElementInterface): pass
def indent(elem, level=0):
"""
generate pretty looking XML, based upon:
http://effbot.org/zone/element-lib.htm#prettyprint
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
for elem in elem:
indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def parse_xml(data, encoding="utf-8", dump_invalid_data=False):
try:
parser = ET.XMLParser(encoding=encoding)
except exceptions.TypeError:
parser = ET.XMLParser()
# my version of twisted.web returns page_infos as a dictionary in
# the second item of the data list
# :fixme: This must be handled where twisted.web is fetching the data
if isinstance(data, (list, tuple)):
data = data[0]
try:
data = data.encode(encoding)
except UnicodeDecodeError:
pass
# Guess from who we're getting this?
data = data.replace('\x00', '')
try:
parser.feed(data)
except Exception, error:
if dump_invalid_data:
print error, repr(data)
parser.close()
raise
else:
return ET.ElementTree(parser.close())
def qname(tag, ns=None):
if not ns:
return tag
return "{%s}%s" % (ns, tag)
def textElement(parent, tag, namespace, text):
"""Create a subelement with text content."""
elem = ET.SubElement(parent, qname(tag, namespace))
elem.text = text
return elem
def textElementIfNotNone(parent, tag, namespace, text):
"""If text is not none, create a subelement with text content."""
if text is None:
return
if not isinstance(text, basestring):
text = unicode(text)
return textElement(parent, tag, namespace, text)
|
andreikop/qutepart | qutepart/syntax/data/regenerate-definitions-db.py | Python | lgpl-2.1 | 3,241 | 0.005554 | #!/usr/bin/env python3
import os.path
import json
import sys
_MY_PATH = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(_MY_PATH, '..', '..', '..'))
from qutepart.syntax.loader import loadSyntax
from qutepart.syntax import SyntaxManager, Syntax
def _add_php(targetFileName, srcFileName):
os.system("./generate-php.pl > xml/{} < xml/{}".format(targetFileName, srcFileName))
def main():
os.chdir(_MY_PATH)
_add_php('javascript-php.xml', 'javascript.xml')
_add_php('css-php.xml', 'css.xml')
_add_php('html-php.xml', 'html.xml')
xmlFilesPath = os.path.join(_MY_PATH, 'xml')
xmlFileNames = [fileName for fileName in os.listdir(xmlFilesPath) \
if fileName.endswith('.xml')]
syntaxNameToXmlFileName = {}
mimeTypeToXmlFileName = {}
extensionToXmlFileName = {}
firstLineToXmlFileName = {}
for xmlFileName in xmlFileNames:
xmlFilePath = os.path.join(xmlFilesPath, xmlFileName)
syntax = Syntax(None)
loadSyntax(syntax, xmlFilePath)
if not syntax.name in syntaxNameToXmlFileName or \
syntaxNameToXmlFileName[syntax.name][0] < syntax.priority: |
syntaxNameToXmlFileName[syntax.name] = (syntax.priority, xmlFileNam | e)
if syntax.mimetype:
for mimetype in syntax.mimetype:
if not mimetype in mimeTypeToXmlFileName or \
mimeTypeToXmlFileName[mimetype][0] < syntax.priority:
mimeTypeToXmlFileName[mimetype] = (syntax.priority, xmlFileName)
if syntax.extensions:
for extension in syntax.extensions:
if extension not in extensionToXmlFileName or \
extensionToXmlFileName[extension][0] < syntax.priority:
extensionToXmlFileName[extension] = (syntax.priority, xmlFileName)
if syntax.firstLineGlobs:
for glob in syntax.firstLineGlobs:
if not glob in firstLineToXmlFileName or \
firstLineToXmlFileName[glob][0] < syntax.priority:
firstLineToXmlFileName[glob] = (syntax.priority, xmlFileName)
# remove priority, leave only xml file names
for dictionary in (syntaxNameToXmlFileName,
mimeTypeToXmlFileName,
extensionToXmlFileName,
firstLineToXmlFileName):
newDictionary = {}
for key, item in dictionary.items():
newDictionary[key] = item[1]
dictionary.clear()
dictionary.update(newDictionary)
# Fix up php first line pattern. It contains <?php, but it is generated from html, and html doesn't contain it
firstLineToXmlFileName['<?php*'] = 'html-php.xml'
result = {
'syntaxNameToXmlFileName' : syntaxNameToXmlFileName,
'mimeTypeToXmlFileName' : mimeTypeToXmlFileName,
'extensionToXmlFileName' : extensionToXmlFileName,
'firstLineToXmlFileName' : firstLineToXmlFileName,
}
with open('syntax_db.json', 'w', encoding='utf-8') as syntaxDbFile:
json.dump(result, syntaxDbFile, sort_keys=True, indent=4)
print('Done. Do not forget to commit the changes')
if __name__ == '__main__':
main()
|
coala/coala | tests/parsing/CliParsingTest.py | Python | agpl-3.0 | 3,059 | 0 | import argparse
import unittest
from coalib.parsing.CliParsing import parse_cli, check_conflicts
class CliParserTest(unittest.TestCase):
def setUp(self):
self.test_arg_parser = argparse.ArgumentParser()
self.test_arg_parser.add_argument('-t', nargs='+', dest='test')
self.test_arg_parser.add_argument('-S',
'--settings',
nargs='+',
dest='settings')
@staticmethod
def dict_from_sections(parsed_sections):
parsed_dict = {}
for section_name, section in parsed_sections.items():
parsed_dict[section_name] = (
set([(key,
str(v | alue)) for key, value in section.contents.items()]))
return parsed_dict
def test_parse_cli(self):
# regular parse
parsed_sections = parse_cli(
['-t', 'ignored1', 'ignored2',
'-t', 'taken',
'-S', 'section1.key1,section2.key2=value1,value2',
'section2.key2=only_this_value',
'SECTION2.key2a=k2a',
'invalid.=shouldnt_be_shown',
'.=not_eith | er',
'.key=only_in_cli',
'default_key1,default_key2=single_value',
'default_key3=first_value,second_value'],
arg_parser=self.test_arg_parser)
expected_dict = {
'cli': {
('test', 'taken'),
('key', 'only_in_cli'),
('default_key1', 'single_value'),
('default_key2', 'single_value'),
('default_key3', 'first_value,second_value')},
'section1': {
('key1', 'value1,value2')},
'section2': {
('key2', 'only_this_value'),
('key2a', 'k2a')}}
self.assertEqual(parsed_sections['cli'].name, 'cli')
self.assertEqual(self.dict_from_sections(parsed_sections),
expected_dict)
def test_check_conflicts(self):
sections = parse_cli(arg_list=['--save', '--no-config'])
with self.assertRaisesRegex(SystemExit, '2') as cm:
check_conflicts(sections)
self.assertEqual(cm.exception.code, 2)
sections = parse_cli(arg_list=['--no-config', '-S', 'val=42'])
self.assertTrue(check_conflicts(sections))
sections = parse_cli(arg_list=['--relpath'])
with self.assertRaisesRegex(SystemExit, '2') as cm:
check_conflicts(sections)
self.assertEqual(cm.exception.code, 2)
sections = parse_cli(arg_list=['--output', 'iraiseValueError'])
with self.assertRaisesRegex(SystemExit, '2') as cm:
check_conflicts(sections)
self.assertEqual(cm.exception.code, 2)
sections = parse_cli(arg_list=['--no-config', '--config', '.coafile'])
with self.assertRaisesRegex(SystemExit, '2') as cm:
check_conflicts(sections)
self.assertEqual(cm.exception.code, 2)
|
FreeJournal/freejournal | controllers/controller.py | Python | mit | 15,637 | 0.003645 | from models.keyword import Keyword
from models.document import Document
from models.collection import Collection
from bitmessage.bitmessage import Bitmessage
from models.fj_message import FJMessage
from models.signature import Signature
from cache.cache import Cache
from config import DOCUMENT_DIRECTORY_PATH, MAIN_CHANNEL_ADDRESS
from freenet.FreenetConnection import FreenetConnection
from jsonschema import *
from models.json_schemas import *
from sqlalchemy.exc import IntegrityError
from random import randint
from async import run_as_thread
import json
import time
import base64
import datetime
import hashlib
import sys
import os
class Controller:
def __init__(self):
self.connection = Bitmessage()
self.cache = Cache()
self.download_threads = set()
def _check_signature(self, fj_message):
"""
Checks that the signature is the correct sha256 hash of the address's public keys and payload
:param fj_message: the message containing the collection and signature
:return: True if the signatures match, False otherwise
"""
h = hashlib.sha256(fj_message["pubkey"] + fj_message['payload']).hexdigest()
if h == fj_message["signature"]:
print "Signature Verified"
return True
else:
print "Signature Not Verified"
return False
def _save_document(self, data, file_name, testing_mode=False):
"""
Private helper function for writing file data to disk.
Creates the file to the directory specified in config.py.
:param data: the file data
:param file_name: the name of the file
:return: a boolean indicating success
"""
try:
if testing_mode:
file_path = file_name
else:
file_path = os.path.expanduser(DOCUMENT_DIRECTORY_PATH) + file_name
open(file_path, 'w').write(data)
return True
except Exception as e:
return False
def _get_document(self, hash):
    """
    Private helper function for getting document data
    from freenet.
    :param hash: the Content Hash Key for a document
    :return: the file data if successful, None otherwise
    """
    data = None
    #Try obtaining a freenet connection
    try:
        freenet_connection = FreenetConnection()
    except Exception as e:
        print("Couldn't connect to freenet")
        return data
    try:
        data = freenet_connection.get(hash)
    except Exception as e:
        # Fetch failures are expected (the key may not be retrievable);
        # fall through and return None.
        pass
    return data
def _hash_document_filenames(self, documents, collection):
    """
    Private helper function for hashing a collection of
    documents file names so that file name conflicts will be
    rare.
    :param documents: a list of document objects
    :param collection: the collection the renamed documents belong to
    """
    for document in documents:
        #Create a new file name out of a hash to deal with possible naming conflicts
        file_name = document.filename
        if not document.filename:
            # No original name: derive one from the title plus a random suffix.
            file_name = document.title + str(randint(0, 100))
        name, extension = os.path.splitext(file_name)  # only the extension is reused
        hash_name = document.hash
        new_file_name = hash_name + extension
        #Save the new file name to the cache so it can be viewed later
        document.filename = new_file_name
        self.cache.insert_new_document_in_collection(document, collection)
@run_as_thread
def _download_documents(self, collection_title, documents):
    """
    A function that downloads documents from a collection in a new thread.
    Decorated with @run_as_thread, so the call returns a thread handle
    immediately (the caller stores it in self.download_threads).
    :param collection_title: the title of the collection
    :param documents: the list of document objects to download
    """
    print("Downloading documents for " + collection_title)
    print("Number of Documents to download: " + str(len(documents)))
    doc_counter = 0
    for document in documents:
        # Store and validate that the document has a file name
        file_name = document.filename
        if not file_name:
            # Fallback name; counter keeps generated names distinct.
            file_name = collection_title + str(doc_counter) + document.title
            doc_counter += 1
        # Try obtaining the file data from freenet
        data = self._get_document(document.hash)
        if not data:
            print("Couldn't download " + file_name + " from freenet")
            continue
        # If the file data was successfully downloaded, save the data to disk
        success = self._save_document(data, file_name)
        if success:
            print("Successfully downloaded " + file_name + " from freenet")
        else:
            print("Couldn't save document data to disk (check that the document"
                  + " directory path exists and appropriate permissions are set")
def _build_docs_keywords(self, payload, collection):
    """
    Builds a list of Keyword objects and a list of Document objects from the received json.
    :param payload: The payload of the FJ Message including the documents and keywords
    :param collection: the Collection model the rebuilt objects are appended to
    :return: Two lists representing the documents and keywords of the FJ Message
    """
    # Reuse cached keyword rows when available so duplicates share one record.
    for key in payload["keywords"]:
        db_key = self.cache.get_keyword_by_id(key["id"])
        if db_key is not None:
            collection.keywords.append(db_key)
        else:
            collection.keywords.append(Keyword(name=key["name"]))

    # Same de-duplication for documents, keyed by the content hash.
    for doc in payload["documents"]:
        db_doc = self.cache.get_document_by_hash(doc["hash"])
        if db_doc is not None:
            collection.documents.append(db_doc)
        else:
            collection.documents.append(Document(collection_address=doc["address"], description=doc["description"],
                                                 hash=doc["hash"], title=doc["title"], filename=doc["filename"], accesses=doc["accesses"]))
def _cache_collection(self, payload, message):
"""
Checks to see if this collection is already in the cache. If it is we update the collection with the new data.
Otherwise a new collection is made and cached.
:param message: the Bitmessage message containing an FJ_message
:param payload: the contents of the FJ_message
"""
# Grabbing the text representations of the documents and keywords and rebuilding them
#docs, keywords = self._build_docs_keywords(payload)
cached_collection = self.cache.get_collection_with_address(payload["address"])
if cached_collection is None:
collection_model = Collection(
title=payload["title"],
description=payload["description"],
address=payload["address"],
btc=payload["btc"],
creation_date=datetime.datetime.strptime(payload["creation_date"], "%A, %d. %B %Y %I:%M%p"),
oldest_date=datetime.datetime.strptime(payload["oldest_date"], "%A, %d. %B %Y %I:%M%p"),
latest_broadcast_date=datetime.datetime.strptime(payload["latest_broadcast_date"], "%A, %d. %B %Y %I:%M%p"),
votes=payload['votes'],
votes_last_checked=datetime.datetime.strptime(payload["votes_last_checked"], "%A, %d. %B %Y %I:%M%p"),
)
self._build_docs_keywords(payload, collection_model)
signature = Signature(pubkey=message["pubkey"], signature=message["signature"], address=payload["address"])
try:
self.cache.insert_new_collection(collection_model)
self.cache.insert_new_collection(signature)
self._hash_document_filenames(collection_model.documents, collection_model)
self.download_threads.add(self._download_documents(collection_model.title, collection_model.documents))
print "Cached New Collection"
return True
except IntegrityError as m:
print m.message
r |
joyhope/open62541 | tools/pyUANamespace/ua_namespace.py | Python | lgpl-3.0 | 30,372 | 0.013499 | #!/usr/bin/env/python
# -*- coding: utf-8 -*-
###
### Author: Chris Iatrou (ichrispa@core-vector.net)
### Version: rev 13
###
### This program was created for educational purposes and has been
### contributed to the open62541 project by the author. All licensing
### terms for this source is inherited by the terms and conditions
### specified for by the open62541 project (see the projects readme
### file for more information on the LGPL terms and restrictions).
###
### This program is not meant to be used in a production environment. The
### author is not liable for any complications arising due to the use of
### this program.
###
from __future__ import print_function
import sys
from time import struct_time, strftime, strptime, mktime
from struct import pack as structpack
from logger import *;
from ua_builtin_types import *;
from ua_node_types import *;
from ua_constants import *;
from open62541_MacroHelper import open62541_MacroHelper
def getNextElementNode(xmlvalue):
  """Return the next sibling of *xmlvalue* that is an ELEMENT_NODE.

  Skips intervening non-element siblings (text nodes, comments, ...).
  Returns None when *xmlvalue* is None or no further element sibling exists.
  """
  if xmlvalue is None:
    return None
  xmlvalue = xmlvalue.nextSibling
  # Walk forward until an element node is found or the sibling chain ends.
  # ('is None' identity tests replace the non-idiomatic '== None'.)
  while xmlvalue is not None and xmlvalue.nodeType != xmlvalue.ELEMENT_NODE:
    xmlvalue = xmlvalue.nextSibling
  return xmlvalue
###
### Namespace Organizer
###
class opcua_namespace():
""" Class holding and managing a set of OPCUA nodes.
This class handles parsing XML description of namespaces, instantiating
nodes, linking references, graphing the namespace and compiling a binary
representation.
Note that nodes assigned to this class are not restricted to having a
single namespace ID. This class represents the entire physical address
space of the binary representation and all nodes that are to be included
in that segment of memory.
"""
# Class-level defaults; instances rebind most of these in __init__.
nodes = []                      # all parsed nodes (any namespace index)
nodeids = {}                    # node id string -> node, for lookup/duplicates
aliases = {}                    # alias string -> node id string (from <Aliases>)
__linkLater__ = []              # references whose string targets still need linking
__binaryIndirectPointers__ = [] # nodes needing indirect pointers in the binary image
name = ""                       # human-readable namespace name
knownNodeTypes = ""             # replaced by a list of tag names in __init__
def __init__(self, name):
    """Create an empty namespace called *name* with no nodes or aliases."""
    self.nodes = []
    # XML element classes this parser knows how to instantiate.
    # NOTE(review): 'referencetype' appears twice in this list — one entry
    # is redundant; verify whether another type was intended.
    self.knownNodeTypes = ['variable', 'object', 'method', 'referencetype', \
                           'objecttype', 'variabletype', 'methodtype', \
                           'datatype', 'referencetype', 'aliases']
    self.name = name
    self.nodeids = {}
    self.aliases = {}
    self.__binaryIndirectPointers__ = []
def linkLater(self, pointer):
    """ Called by nodes or references who have parsed an XML reference to a
        node represented by a string.

        No return value

        XML String representations of references have the form 'i=xy' or
        'ns=1;s="This unique Node"'. Since during the parsing of this attribute
        only a subset of nodes are known/parsed, this reference string cannot be
        linked when encountered.

        References register themselves with the namespace to have their target
        attribute (string) parsed by linkOpenPointers() when all nodes are
        created, so that target can be dereferenced an point to an actual node.
    """
    # Deferred: resolved in bulk once every node object exists.
    self.__linkLater__.append(pointer)
def getUnlinkedPointers(self):
    """ Return the list of references registered for linking during the next call
        of linkOpenPointers()
    """
    # Returns the live list, not a copy — callers must not mutate it.
    return self.__linkLater__
def unlinkedItemCount(self):
    """ Returns the number of unlinked references that will be processed during
        the next call of linkOpenPointers()
    """
    return len(self.__linkLater__)
def buildAliasList(self, xmlelement):
    """ Parses the <Alias> XML Element present in most XML NodeSet definitions.

        No return value

        Contents of the Alias element are stored in a dictionary for further
        dereferencing during pointer linkage (see linkOpenPointer()).
    """
    if not xmlelement.tagName == "Aliases":
      log(self, "XMLElement passed is not an Aliaslist", LOG_LEVEL_ERROR)
      return
    for al in xmlelement.childNodes:
      if al.nodeType == al.ELEMENT_NODE:
        if al.hasAttribute("Alias"):
          aliasst = al.getAttribute("Alias")
          # Python 2 needs an explicit unicode conversion; Python 3 str is
          # already unicode.
          if sys.version_info[0] < 3:
            aliasnd = unicode(al.firstChild.data)
          else:
            aliasnd = al.firstChild.data
          # First definition wins; conflicting redefinitions are logged.
          if not aliasst in self.aliases:
            self.aliases[aliasst] = aliasnd
            log(self, "Added new alias \"" + str(aliasst) + "\" == \"" + str(aliasnd) + "\"")
          else:
            if self.aliases[aliasst] != aliasnd:
              log(self, "Alias definitions for " + aliasst + " differ. Have " + self.aliases[aliasst] + " but XML defines " + aliasnd + ". Keeping current definition.", LOG_LEVEL_ERROR)
def getNodeByBrowseName(self, idstring):
    """ Returns the first node in the nodelist whose browseName matches idstring.

        Logs an error when the browse name is ambiguous (multiple hits);
        returns None when nothing matches.
    """
    matches = [node for node in self.nodes if idstring == str(node.browseName())]
    if len(matches) > 1:
      log(self, "Found multiple nodes with same ID!?", LOG_LEVEL_ERROR)
    if not matches:
      return None
    return matches[0]
def getNodeByIDString(self, idstring):
    """ Returns the first node in the nodelist whose id string representation
        matches idstring.

        Logs an error when the id is ambiguous (multiple hits); returns
        None when nothing matches.
    """
    matches = [node for node in self.nodes if idstring == str(node.id())]
    if len(matches) > 1:
      log(self, "Found multiple nodes with same ID!?", LOG_LEVEL_ERROR)
    if not matches:
      return None
    return matches[0]
def createNode(self, ndtype, xmlelement):
""" createNode is instantiates a node described by xmlelement, its type being
defined by the string ndtype.
No return value
If the xmlelement is an <Alias>, the contents will be parsed and stored
for later dereferencing during pointer linking (see linkOpenPointers).
Recognized types are:
* UAVariable
* UAObject
* UAMethod
* UAView
* UAVariableType
* UAObjectType
* UAMethodType
* UAReferenceType
* UADataType
For every recognized type, an appropriate node class is added to the node
list of the namespace. The NodeId of the given node is created and parsing
of the node attributes and elements is delegated to the parseXML() and
parseXMLSubType() functions of the instantiated class.
If the NodeID attribute is non-unique in the node list, the creation is
deferred and an error is logged.
"""
if not isinstance(xmlelement, dom.Element):
log(self, "Error: Can not create node from invalid XMLElement", LOG_LEVEL_ERROR)
return
# An ID is manditory for everything but aliases!
id = None
for idname in ['NodeId', 'NodeID', 'nodeid']:
if xmlelement.hasAttribute(idname):
id = xmlelement.getAttribute(idname)
if ndtype == 'aliases':
self.buildAliasList(xmlelement)
return
elif id == None:
log(self, "Error: XMLElement has no id, node will not be created!", LOG_LEVEL_INFO)
return
else:
id = opcua_node_id_t(id)
if str(id) in self.nodeids:
# Normal behavior: Do not allow duplicates, first one wins
#log(self, "XMLElement with duplicate ID " + str(id) + " found, node will not be created!", LOG_LEVEL_ERROR)
#return
# Open62541 behavior for header generation: Replace the duplicate with the new node
log(self, "XMLElement with duplicate ID " + str(id) + " found, node will be replaced!", LOG_LEVEL_INFO)
nd = self.getNodeByIDString(str(id))
self.nodes.remove(nd)
self.nodeids.pop(str(nd.id()))
node = None
if (ndtype == 'variable'):
node = opcua_node_variable_t(id, self)
elif (ndtype == 'object'):
node = opcua_node_object_t(id, self)
elif (ndtype == 'method'):
node = opcua_node_method_t(id, self)
elif (ndtype == 'objecttype'):
node = opcua_node_objectType_t(id, self)
elif (ndtype == 'variabletype'):
node = opcua_node_variableType_t(id, self)
elif (ndtype == 'methodtype'):
node = opcua_node_methodType_t(id, self)
elif (ndtype == 'datatype'):
node = opcua_node_dataType_t(id, self)
elif (ndtype == 'referencetype'):
node = opcua_node_referenceType_t( |
kkzzzzzz/Deep-Learing | Sensor/code.py | Python | mit | 991 | 0.00904 | #coding:utf-8
# Perceptron: y = f(Wn * x + b)
# This code implements a logical AND. The last element of every input vector
# is fixed at 1, so the bias b can be learned as an ordinary weight whose
# feature value is always 1: y = f(Wn+1 * [x, 1]), where Wn+1 is b.
# https://www.zybuluo.com/hanbingtao/note/433855
from numpy import array, dot, random
from random import choice
def fun_1_or_0(x):
    """Step activation: return 1 for x >= 0, otherwise 0."""
    if x < 0:
        return 0
    return 1
# Training set for logical AND: inputs are [x1, x2, bias-input] with the
# bias input fixed at 1; the target is AND(x1, x2).
training_data = [(array([0, 0, 1]), 0), (array([0, 1, 1]), 0),
                 (array([1, 0, 1]), 0), (array([1, 1, 1]), 1)]
# Random initial weights; weights[2] plays the role of the bias term b.
weights = random.random(3)
print("before traning, weights:",weights)
learning_rate = 0.2   # step size for the perceptron update rule
num_iteratios = 100   # number of stochastic training iterations
for i in range(num_iteratios):
    # Pick a random sample and apply the perceptron learning rule:
    # w <- w + eta * (target - prediction) * x
    input, truth = choice(training_data)
    result = dot(weights, input)
    error = truth - fun_1_or_0(result)
    weights += learning_rate * error * input
print("after traning, weights:",weights)
# Report the learned decision for each training input.
for x, _ in training_data:
    result = dot(x, weights)
    print("{}:{}->{}".format(x[:2], result, fun_1_or_0(result)))
|
Jay-Jay-D/LeanSTP | Algorithm.Python/AddUniverseSelectionModelAlgorithm.py | Python | apache-2.0 | 2,903 | 0.013444 | # QUANTCONNECT.COM - Democratizing Finance, Empowering Individuals.
# Lean Algorithmic Trading Engine v2.0. Copyright 2014 QuantConnect Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from clr import AddReference
AddReference("System")
AddReference("QuantConnect.Algorithm")
AddReference("QuantConnect.Algorithm.Framework")
AddReference("QuantConnect.Common")
from System import *
from QuantConnect import *
from QuantConnect.Orders import *
from QuantConnect.Algorithm import *
from QuantConnect.Algorithm.Framework import *
from QuantConnect.Algorithm.Framework.Alphas import *
from QuantConnect.Algorithm.Framework.Execution import *
from QuantConnect.Algorithm.Framework.Portfolio import *
from QuantConnect.Algorithm.Framework.Selection import *
from datetime import timedelta
### <summary>
### Test algorithm using 'QCAlgorithm.AddUniverseSelection(IUniverseSelectionModel)'
### </summary>
class AddUniverseSelectionModelAlgorithm(QCAlgorithm):
    '''Regression-style test of QCAlgorithm.AddUniverseSelection: stacks
    several ManualUniverseSelectionModel instances and asserts the combined
    universe at the end of the run.'''

    def Initialize(self):
        ''' Initialise the data and resolution required, as well as the cash and start-end dates for your algorithm. All algorithms must initialized.'''

        self.SetStartDate(2013,10,8)   #Set Start Date
        self.SetEndDate(2013,10,11)    #Set End Date
        self.SetCash(100000)           #Set Strategy Cash
        self.UniverseSettings.Resolution = Resolution.Daily;

        # set algorithm framework models
        self.SetAlpha(ConstantAlphaModel(InsightType.Price, InsightDirection.Up, timedelta(minutes = 20), 0.025, None))
        self.SetPortfolioConstruction(EqualWeightingPortfolioConstructionModel())
        self.SetExecution(ImmediateExecutionModel())

        # Base universe (SPY) plus two added selections; the second SPY below
        # is a deliberate duplicate that the engine must ignore.
        self.SetUniverseSelection(ManualUniverseSelectionModel([ Symbol.Create("SPY", SecurityType.Equity, Market.USA) ]))
        self.AddUniverseSelection(ManualUniverseSelectionModel([ Symbol.Create("AAPL", SecurityType.Equity, Market.USA) ]))
        self.AddUniverseSelection(ManualUniverseSelectionModel(
            Symbol.Create("SPY", SecurityType.Equity, Market.USA), # duplicate will be ignored
            Symbol.Create("FB", SecurityType.Equity, Market.USA)))

    def OnEndOfAlgorithm(self):
        # Expect exactly SPY, AAPL and FB (duplicate SPY collapsed).
        if self.UniverseManager.Count != 3:
            raise ValueError("Unexpected universe count")
        if self.UniverseManager.ActiveSecurities.Count != 3:
            raise ValueError("Unexpected active securities")
brutalic/pynet_brutal | class6/library/eos_vlan.py | Python | apache-2.0 | 18,361 | 0.001362 | #!/usr/bin/python
#
# Copyright (c) 2015, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
DOCUMENTATION = """
---
module: eos_vlan
short_description: Manage VLAN resources in EOS
description:
- The eos_vlan module manages VLAN configurations on Arista EOS nodes.
version_added: 1.0.0
category: Bridging
author: Arista EOS+
requirements:
- Arista EOS 4.13.7M or later with command API enabled
- Python Client for eAPI 0.3.0 or later
notes:
- All configuration is idempotent unless otherwise specified
- Supports eos metaparameters for using the eAPI transport
- Supports stateful resource configuration.
options:
vlanid:
description:
- The unique VLAN identifier associated with this resource. The value
for this identiifer must be in the range of 1 to 4094.
required: true
default: null
choices: []
aliases: []
version_added: 1.0.0
name:
description:
- An ASCII string identifer for this VLAN. The default value for the
VLAN name is VLANxxxx where xxxx is the four digit VLAN ID.
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
enable:
description:
- Configures the administrative state for the VLAN. If enable is True
then the VLAN is administratively enabled. If enable is False then
the VLAN is administratively disabled.
default: true
required: false
choices: ['True', 'False']
aliases: []
version_added: 1.0.0
trunk_groups:
description:
- Configures the list of trunk groups associated with the VLAN in the
node configuration. The list of trunk groups is a comma separated
list. The default value for trunk_groups is an empty list.
- "Note: The list of comma delimited values must not include spaces."
required: false
default: null
choices: []
aliases: []
version_added: 1.0.0
"""
EXAMPLES = """
- name: ensures vlan 100 is configured
eos_vlan: vlanid=100 state=present
- name: ensures vlan 200 is not configured
eos_vlan: vlanid=200 state=absent
- name: configures the vlan name
eos_vlan: vlanid=1 name=TEST_VLAN_1
- name: configure trunk groups for vlan 10
eos_vlan: vlanid=10 trunk_groups=tg1,tg2,tg3
"""
#<<EOS_COMMON_MODULE_START>>
import syslog
import collections
from ansible.module_utils.basic import *
try:
import pyeapi
PYEAPI_AVAILABLE = True
except ImportError:
PYEAPI_AVAILABLE = False
DEFAULT_SYSLOG_PRIORITY = syslog.LOG_NOTICE
DEFAULT_CONNECTION = 'localhost'
TRANSPORTS = ['socket', 'http', 'https', 'http_local']
class EosConnection(object):
    """Resolves eAPI connection parameters (an eapi.conf profile merged with
    explicit overrides) and returns a connected pyeapi Node."""

    # Explicit parameters that override values taken from the profile.
    __attributes__ = ['username', 'password', 'host', 'transport', 'port']

    def __init__(self, **kwargs):
        self.connection = kwargs['connection']  # profile name; required key
        self.transport = kwargs.get('transport')
        self.username = kwargs.get('username')
        self.password = kwargs.get('password')
        self.host = kwargs.get('host')
        self.port = kwargs.get('port')
        self.config = kwargs.get('config')      # optional eapi.conf path

    def connect(self):
        """Build and sanity-check a pyeapi Node.

        :return: a pyeapi Node that answered 'show version'
        :raises ValueError: missing transport or unreachable node
        """
        if self.config is not None:
            pyeapi.load_config(self.config)
        config = dict()
        if self.connection is not None:
            config = pyeapi.config_for(self.connection)
            if not config:
                # NOTE(review): msg is built but never raised or used here —
                # this looks like a dropped error path; confirm intent.
                msg = 'Connection name "{}" not found'.format(self.connection)
        # Explicit attributes take precedence over the profile values.
        for key in self.__attributes__:
            if getattr(self, key) is not None:
                config[key] = getattr(self, key)
        if 'transport' not in config:
            raise ValueError('Connection must define a transport')
        connection = pyeapi.client.make_connection(**config)
        node = pyeapi.client.Node(connection, **config)
        # Probe the connection; any failure is surfaced as ValueError.
        try:
            node.enable('show version')
        except (pyeapi.eapilib.ConnectionError, pyeapi.eapilib.CommandError):
            raise ValueError('unable to connect to {}'.format(node))
        return node
class EosAnsibleModule(AnsibleModule):
# Arguments shared by every eos_* module (connection/transport selection
# plus debug/logging switches); merged into each module's argument_spec.
meta_args = {
    'config': dict(),
    'username': dict(),
    'password': dict(),
    'host': dict(),
    'connection': dict(default=DEFAULT_CONNECTION),
    'transport': dict(choices=TRANSPORTS),
    'port': dict(),
    'debug': dict(type='bool', default='false'),
    'logging': dict(type='bool', default='true')
}

# Extra arguments only for modules that manage stateful resources.
stateful_args = {
    'state': dict(default='present', choices=['present', 'absent']),
}
def __init__(self, stateful=True, autorefresh=False, *args, **kwargs):
    """Extend AnsibleModule with eAPI connectivity, stateful-resource
    support and debug/logging plumbing.

    :param stateful: include the 'state' argument and track desired_state
    :param autorefresh: refresh the cached instance after flushes
    """
    kwargs['argument_spec'].update(self.meta_args)
    self._stateful = stateful
    if stateful:
        kwargs['argument_spec'].update(self.stateful_args)
    ## Ok, so in Ansible 2.0,
    ## AnsibleModule.__init__() sets self.params and then
    ## calls self.log()
    ## (through self._log_invocation())
    ##
    ## However, self.log() (overridden in EosAnsibleModule)
    ## references self._logging
    ## and self._logging (defined in EosAnsibleModule)
    ## references self.params.
    ##
    ## So ... I'm defining self._logging without "or self.params['logging']"
    ## *before* AnsibleModule.__init__() to avoid a "ref before def".
    ##
    ## I verified that this works with Ansible 1.9.4 and 2.0.0.2.
    ## The only caveat is that the first log message in
    ## AnsibleModule.__init__() won't be subject to the value of
    ## self.params['logging'].
    self._logging = kwargs.get('logging')
    super(EosAnsibleModule, self).__init__(*args, **kwargs)
    self.result = dict(changed=False, changes=dict())
    self._debug = kwargs.get('debug') or self.boolean(self.params['debug'])
    self._logging = kwargs.get('logging') or self.params['logging']
    self.log('DEBUG flag is %s' % self._debug)
    self.debug('pyeapi_version', self.check_pyeapi())
    self.debug('stateful', self._stateful)
    self.debug('params', self.params)
    self._attributes = self.map_argument_spec()
    self.validate()
    self._autorefresh = autorefresh
    # NOTE(review): self._node is assigned from EosConnection and then
    # immediately overwritten by self.connect() — the first result is
    # discarded; confirm whether both calls are required.
    self._node = EosConnection(**self.params)
    self._node.connect()
    self._node = self.connect()
    self._instance = None
    self.desired_state = self.params['state'] if self._stateful else None
    self.exit_after_flush = kwargs.get('exit_after_flush')
@property
def instance(self):
if self._instance:
return self._instance
func = self.func('instance')
if not func:
self.fail('Module does not support "instance"')
try:
self._instance = func(self)
|
yuyuyu101/VirtualBox-NetBSD | src/VBox/GuestHost/OpenGL/state_tracker/state_defs.py | Python | gpl-2.0 | 1,397 | 0.004295 | # Copyright (c) 2001, Stanford University
# All rights reserved.
#
# See the file LICENSE.txt for information on redistributing this software.
import sys
import apiutil

# Emit the copyright header for the generated module-definition file.
apiutil.CopyrightDef()

# .def file preamble (this generator is Python 2: print statements).
print """DESCRIPTION ""
EXPORTS
"""

# NOTE(review): 'keys' appears unused below — presumably kept for parity
# with sibling generators; confirm before removing.
keys = apiutil.GetDispatchedFunctions(sys.argv[1]+"/APIspec.txt")

# One exported crState* symbol per special state function.
for func_name in apiutil.AllSpecials( 'state' ):
    print "crState%s" % func_name

for func_name in apiutil.AllSpecials( 'state_feedback' ):
    print "crStateFeedback%s" % func_name

for func_name in apiutil.AllSpecials( 'state_select' ):
    print "crStateSelect%s" % func_name

# Fixed list of always-exported symbols.
print """crStateInit
crStateReadPixels
crStateGetChromiumParametervCR
crStateCreateContext
crStateCreateContextEx
crStateDestroyContext
crStateDiffContext
crStateSwitchContext
crStateMakeCurrent
crStateSetCurrent
crStateFlushFunc
crStateFlushArg
crStateDiffAPI
crStateSetCurrentPointers
crStateResetCurrentPointers
crStateCurrentRecover
crStateTransformUpdateTransform
crStateColorMaterialRecover
crStateError
crStateUpdateColorBits
crStateClientInit
crStateGetCurrent
crStateLimitsInit
crStateMergeExtensions
crStateRasterPosUpdate
crStateTextureCheckDirtyImages
crStateExtensionsInit
crStateSetExtensionString
crStateUseServerArrays
crStateUseServerArrayElements
crStateComputeVersion
crStateTransformXformPointMatrixf
crStateTransformXformPointMatrixd
crStateInitMatrixStack
crStateLoadMatrix
__currentBits
"""
|
def pre_listen(task_id, transport, attr_array):
    """Return a copy of attr_array in which the 'port' attribute of the
    matching transport scope is incremented by one; every other attribute
    passes through unchanged."""
    adjusted = []
    for scope, name, value in attr_array:
        if scope == transport and name == 'port':
            value = str(int(value) + 1)
        adjusted.append((scope, name, value))
    return adjusted
|
BackupTheBerlios/cuon-svn | cuon_server/WEB/html/fromGladeToHtml.py | Python | gpl-3.0 | 9,485 | 0.017294 | # -*- coding: utf-8 -*-
##Copyright (C) [2010] [Jürgen Hamel, D-32584 Löhne]
##This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as
##published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version.
##This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied
##warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
##for more details.
##You should have received a copy of the GNU General Public License along with this program; if not, write to the
##Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import sys
import xml.dom.minidom as dom
#from xml.etree.ElementTree import ElementTree
class createHtml():
def __init__(self, gladeFile):
    """Parse <gladeFile>.glade and immediately convert it (see startConvert)."""
    self.Doc = dom.parse(gladeFile +'.glade')
    self.HTML = 'XUL'      # output dialect: 'XUL' or plain HTML
    self.s = ''            # accumulated output markup
    self.sEnd = []         # stack of closing tags, emitted in reverse at the end
    self.sMenuEnd = None   # pending closing markup for an open menu section
    self.startConvert(gladeFile)
def startConvert(self, gladeFile):
    """Walk the parsed glade DOM and write the converted markup to
    <gladeFile>.xul (Python 2 module: print statements throughout)."""
    if self.HTML == 'XUL':
        self.s= '<?xml version="1.0"?> \n<?xml-stylesheet href="chrome://global/skin/" type="text/css"?> \n<!-- Extremely recommended to keep this css include!! --> \n'
    else:
        self.s = '<HTML><BODY>\n'
    #print self.Doc.toxml()
    tagRoot = self.Doc.documentElement
    print 'Root = ', tagRoot
    self.setTags(tagRoot)
    #
    #
    #
    #
    #
    # Closing tags were pushed while descending; emit them innermost-first.
    self.sEnd.reverse()
    if self.sMenuEnd:
        self.s += self.sMenuEnd
        self.sMenuEnd = None
    for s in self.sEnd:
        self.s += s
    if self.HTML == 'XUL':
        self.s += ''
    else:
        self.s += '\n </BODY></HTML>'
    print self.s
    f = open(gladeFile+'.xul', 'w')
    f.write(self.s)
    f.close()
def setTags(self, tagRoot):
    """Recursively convert each glade widget element below tagRoot."""
    tags = tagRoot.childNodes
    #print tags
    for tag in tags:
        try:
            #print '1-- ', tag, tag.nodeType
            if tag.nodeType == 1:
                val = tag.attributes['class'].value
                print 'val = ', val
                # Any non-menu widget terminates a pending menu section.
                if val not in ['GtkMenuBar', 'GtkMenuItem', 'GtkMenu']:
                    if self.sMenuEnd:
                        self.s += self.sMenuEnd
                        self.sMenuEnd = None
                # NOTE(review): the getGtk* helpers append to self.s directly
                # and return None, so these '+=' lines raise TypeError which
                # is swallowed by the broad except below — confirm intent.
                if val == 'GtkWindow':
                    self.s += self.getGtkWindow(tag)
                elif val == 'GtkVBox':
                    self.s += self.getGtkVBox(tag)
                elif val == 'GtkMenuBar':
                    self.s += self.getGtkMenuBar(tag)
                elif val == 'GtkMenuItem':
                    print 'val = ', val
                    self.s += self.getGtkMenuItem(tag)
        except Exception, params:
            #print Exception, params
            pass
        self.setTags(tag)
def getGtkWindow(self, tag):
    """Append an opening <window> element for a GtkWindow to self.s and push
    its closing tag onto self.sEnd. Returns None."""
    # set html properties from glade
    id = tag.attributes['id'].value
    atts = tag.childNodes
    #at1 = tag.getAttributeNode('width_request')
    #print atts
    for att in atts:
        #print 'att = ', att
        childs = att.childNodes
        try:
            if att.getAttribute('name'):
                #print '1--- ', att.getAttribute('name')
                for child in childs:
                    print 'child = ', child.nodeValue
        except:
            pass
    if self.HTML == 'XUL':
        self.s +='<window id="' + id + '" title="cuon1" xmlns="http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul">\n '
        self.sEnd.append( '\n</window>\n')
def getGtkVBox(self, tag):
    """Append an opening <vbox> element for a GtkVBox to self.s and push its
    closing tag onto self.sEnd. Returns None."""
    # set html properties from glade
    id = tag.attributes['id'].value  # NOTE(review): read but unused for <vbox>
    atts = tag.childNodes
    #at1 = tag.getAttributeNode('width_request')
    #print atts
    for att in atts:
        #print 'att = ', att
        childs = att.childNodes
        try:
            if att.getAttribute('name'):
                #print '1--- ', att.getAttribute('name')
                for child in childs:
                    print 'child = ', child.nodeValue
        except:
            pass
    if self.HTML == 'XUL':
        self.s +='<vbox >\n '
        self.sEnd.append( '\n</vbox>\n')
def getGtkMenuBar(self, tag):
    """Append an opening <menubar> element for a GtkMenuBar to self.s and
    push its closing tag onto self.sEnd. Returns None."""
    # set html properties from glade
    id = tag.attributes['id'].value
    atts = tag.childNodes
    #at1 = tag.getAttributeNode('width_request')
    #print atts
    for att in atts:
        #print 'att = ', att
        childs = att.childNodes
        try:
            if att.getAttribute('name'):
                #print '1--- ', att.getAttribute('name')
                for child in childs:
                    print 'child = ', child.nodeValue
        except:
            pass
    if self.HTML == 'XUL':
        self.s +='<menubar id="' + id + '">\n '
        self.sEnd.append( '\n</menubar>\n')
def getGtkMenuItem(self, tag):
    """Collect label/signal/handler information for a GtkMenuItem.

    NOTE(review): within this method the collected values (label, signal,
    handler, mainmenu) are never emitted as markup — the conversion looks
    unfinished; confirm against the rest of the file. Returns None.
    """
    # set html properties from glade
    label = None
    id = tag.attributes['id'].value
    #print 'find Menuitem 1'
    atts = tag.childNodes
    #print 'find Menuitem 2'
    mainmenu = False
    #print 'find Menuitem'
    #at1 = tag.getAttributeNode('width_request')
    #print atts
    for att in atts:
        print 'atts nodename = ', att.nodeName
        childs = att.childNodes
        try:
            if att.nodeName == 'property':
                if att.getAttribute('name'):
                    print '1--- ', att.getAttribute('name')
                    for child in childs:
                        print 'child Menuitem = ', child.nodeValue
                        print 'child Menuitem2 = ', child.nodeName
                        try:
                            if att.getAttribute('name') == 'label':
                                label = child.nodeValue
                        except:
                            pass
            elif att.nodeName == 'signal':
                if att.getAttribute('name'):
                    print '1--- ', att.getAttribute('name')
                    for child in childs:
                        print 'child signal 1= ', child.nodeValue
                        print 'child Signal 2 = ', child.nodeName
                        try:
                            signal = child.nodeValue
                        except:
                            pass
                elif att.getAttribute('handler'):
                    print '1--- ', att.getAttribute('handler')
                    for child in childs:
                        print 'child handler 1= ', child.nodeValue
                        print 'child handler 2 = ', child.nodeName
                        try:
                            handler = child.nodeValue
                        except:
                            pass
        except:
            pass
|
gEndelf/cmsplugin-testimonials | cmsplugin_testimonials/admin.py | Python | mit | 88 | 0 | f | rom django.contrib import admin
import models
admin.site.register(models.Testimonia | l)
|
drayside/kodkod | libs/.waf-1.6.6-c57dd0fa119e23d36c23d598487c6880/waflib/Tools/c_preproc.py | Python | mit | 16,576 | 0.083072 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/svn/docs/wafbook/single.html#_obtaining_the_waf_file
import sys
if sys.hexversion < 0x020400f0: from sets import Set as set
import re,sys,os,string,traceback
from waflib import Logs,Build,Utils,Errors
from waflib.Logs import debug,error
class PreprocError(Errors.WafError):
	"""Raised when the preprocessor encounters invalid or unexpected input."""
	pass
POPFILE='-'
recursion_limit=150
go_absolute=False
standard_includes=['/usr/include']
if Utils.is_win32:
standard_includes=[]
use_trigraphs=0
strict_quotes=0
g_optrans={'not':'!','and':'&&','bitand':'&','and_eq':'&=','or':'||','bitor':'|','or_eq':'|=','xor':'^','xor_eq':'^=','compl':'~',}
re_lines=re.compile('^[ \t]*(#|%:)[ \t]*(ifdef|ifndef|if|else|elif|endif|include|import|define|undef|pragma)[ \t]*(.*)\r*$',re.IGNORECASE|re.MULTILINE)
re_mac=re.compile("^[a-zA-Z_]\w*")
re_fun=re.compile('^[a-zA-Z_][a-zA-Z0-9_]*[(]')
re_pragma_once=re.compile('^\s*once\s*',re.IGNORECASE)
re_nl=re.compile('\\\\\r*\n',re.MULTILINE)
re_cpp=re.compile(r"""(/\*[^*]*\*+([^/*][^*]*\*+)*/)|//[^\n]*|("(\\.|[^"\\])*"|'(\\.|[^'\\])*'|.[^/"'\\]*)""",re.MULTILINE)
trig_def=[('??'+a,b)for a,b in zip("=-/!'()<>",r'#~\|^[]{}')]
chr_esc={'0':0,'a':7,'b':8,'t':9,'n':10,'f':11,'v':12,'r':13,'\\':92,"'":39}
NUM='i'
OP='O'
IDENT='T'
STR='s'
CHAR='c'
tok_types=[NUM,STR,IDENT,OP]
exp_types=[r"""0[xX](?P<hex>[a-fA-F0-9]+)(?P<qual1>[uUlL]*)|L*?'(?P<char>(\\.|[^\\'])+)'|(?P<n1>\d+)[Ee](?P<exp0>[+-]*?\d+)(?P<float0>[fFlL]*)|(?P<n2>\d*\.\d+)([Ee](?P<exp1>[+-]*?\d+))?(?P<float1>[fFlL]*)|(?P<n4>\d+\.\d*)([Ee](?P<exp2>[+-]*?\d+))?(?P<float2>[fFlL]*)|(?P<oct>0*)(?P<n0>\d+)(?P<qual2>[uUlL]*)""",r'L?"([^"\\]|\\.)*"',r'[a-zA-Z_]\w*',r'%:%:|<<=|>>=|\.\.\.|<<|<%|<:|<=|>>|>=|\+\+|\+=|--|->|-=|\*=|/=|%:|%=|%>|==|&&|&=|\|\||\|=|\^=|:>|!=|##|[\(\)\{\}\[\]<>\?\|\^\*\+&=:!#;,%/\-\?\~\.]',]
re_clexer=re.compile('|'.join(["(?P<%s>%s)"%(name,part)for name,part in zip(tok_types,exp_types)]),re.M)
accepted='a'
ignored='i'
undefined='u'
skipped='s'
def repl(m):
s=m.group(1)
if s:
return' '
return m.group(3)or''
def filter_comments(filename):
code=Utils.readf(filename)
if use_trigraphs:
for(a,b)in trig_def:code=code.split(a).join(b)
code=re_nl.sub('',code)
code=re_cpp.sub(repl,code)
return[(m.group(2),m.group(3))for m in re.finditer(re_lines,code)]
prec={}
ops=['* / %','+ -','<< >>','< <= >= >','== !=','& | ^','&& ||',',']
for x in range(len(ops)):
syms=ops[x]
for u in syms.split():
prec[u]=x
def trimquotes(s):
if not s:return''
s=s.rstrip()
if s[0]=="'"and s[-1]=="'":return s[1:-1]
return s
def reduce_nums(val_1,val_2,val_op):
try:a=0+val_1
except TypeError:a=int(val_1)
try:b=0+val_2
except TypeError:b=int(val_2)
d=val_op
if d=='%':c=a%b
elif d=='+':c=a+b
elif d=='-':c=a-b
elif d=='*':c=a*b
elif d=='/':c=a/b
elif d=='^':c=a^b
elif d=='|':c=a|b
elif d=='||':c=int(a or b)
elif d=='&':c=a&b
elif d=='&&':c=int(a and b)
elif d=='==':c=int(a==b)
elif d=='!=':c=int(a!=b)
elif d=='<=':c=int(a<=b)
elif d=='<':c=int(a<b)
elif d=='>':c=int(a>b)
elif d=='>=':c=int(a>=b)
elif d=='^':c=int(a^b)
elif d=='<<':c=a<<b
elif d=='>>':c=a>>b
else:c=0
return c
def get_num(lst):
if not lst:raise PreprocError("empty list for get_num")
(p,v)=lst[0]
if p==OP:
if v=='(':
count_par=1
i=1
while i<len(lst):
(p,v)=lst[i]
if p==OP:
if v==')':
count_par-=1
if count_par==0:
break
elif v=='(':
count_par+=1
i+=1
else:
raise PreprocError("rparen expected %r"%lst)
(num,_)=get_term(lst[1:i])
return(num,lst[i+1:])
elif v=='+':
return get_num(lst[1:])
elif v=='-':
num,lst=get_num(lst[1:])
return(reduce_nums('-1',num,'*'),lst)
elif v=='!':
num,lst=get_num(lst[1:])
return(int(not int(num)),lst)
elif v=='~':
return(~int(num),lst)
else:
raise PreprocError("Invalid op token %r for get_num"%lst)
elif p==NUM:
return v,lst[1:]
elif p==IDENT:
return 0,lst[1:]
else:
raise PreprocError("Invalid token %r for get_num"%lst)
def get_term(lst):
if not lst:raise PreprocError("empty list for get_term")
num,lst=get_num(lst)
if not lst:
return(num,[])
(p,v)=lst[0]
if p==OP:
if v=='&&'and not num:
return(num,[])
elif v=='||'and num:
return(num,[])
elif v==',':
return get_term(lst[1:])
elif v=='?':
count_par=0
i=1
while i<len(lst):
(p,v)=lst[i]
if p==OP:
if v==')':
count_par-=1
elif v=='(':
count_par+=1
elif v==':':
if count_par==0:
break
i+=1
else:
raise PreprocError("rparen expected %r"%lst)
if int(num):
return get_term(lst[1:i])
| else:
return get_term(lst[i+1:])
else:
num2,lst=get_nu | m(lst[1:])
if not lst:
num2=reduce_nums(num,num2,v)
return get_term([(NUM,num2)]+lst)
p2,v2=lst[0]
if p2!=OP:
raise PreprocError("op expected %r"%lst)
if prec[v2]>=prec[v]:
num2=reduce_nums(num,num2,v)
return get_term([(NUM,num2)]+lst)
else:
num3,lst=get_num(lst[1:])
num3=reduce_nums(num2,num3,v2)
return get_term([(NUM,num),(p,v),(NUM,num3)]+lst)
raise PreprocError("cannot reduce %r"%lst)
def reduce_eval(lst):
num,lst=get_term(lst)
return(NUM,num)
def stringize(lst):
lst=[str(v2)for(p2,v2)in lst]
return"".join(lst)
def paste_tokens(t1,t2):
p1=None
if t1[0]==OP and t2[0]==OP:
p1=OP
elif t1[0]==IDENT and(t2[0]==IDENT or t2[0]==NUM):
p1=IDENT
elif t1[0]==NUM and t2[0]==NUM:
p1=NUM
if not p1:
raise PreprocError('tokens do not make a valid paste %r and %r'%(t1,t2))
return(p1,t1[1]+t2[1])
def reduce_tokens(lst,defs,ban=[]):
i=0
while i<len(lst):
(p,v)=lst[i]
if p==IDENT and v=="defined":
del lst[i]
if i<len(lst):
(p2,v2)=lst[i]
if p2==IDENT:
if v2 in defs:
lst[i]=(NUM,1)
else:
lst[i]=(NUM,0)
elif p2==OP and v2=='(':
del lst[i]
(p2,v2)=lst[i]
del lst[i]
if v2 in defs:
lst[i]=(NUM,1)
else:
lst[i]=(NUM,0)
else:
raise PreprocError("Invalid define expression %r"%lst)
elif p==IDENT and v in defs:
if isinstance(defs[v],str):
a,b=extract_macro(defs[v])
defs[v]=b
macro_def=defs[v]
to_add=macro_def[1]
if isinstance(macro_def[0],list):
del lst[i]
for x in range(len(to_add)):
lst.insert(i,to_add[x])
i+=1
else:
args=[]
del lst[i]
if i>=len(lst):
raise PreprocError("expected '(' after %r (got nothing)"%v)
(p2,v2)=lst[i]
if p2!=OP or v2!='(':
raise PreprocError("expected '(' after %r"%v)
del lst[i]
one_param=[]
count_paren=0
while i<len(lst):
p2,v2=lst[i]
del lst[i]
if p2==OP and count_paren==0:
if v2=='(':
one_param.append((p2,v2))
count_paren+=1
elif v2==')':
if one_param:args.append(one_param)
break
elif v2==',':
if not one_param:raise PreprocError("empty param in funcall %s"%p)
args.append(one_param)
one_param=[]
else:
one_param.append((p2,v2))
else:
one_param.append((p2,v2))
if v2=='(':count_paren+=1
elif v2==')':count_paren-=1
else:
raise PreprocError('malformed macro')
accu=[]
arg_table=macro_def[0]
j=0
while j<len(to_add):
(p2,v2)=to_add[j]
if p2==OP and v2=='#':
if j+1<len(to_add)and to_add[j+1][0]==IDENT and to_add[j+1][1]in arg_table:
toks=args[arg_table[to_add[j+1][1]]]
accu.append((STR,stringize(toks)))
j+=1
else:
accu.append((p2,v2))
elif p2==OP and v2=='##':
if accu and j+1<len(to_add):
t1=accu[-1]
if to_add[j+1][0]==IDENT and to_add[j+1][1]in arg_table:
toks=args[arg_table[to_add[j+1][1]]]
if toks:
accu[-1]=paste_tokens(t1,toks[0])
accu.extend(toks[1:])
else:
accu.append((p2,v2))
accu.extend(toks)
elif to_add[j+1][0]==IDENT and to_add[j+1][1]=='__VA_ARGS__':
va_toks=[]
st=len(macro_def[0])
pt=len(args)
for x in args[pt-st+1:]:
va_toks.extend(x)
va_toks.append((OP,','))
if va_toks:va_toks.pop()
if len(accu)>1:
(p3,v3)=accu[-1]
(p4,v4)=accu[- |
jonathanxqs/lintcode | 171.py | Python | mit | 1,786 | 0.014558 | import copy
class Solution:
# @param strs: A list of strings
# @return: A list of strings
def anagrams(self, strs):
# write your code here
str1=copy.deepcopy(strs)
def hashLize(s):
dicts1= dict()
for i in range(26):
dicts1[chr(i+ord("a"))]=0
for j in s:
if j in dicts1.keys():
dicts1[j]+=1
return dicts1
def sortLize(s):
s1=list(s)
s1.sort()
return "".join(s1)
check_dict=dict()
for i in range(len(strs)):
str_s1=sortLize(strs[i])
if str_s1 in check_dict.keys():
check_dict[str_s1].append(strs[i])
else:
check_dict[str_s1]=[]
check_dict[str_s1].append(strs[i])
str_rt=[]
for i in check_dict.keys():
if (len(check_dict[i]) > 1):
str_rt.extend(check_dict[i])
return str_rt
#Total Runtime: 835 ms
# for i in range(len(strs)):
# str1[i]=hashLize(strs[i])
# str_rt=[]
# flag = [0 for i in range(len(strs))]
# for i in range(len(strs)):
# if flag[i]:
# continue
# for j in range(i+1 | ,len(strs)):
# if i==j:
# continue
# if flag[j]:
# continue
# if str1[i]==str1[j]:
# if flag[i]==0:
# str_rt.append(strs[i])
# flag[i] = 1
# flag[j] = 1
| # str_rt.append(strs[j])
|
karmix/blivet | blivet/partitioning.py | Python | gpl-2.0 | 81,110 | 0.001763 | # partitioning.py
# Disk partitioning functions.
#
# Copyright (C) 2009, 2010, 2011, 2012, 2013 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# the GNU General Public License v.2, or (at your option) any later version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the
# GNU General Public License along with this program; if not, write to the
# Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the
# source code or documentation are not subject to the GNU General Public
# License and may only be used or replicated with the express permission of
# Red Hat, Inc.
#
# Red Hat Author(s): Dave Lehman <dlehman@redhat.com>
#
from operator import gt, lt
from decimal import Decimal
import functools
import gi
gi.require_version("BlockDev", "1.0")
from gi.repository import BlockDev as blockdev
import parted
from .errors import DeviceError, PartitioningError
from .flags import flags
from .devices import Device, PartitionDevice, LUKSDevice, devicePathToName
from .size import Size
from .i18n import _
from .util import stringize, unicodeize, compare
import logging
log = logging.getLogger("blivet")
def partitionCompare(part1, part2):
""" More specifically defined partitions come first.
< 1 => x < y
0 => x == y
> 1 => x > y
:param part1: the first partition
:type part1: :class:`devices.PartitionDevice`
:param part2: the other partition
:type part2: :class:`devices.PartitionDevice`
:return: see above
:rtype: int
"""
ret = 0
# start sector overrides all other sorting factors
part1_start = part1.req_start_sector
part2_start = part2.req_start_sector
if part1_start is not None and part2_start is None:
return -1
elif part1_start is None and part2_start is not None:
return 1
elif part1_start is not None and part2_start is not None:
return compare(part1_start, part2_start)
if part1.req_base_weight:
ret -= part1.req_base_weight
if part2.req_base_weight:
ret += part2.req_base_weight
# more specific disk specs to the front of the list
# req_disks being empty is equivalent to it being an infinitely long list
if part1.req_disks and not part2.req_disks:
ret -= 500
elif not part1.req_disks and part2.req_disks:
ret += 500
else:
ret += compare(len(part1.req_disks), len(part2.req_disks)) * 500
# primary-only to the front of the list
ret -= compare(part1.req_primary, part2.req_primary) * 200
# fixed size requests to the front
ret += compare(part1.req_grow, part2.req_grow) * 100
# larger requests go to the front of the list
ret -= compare(part1.req_base_size, part2.req_base_size) * 50
# potentially larger growable requests go to the front
if part1.req_grow and part2.req_grow:
if not part1.req_max_size and part2.req_max_size:
ret -= 25
elif part1.req_max_size and not part2.req_max_size:
ret += 25
else:
ret -= compare(part1.req_max_size, part2.req_max_size) * 25
# give a little bump based on mountpoint
if hasattr(part1.format, "mountpoint") and \
hasattr(part2.format, "mountpoint"):
ret += compare(part1.format.mountpoint, part2.format.mountpoint) * 10
if ret > 0:
ret = 1
elif ret < 0:
ret = -1
return ret
_partitionCompareKey = functools.cmp_to_key(partitionCompare)
def getNextPartitionType(disk, no_primary=None):
""" Return the type of partition to create next on a disk.
Return a parted partition type value representing the type of the
next partition we will create on this disk.
If there is only one free primary partition and we can create an
extended partition, we do that.
If there are free primary slots and an extended partition we will
recommend creating a primary partition. This can be overridden
with the keyword argument no_primary.
:param disk: the disk from which a partition may be allocated
:type disk: :class:`parted.Disk`
:keyword no_primary: refuse to return :const:`parted.PARTITION_NORMAL`
:returns: the chosen partition type
:rtype: a parted PARTITION_* constant
"""
part_type = None
extended = disk.getExtendedPartition()
supports_extended = disk.supportsFeature(parted.DISK_TYPE_EXTENDED)
logical_count = len(disk.getLogicalPartitions())
max_logicals = disk.getMaxLogicalPartitions()
primary_count = disk.primaryPartitionCount
if primary_count < disk.maxPrimaryPartitionCount:
if primary_count == disk.maxPrimaryPartitionCount - 1:
# can we make an extended partition? now's our chance.
if not extended and supports_extended:
part_type = parted.PARTITION_EXTENDED
elif not extended:
# extended partitions not supported. primary or nothing.
if not no_primary:
part_type = parted.PARTITION_NORMAL
else:
# there is an extended and a free primary
if not no_primary:
| part_type = parted.PARTITION_NORMAL
elif logical_count < max_logicals:
# we have an extended with logical slots, so use one.
part_type = parted.PARTITION_LOGICAL
else:
# there are two or more primary slots left. use one unless we're
# not supposed to make primaries.
if not no_primary:
part_type = parted.PARTITION_NORMAL
el | if extended and logical_count < max_logicals:
part_type = parted.PARTITION_LOGICAL
elif extended and logical_count < max_logicals:
part_type = parted.PARTITION_LOGICAL
return part_type
def getBestFreeSpaceRegion(disk, part_type, req_size, start=None,
boot=None, best_free=None, grow=None,
alignment=None):
""" Return the "best" free region on the specified disk.
For non-boot partitions, we return the largest free region on the
disk. For boot partitions, we return the first region that is
large enough to hold the partition.
Partition type (parted's PARTITION_NORMAL, PARTITION_LOGICAL) is
taken into account when locating a suitable free region.
For locating the best region from among several disks, the keyword
argument best_free allows the specification of a current "best"
free region with which to compare the best from this disk. The
overall best region is returned.
:param disk: the disk
:type disk: :class:`parted.Disk`
:param part_type: the type of partition we want to allocate
:type part_type: one of parted's PARTITION_* constants
:param req_size: the requested size of the partition in MiB
:type req_size: :class:`~.size.Size`
:keyword start: requested start sector for the partition
:type start: int
:keyword boot: whether this will be a bootable partition
:type boot: bool
:keyword best_free: current best free region for this partition
:type best_free: :class:`parted.Geometry`
:keyword grow: indicates whether this is a growable request
:type grow: bool
:keyword alignment: disk alignment requirements
:type alignment: :class:`parted.Alignment`
"""
log.debug("getBestFreeSpaceRegion: disk=%s part_type=%d req_size=%s "
"boot=%s best=%s grow=%s start=%s",
disk.device.path, part_type, req_size, boot, best_free, grow,
|
osantana/ami-push | ami_push/bridge.py | Python | mit | 1,262 | 0 | # coding: utf-8
import asyncio
import logging
from panoramisk import Manager
from .controller import Controller, DEFAULT_MAX_QUEUES, DEFAULT_MAX_QUEUE_SIZE
from .messages import MessageWrapper
class Bridge:
def __init__(self, options, filters, push_configs):
self.loop = asyncio.get_event_loop()
max_queues = options.pop("max_size", DEFAULT_MAX_QUEUES)
max_queue_size = options.pop("max_queue_size", DEFAULT_MAX_QUEUE_SIZE)
self.controller = Cont | roller(self.loop, max_queues, max_queue_size)
self.controller.load_configs(filters, push_configs)
options.pop("loop", None) # discard invalid argument
self.manager = Manager(loop=self.loop, **options)
self.manager.log.addHandler(logging.NullHandler())
self.manager.register_event("*", self.handle_events)
@asyncio.coroutine
def handle_even | ts(self, manager, message):
wrapper = MessageWrapper(message)
yield from self.controller.handle(wrapper)
@asyncio.coroutine
def connect(self):
yield from self.manager.connect()
def run(self):
try:
self.loop.run_until_complete(self.connect())
self.loop.run_forever()
finally:
self.loop.close()
|
Juanvvc/scfs | webserver/cherrypy/test/modwsgi.py | Python | gpl-2.0 | 4,799 | 0.002917 | """Wrapper for mod_wsgi, for use as a CherryPy HTTP server.
To autostart modwsgi, the "apache" executable or script must be
on your system path, or you must override the global APACHE_PATH.
On some platforms, "apache" may be called "apachectl" or "apache2ctl"--
create a symlink to them if needed.
KNOWN BUGS
==========
##1. Apache processes Range headers automatically; CherryPy's truncated |
## output is then truncated again by Apache. See test_core.testRanges.
## This was worked around in http://www.cherrypy.org/changeset/1319.
2. Apache does not allow custom HTTP methods like CONNECT as per the spec.
See test_core.testHTTPMethods.
3. Max request header and body setti | ngs do not work with Apache.
##4. Apache replaces status "reason phrases" automatically. For example,
## CherryPy may set "304 Not modified" but Apache will write out
## "304 Not Modified" (capital "M").
##5. Apache does not allow custom error codes as per the spec.
##6. Apache (or perhaps modpython, or modpython_gateway) unquotes %xx in the
## Request-URI too early.
7. mod_wsgi will not read request bodies which use the "chunked"
transfer-coding (it passes REQUEST_CHUNKED_ERROR to ap_setup_client_block
instead of REQUEST_CHUNKED_DECHUNK, see Apache2's http_protocol.c and
mod_python's requestobject.c).
8. When responding with 204 No Content, mod_wsgi adds a Content-Length
header for you.
9. When an error is raised, mod_wsgi has no facility for printing a
traceback as the response content (it's sent to the Apache log instead).
10. Startup and shutdown of Apache when running mod_wsgi seems slow.
"""
import os
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
import re
import time
from cherrypy.test import test
def read_process(cmd, args=""):
pipein, pipeout = os.popen4("%s %s" % (cmd, args))
try:
firstline = pipeout.readline()
if (re.search(r"(not recognized|No such file|not found)", firstline,
re.IGNORECASE)):
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
APACHE_PATH = "apache"
CONF_PATH = "test_mw.conf"
conf_modwsgi = """
# Apache2 server conf file for testing CherryPy with modpython_gateway.
DocumentRoot "/"
Listen %%s
LoadModule wsgi_module modules/mod_wsgi.so
LoadModule env_module modules/mod_env.so
WSGIScriptAlias / %s
SetEnv testmod %%s
""" % os.path.join(curdir, 'modwsgi.py')
def start(testmod, port, conf_template):
mpconf = CONF_PATH
if not os.path.isabs(mpconf):
mpconf = os.path.join(curdir, mpconf)
f = open(mpconf, 'wb')
try:
f.write(conf_template % (port, testmod))
finally:
f.close()
result = read_process(APACHE_PATH, "-k start -f %s" % mpconf)
if result:
print result
def stop():
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, "-k stop")
class ModWSGITestHarness(test.TestHarness):
"""TestHarness for ModWSGI and CherryPy."""
use_wsgi = True
def _run(self, conf):
from cherrypy.test import webtest
webtest.WebCase.PORT = self.port
webtest.WebCase.harness = self
webtest.WebCase.scheme = "http"
webtest.WebCase.interactive = self.interactive
print
print "Running tests:", self.server
conf_template = conf_modwsgi
# mod_wsgi, since it runs in the Apache process, must be
# started separately for each test, and then *that* process
# must run the setup_server() function for the test.
# Then our process can run the actual test.
success = True
for testmod in self.tests:
try:
start(testmod, self.port, conf_template)
suite = webtest.ReloadingTestLoader().loadTestsFromName(testmod)
result = webtest.TerseTestRunner(verbosity=2).run(suite)
success &= result.wasSuccessful()
finally:
stop()
if success:
return 0
else:
return 1
loaded = False
def application(environ, start_response):
import cherrypy
global loaded
if not loaded:
loaded = True
modname = "cherrypy.test." + environ['testmod']
mod = __import__(modname, globals(), locals(), [''])
mod.setup_server()
cherrypy.config.update({
"log.error_file": os.path.join(curdir, "test.log"),
"environment": "test_suite",
"engine.SIGHUP": None,
"engine.SIGTERM": None,
})
cherrypy.engine.start(blocking=False)
return cherrypy.tree(environ, start_response)
|
AlienCowEatCake/ImageViewer | src/ThirdParty/Exiv2/exiv2-0.27.5-Source/tests/bugfixes/redmine/test_issue_1202.py | Python | gpl-3.0 | 1,550 | 0.001935 | # -*- coding: utf-8 -*-
import system_tests
@system_tests.CopyFiles("$data_path/exiv2-bug1202.jpg")
class CheckFocusContinuous(metaclass=system_tests.CaseMeta):
url = "http://dev.exiv2.org/issues/1202"
filename = "$data_path/exiv2-bug1202_copy.jpg"
commands = [
"""$exiv2 -M"set Exif.CanonCs.FocusContinuous SShort 0" $filename""",
"""$exiv2 -K Exif.CanonCs.FocusContinuous $filename""",
"""$exiv2 -M"set Exif.CanonCs.FocusContinuous SShort 1" $filename""",
"""$exiv2 -K Exif.CanonCs.FocusContinuous $filename""",
"""$exiv2 -M"set Exif.CanonCs.FocusContinuous SShort 8" $filename""",
"""$exiv2 -K Exif. | CanonCs.FocusContinuous $filename""",
"""$exiv2 -M"set Exif.CanonCs.FocusContinuous SShort 9" $filename""",
"""$exiv2 -K Exif.CanonCs.FocusContinuous $filename""",
"""$exiv2 -M"set | Exif.CanonCs.FocusContinuous SShort -1" $filename""",
"""$exiv2 -K Exif.CanonCs.FocusContinuous $filename""",
]
stdout = [
"",
"Exif.CanonCs.FocusContinuous Short 1 Single\n",
"",
"Exif.CanonCs.FocusContinuous Short 1 Continuous\n",
"",
"Exif.CanonCs.FocusContinuous Short 1 Manual\n",
"",
"Exif.CanonCs.FocusContinuous Short 1 (9)\n",
"",
"Exif.CanonCs.FocusContinuous Short 1 (65535)\n",
]
stderr = [""] * len(stdout)
retval = [0] * len(stdout)
|
asavonic/moldynam | data/particles.py | Python | gpl-2.0 | 1,052 | 0.03327 | import random
import sys
class Particle:
def __init__( self ):
pass
def __str__( self ):
return str( "%f %f %f %f %f %f 0 0 0" % ( self.pos[0],
self.pos[1],
self.pos[2],
self.vel[0],
self.vel[1],
self.vel[2] ) )
def create_random_particles( num, area_size ):
particles = [ Particle() for x in range( 0, num ) ]
for particle | in particles:
particle.pos = tuple( random.uniform( 0, comp ) for comp in area_size )
particle.vel = tuple( random.uniform( 0, comp / 100.0 ) for comp in area_size )
return particles
if len(sys.argv) < 2:
sys.stderr.write("please specify particles number\n")
| sys.exit(1)
particles_num = int(sys.argv[1])
particles = create_random_particles( particles_num, ( 10, 10, 10 ) )
for particle in particles:
print( particle )
|
voltaire/minecraft-site | app/run.py | Python | bsd-3-clause | 111 | 0 | #!/usr/b | in/env python
from app import app
if __name__ == "__main__":
app.run(debug=T | rue, host='0.0.0.0')
|
savanu/servo | components/style/gecko/regen_atoms.py | Python | mpl-2.0 | 8,113 | 0.002465 | #!/usr/bin/env python
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
import os
import sys
from io import BytesIO
GECKO_DIR = os.path.dirname(__file__.replace('\\', '/'))
sys.path.insert(0, os.path.join(os.path.dirname(GECKO_DIR), "properties"))
import build
PRELUDE = """
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
/* Autogenerated file created by components/style/gecko/binding_tools/regen_atoms.py, DO NOT EDIT DIRECTLY */
"""[1:]
def gnu_symbolify(source, ident):
return "_ZN{}{}{}{}E".format(len(source.CLASS), source.CLASS, len(ident), ident)
def msvc64_symbolify(source, ident):
return "?{}@{}@@2PEAV{}@@EA".format(ident, source.CLASS, source.TYPE)
def msvc32_symbolify(source, ident):
# Prepend "\x01" to avoid LLVM prefixing the mangled name with "_".
# See https://github.com/rust-lang/rust/issues/36097
return "\\x01?{}@{}@@2PAV{}@@A".format(ident, source.CLASS, source.TYPE)
class GkAtomSource:
PATTERN = re.compile('^(GK_ATOM)\(([^,]*),[^"]*"([^"]*)"\)',
re.MULTILINE)
FILE = "include/nsGkAtomList.h"
CLASS = "nsGkAtoms"
TYPE = "nsStaticAtom"
class CSSPseudoElementsAtomSource:
PATTERN = re.compile('^(CSS_PSEUDO_ELEMENT)\(([^,]*),[^"]*"([^"]*)",',
re.MULTILINE)
FILE = "include/nsCSSPseudoElementList.h"
CLASS = "nsCSSPseudoElements"
# NB: nsICSSPseudoElement is effectively the same as a nsStaticAtom, but we need
# this for MSVC name mangling.
TYPE = "nsICSSPseudoElement"
class CSSAnonBoxesAtomSource:
PATTERN = re.compile('^(CSS_ANON_BOX|CSS_NON_INHERITING_ANON_BOX|CSS_WRAPPER_ANON_BOX)\(([^,]*),[^"]*"([^"]*)"\)',
re.MULTILINE)
FILE = "include/nsCSSAnonBoxList.h"
CLASS = "nsCSSAnonBoxes"
TYPE = "nsICSSAnonBoxPseudo"
SOURCES = [
GkAtomSource,
CSSPseudoElementsAtomSource,
CSSAnonBoxesAtomSource,
]
def map_atom(ident):
if ident in {"box", "loop", "match", "mod", "ref",
"self", "type", "use", "where", "in"}:
return ident + "_"
return ident
class Atom:
def __init__(self, source, macro_name, ident, value):
self.ident = "{}_{}".format(source.CLASS, ident)
self.original_ident = ident
self.value = value
self.source = source
self.macro = macro_name
if self.is_anon_box():
assert self.is_inheriting_anon_box() or self.is_non_inheriting_anon_box()
def cpp_class(self):
return self.source.CLASS
def gnu_symbol(self):
return gnu_symbolify(self.source, self.original_ident)
def msvc32_symbol(self):
return msvc32_symbolify(self.source, self.original_ident)
def msvc64_symbol(self):
return msvc64_symbolify(self.source, self.original_ident)
def type(self):
return self.source.TYPE
def capitalized(self):
return self.original_ident[0].upper() + self.original_ident[1:]
def is_anon_box(self):
return self.type() == "nsICSSAnonBoxPseudo"
def is_non_inheriting_anon_box(self):
return self.macro == "CSS_NON_INHERITING_ANON_BOX"
def is_inheriting_anon_box(self):
return (self.macro == "CSS_ANON_BOX" or
self.macro == "CSS_WRAPPER_ANON_BOX")
def is_tree_pseudo_element(self):
return self.value.startswith(":-moz-tree-")
def collect_atoms(objdir):
atoms = []
for source in SOURCES:
path = os.path.abspath(os.path.join(objdir, source.FILE))
print("cargo:rerun-if-changed={}".format(path))
with open(path) as f:
content = f.read()
for result in source.PATTERN.finditer(content):
atoms.append(Atom(source, result.group(1), result.group(2), result.group(3)))
return atoms
class FileAvoidWrite(BytesIO):
"""File-like object that buffers output and only writes if content changed."""
def __init__(self, filename):
BytesIO.__init__(self)
self.name = filename
def write(self, buf):
if isinstance(buf, unicode):
buf = buf.encode('utf-8')
BytesIO.write(self, buf)
def close(self):
buf = self.getvalue()
BytesIO.close(self)
try:
with open(self.name, 'rb') as f:
old_content = f.read()
if old_content == buf:
print("{} is not changed, skip".format(self.name))
return
except IOError:
pass
with open(self.name, 'wb') as f:
f.write(buf)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if not self.closed:
self.close()
IMPORTS = ("\nuse gecko_bindings::structs::nsStaticAtom;"
"\nuse string_cache::Atom;\n\n")
ATOM_TEMPLATE = (" #[link_name = \"{link_name}\"]\n"
" pub static {name}: *mut {type};")
UNSAFE_STATIC = ("#[inline(always)]\n"
"pub unsafe fn atom_from_static(ptr: *mut nsStaticAtom) -> Atom {\n"
" Atom::from_static(ptr)\n"
"}\n\n")
CFG_IF = '''
cfg_if! {{
if #[cfg(not(target_env = "msvc"))] {{
extern {{
{gnu}
}}
}} else if #[cfg(target_pointer_width = "64")] {{
extern {{
{msvc64}
}}
}} else {{
extern {{
{msvc32}
}}
}}
}}
'''
RULE_TEMPLATE = ('("{atom}") =>\n '
'{{ '
# FIXME(bholley): Uncomment this when rust 1.14 is released.
# See the comment in components/style/lib.rs.
# ' #[allow(unsafe_code)] #[allow(unused_unsafe)] '
'unsafe {{ $crate::string_cache::atom_macro::atom_from_static'
'($crate::string_cache::atom_macro::{name} as | *mut _) }}'
| ' }};')
MACRO = '''
#[macro_export]
macro_rules! atom {{
{}
}}
'''
def write_atom_macro(atoms, file_name):
def get_symbols(func):
return '\n'.join([ATOM_TEMPLATE.format(name=atom.ident,
link_name=func(atom),
type=atom.type()) for atom in atoms])
with FileAvoidWrite(file_name) as f:
f.write(PRELUDE)
f.write(IMPORTS)
for source in SOURCES:
if source.TYPE != "nsStaticAtom":
f.write("pub enum {} {{}}\n\n".format(source.TYPE))
f.write(UNSAFE_STATIC)
gnu_symbols = get_symbols(Atom.gnu_symbol)
msvc32_symbols = get_symbols(Atom.msvc32_symbol)
msvc64_symbols = get_symbols(Atom.msvc64_symbol)
f.write(CFG_IF.format(gnu=gnu_symbols, msvc32=msvc32_symbols, msvc64=msvc64_symbols))
macro_rules = [RULE_TEMPLATE.format(atom=atom.value, name=atom.ident) for atom in atoms]
f.write(MACRO.format('\n'.join(macro_rules)))
def write_pseudo_elements(atoms, target_filename):
pseudos = []
for atom in atoms:
if atom.type() == "nsICSSPseudoElement" or atom.type() == "nsICSSAnonBoxPseudo":
pseudos.append(atom)
pseudo_definition_template = os.path.join(GECKO_DIR, "pseudo_element_definition.mako.rs")
print("cargo:rerun-if-changed={}".format(pseudo_definition_template))
contents = build.render(pseudo_definition_template, PSEUDOS=pseudos)
with FileAvoidWrite(target_filename) as f:
f.write(contents)
def generate_atoms(dist, out):
atoms = collect_atoms(dist)
write_atom_macro(atoms, os.path.join(out, "atom_macro.rs"))
write_pseudo_elements(atoms, os.path.join(out, "pseudo_element_definition.rs"))
if __name__ == "__main__":
if len(sys.argv) != 3:
print("Usage: {} dist out".format(sys.argv[0]))
exit(2)
generate_atoms(sys.argv[1], sys.argv[2])
|
JeremyRubin/bitcoin | test/functional/feature_nulldummy.py | Python | mit | 7,178 | 0.004319 | #!/usr/bin/env python3
# Copyright (c) 2016-2020 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test NULLDUMMY softfork.
Connect to a single node.
Generate 2 blocks (save the coinbases for later).
Generate COINBASE_MATURITY (CB) more blocks to ensure the coinbases are mature.
[Policy/Consensus] Check that NULLDUMMY compliant transactions are accepted in block CB + 3.
[Policy] Check that non-NULLDUMMY transactions are rejected before activation.
[Consensus] Check that the new NULLDUMMY rules are not enforced on block CB + 4.
[Policy/Consensus] Check that the new NULLDUMMY rules are enforced on block CB + 5.
"""
import time
from test_framework.blocktools import (
COINBASE_MATURITY,
NORMAL_GBT_REQUEST_PARAMS,
add_witness_commitment,
create_block,
create_transaction,
)
from test_framework.messages import CTransaction
from test_framework.script import CScript
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal, assert_raises_rpc_error
NULLDUMMY_ERROR = "non-mandatory-script-verify-flag (Dummy CHECKMULTISIG argument must be zero)"
def trueDummy(tx):
scriptSig = CScript(tx.vin[0].scriptSig)
newscript = []
for i in scriptSig:
if len(newscript) == 0:
assert len(i) == 0
newscript.append(b'\x51')
else:
newscript.append(i)
tx.vin[0].scriptSig = CScript(newscript)
tx.rehash()
class NULLDUMMYTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
# This script tests NULLDUMMY activation, which is part of the 'segwit' deployment, so we go through
# normal segwit activation here (and don't use the default always-on behaviour).
self.extra_args = [[
f'-segwitheight={COINBASE_MATURITY + 5}',
'-addresstype=legacy',
]]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def run_test(self):
self.nodes[0].createwallet(wallet_name='wmulti', disable_private_keys=True)
wmulti = self.nodes[0].get_wallet_rpc('wmulti')
w0 = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
self.address = w0.getnewaddress()
self.pubkey = w0.getaddressinfo(self.address)['pubkey']
self.ms_address = wmulti.addmultisigaddress(1, [self.pubkey])['address']
self.wit_address = w0.getnewaddress(address_type='p2sh-segwit')
self.wit_ms_address = wmulti.addmultisigaddress(1, [self.pubkey], '', 'p2sh-segwit')['address']
if not self.options.descriptors:
# Legacy wallets need to import these so that they are watched by the wallet. This is unnecessary (and does not need to be tested) for descriptor wallets
wmulti.importaddress(self.ms_address)
wmulti.importaddress(self.wit_ms_address)
self.coinbase_blocks = self.nodes[0].generate(2) # block height = 2
coinbase_txid = []
for i in self.coinbase_blocks:
coinbase_txid.append(self.nodes[0].getblock(i)['tx'][0])
self.nodes[0].generate(COINBASE_MATURITY) # block height = COINBASE_MATURITY + 2
self.lastblockhash = self.nodes[0].getbestblockhash()
self.lastblockheight = COINBASE_MATURITY + 2
self.lastblocktime = int(time.time()) + self.lastblockheight
self.log.info(f"Test 1: NULLDUMMY compliant base transactions should be accepted to mempool and mined before activation [{COINBASE_MATURITY + 3}]")
test1txs = [create_transaction(self.nodes[0], coinbase_txid[0], self.ms_address, amount=49)]
txid1 = self.nodes[0].sendrawtransaction(test1txs[0].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], txid1, self.ms_address, amount=48))
txid2 = self.nodes[0].sendrawtransaction(test1txs[1].serialize_with_witness().hex(), 0)
test1txs.append(create_transaction(self.nodes[0], coinbase_txid[1], self.wit_ms_address, amount=49))
txid3 = self.nodes[0].sendrawtransaction(test1txs[2].serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test1txs, False, True)
self.log.info("Test 2: Non-NULLDUMMY base multisig transaction should not be accepted to mempool before activation")
test2tx = create_transaction(self.nodes[0], txid2, self.ms_address, amount=47)
trueDummy(test2tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test2tx.serialize_with_witness().hex(), 0)
self.log.info(f"Test 3: Non-NULLDUMMY base transactions should be accepted in a block before activation [{COINBASE_MATURITY + 4}]")
self.block_submit(self.nodes[0], [test2tx], False, True)
self.log.info("Test 4: Non-NULLDUMMY base multisig transaction is invalid after activation")
test4tx = create_transaction(self.nodes[0], test2tx.hash, self.address, amount=46)
test6txs = [CTransaction(test4tx)]
trueDummy(test4tx)
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test4tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test4tx])
self.log.info("Test 5: Non-NULLDUMMY P2WSH multisig transaction invalid after activation")
test5tx = create_transaction(self.nodes[0], txid3, self.wit_address, amount=48)
test6txs.append(CTransaction(test5tx))
test5tx.wit.vtxinwit[0].scriptWitness.stack[0] = b'\x01'
assert_raises_rpc_error(-26, NULLDUMMY_ERROR, self.nodes[0].sendrawtransaction, test5tx.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], [test5tx], True)
self.log.info(f"Test 6: NULLDUMMY compliant base/witness transactions should be accepted to mempool and in block after activation [{COINBASE_MATURITY + 5}]")
for i in test6txs:
self.nodes[0].sendrawtransaction(i.serialize_with_witness().hex(), 0)
self.block_submit(self.nodes[0], test6txs, True, True)
def block_submit(self, node, txs, witness=False, accept=False):
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
assert_equal(tmpl['previousblockhash'], self.lastblockhash)
assert_equal(tmpl['height'], self.lastblockheight + 1)
| block = create_block(tmpl=tmpl, ntime=self.lastblocktime + 1)
for tx in txs:
tx.rehash()
block.vtx.append(tx)
block.hashMerkleRoot = block.calc_merkle_root()
witness and add_witness_commitment(block)
block.rehash()
block.solve()
assert_equal(None if accept else 'block-validation-failed', node.submitblock(block.serialize().hex()))
if (accept):
assert_equal(node.getbestblockhash(), block.hash)
s | elf.lastblockhash = block.hash
self.lastblocktime += 1
self.lastblockheight += 1
else:
assert_equal(node.getbestblockhash(), self.lastblockhash)
if __name__ == '__main__':
NULLDUMMYTest().main()
|
111pontes/ydk-py | cisco-ios-xe/ydk/models/cisco_ios_xe/_meta/_tailf_netconf_monitoring.py | Python | apache-2.0 | 3,575 | 0.015944 |
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION, ANYXML_CLASS
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'NetconfTcpIdentity' : {
'meta_info' : _MetaInfoClass('NetconfTcpIdentity',
False,
[
],
'tailf-netconf-monitoring',
'netconf-tcp',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'RestHttpIdentity' : {
'meta_info | ' : _MetaInfoClass('RestHttpIdentity',
False,
[
],
| 'tailf-netconf-monitoring',
'rest-http',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'CliSshIdentity' : {
'meta_info' : _MetaInfoClass('CliSshIdentity',
False,
[
],
'tailf-netconf-monitoring',
'cli-ssh',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'WebuiHttpIdentity' : {
'meta_info' : _MetaInfoClass('WebuiHttpIdentity',
False,
[
],
'tailf-netconf-monitoring',
'webui-http',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'CliConsoleIdentity' : {
'meta_info' : _MetaInfoClass('CliConsoleIdentity',
False,
[
],
'tailf-netconf-monitoring',
'cli-console',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'CliTcpIdentity' : {
'meta_info' : _MetaInfoClass('CliTcpIdentity',
False,
[
],
'tailf-netconf-monitoring',
'cli-tcp',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'RestHttpsIdentity' : {
'meta_info' : _MetaInfoClass('RestHttpsIdentity',
False,
[
],
'tailf-netconf-monitoring',
'rest-https',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'SnmpUdpIdentity' : {
'meta_info' : _MetaInfoClass('SnmpUdpIdentity',
False,
[
],
'tailf-netconf-monitoring',
'snmp-udp',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
'WebuiHttpsIdentity' : {
'meta_info' : _MetaInfoClass('WebuiHttpsIdentity',
False,
[
],
'tailf-netconf-monitoring',
'webui-https',
_yang_ns._namespaces['tailf-netconf-monitoring'],
'ydk.models.cisco_ios_xe.tailf_netconf_monitoring'
),
},
}
|
box/flaky | test/test_multiprocess_string_io.py | Python | apache-2.0 | 1,556 | 0 | from io import StringIO
from unittest import TestCase
from | genty import genty, genty_dataset
@genty
class TestMultiprocessSt | ringIO(TestCase):
_unicode_string = 'Plain Hello'
_unicode_string_non_ascii = 'ńőń ȁŝćȉȉ ŝƭȕƒƒ'
def setUp(self):
super().setUp()
from flaky.multiprocess_string_io import MultiprocessingStringIO
self._string_io = StringIO()
self._mp_string_io = MultiprocessingStringIO()
del self._mp_string_io.proxy[:]
self._string_ios = (self._string_io, self._mp_string_io)
@genty_dataset(
no_writes=([], ''),
one_write=([_unicode_string], _unicode_string),
two_writes=(
[_unicode_string, _unicode_string_non_ascii],
'{}{}'.format(_unicode_string, _unicode_string_non_ascii),
)
)
def test_write_then_read(self, writes, expected_value):
for string_io in self._string_ios:
for item in writes:
string_io.write(item)
self.assertEqual(string_io.getvalue(), expected_value)
@genty_dataset(
no_writes=([], ''),
one_write=([_unicode_string], _unicode_string),
two_writes=(
[_unicode_string, _unicode_string_non_ascii],
'{}{}'.format(_unicode_string, _unicode_string_non_ascii),
)
)
def test_writelines_then_read(self, lines, expected_value):
for string_io in self._string_ios:
string_io.writelines(lines)
self.assertEqual(string_io.getvalue(), expected_value)
|
scpeters/catkin_tools | catkin_tools/jobs/utils.py | Python | apache-2.0 | 6,578 | 0.001976 | # Copyright 2014 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import shutil
from catkin_tools.common import mkdir_p
from catkin_tools.common import get_cached_recursive_build_depends_in_workspace
from catkin_tools.resultspace import get_resultspace_environment
from catkin_tools.execution.events import ExecutionEvent
def get_env_loaders(package, context):
"""Get a list of env loaders required to build this package."""
sources = []
# If installing to isolated folders or not installing, but devel spaces are not merged
if (context.install and context.isolate_install) or (not context.install and context.isolate_devel):
# Source each package's install or devel space
space = context.install_space_abs if context.install else context.devel_space_abs
# Get the recursive dependcies
depends = get_cached_recursive_build_depends_in_workspace(package, context.packages)
# For each dep add a line to source its setup file
for dep_pth, dep in depends:
source_path = os.path.join(space, dep.name, 'env.sh')
sources.append(source_path)
else:
# Get the actual destination of this package
if context.link_devel and not context.install:
source_path = os.path.join(context.package_final_path(package), 'env.sh')
else:
source_path = os.path.join(context.package_dest_path(package), 'env.sh')
sources = [source_path]
return sources
def get_env_loader(package, context):
"""This function returns a function object which extends a base environment
based on a set of environments to load."""
def load_env(base_env):
# Copy the base environment to extend
job_env = dict(base_env)
# Get the paths to the env loaders
env_loader_paths = get_env_loaders(package, context)
# If DESTDIR is set, set _CATKIN_SETUP_DIR as well
if context.destdir is not None:
job_env['_CATKIN_SETUP_DIR'] = context.package_dest_path(package)
for env_loader_path in env_loader_paths:
# print(' - Loading resultspace env from: {}'.format(env_loader_path))
resultspace_env = get_resultspace_environment(
os.path.split(env_loader_path)[0],
base_env=job_env,
quiet=True,
cached=context.use_env_cache,
strict=False)
job_env.update(resultspace_env)
return job_env
return load_env
def makedirs(logger, event_queue, path):
"""FunctionStage functor that makes a path of directories."""
mkdir_p(path)
return 0
def copyfiles(logger, event_queue, source_paths, dest_path):
"""FunctionStage functor that copies one or more files"""
for source_path in source_paths:
shutil.copy(source_path, dest_path)
return 0
def rmfile(logger, event_queue, path):
"""FunctionStage functor that removes a file."""
if os.path.exists(path):
os.remove(path)
return 0
def rmdirs(logger, event_queue, paths):
"""FunctionStage functor that removes a directory tree."""
return rmfiles(logger, event_queue, paths, remove_empty=False)
def rmfiles(logger, event_queue, paths, dry_run, remove_empty=False, empty_root='/'):
"""FunctionStage functor that removes a list of files and directories.
If remove_empty is True, then this will also remove directories which
become emprt after deleting the files in `paths`. It will delete files up
to the path specified by `empty_root`.
"""
# Determine empty directories
if remove_empty:
# First get a list of directories to check
dirs_to_check = set()
for path in paths:
# Make sure the file is given by an absolute path and it exists
if not os.path.isabs(path) or not os.path.exists(path):
continue
# Only look in the devel space
while empty_root.find(path) != 0:
# Pop up a directory
path, dirname = os.path.split(path)
# Skip if this path isn't a directory
if not os.path.isdir(path):
| continue
dirs_to_check.add(path)
# For each | directory which may be empty after cleaning, visit them
# depth-first and count their descendants
dir_descendants = dict()
for path in sorted(dirs_to_check, key=lambda k: -len(k.split(os.path.sep))):
# Get the absolute path to all the files currently in this directory
files = [os.path.join(path, f) for f in os.listdir(path)]
# Filter out the files which we intend to remove
files = [f for f in files if f not in paths]
# Compute the minimum number of files potentially contained in this path
dir_descendants[path] = sum([
(dir_descendants.get(f, 1) if os.path.isdir(f) else 1)
for f in files
])
# Schedule the directory for removal if removal of the given files will make it empty
if dir_descendants[path] == 0:
paths.append(path)
# REmove the paths
for index, path in enumerate(paths):
# Remove the path
if os.path.exists(path):
if os.path.isdir(path):
logger.out('Removing directory: {}'.format(path))
if not dry_run:
shutil.rmtree(path)
else:
logger.out(' Removing file: {}'.format(path))
if not dry_run:
os.remove(path)
else:
logger.err('Warning: File {} could not be deleted because it does not exist.'.format(path))
# Report progress
event_queue.put(ExecutionEvent(
'STAGE_PROGRESS',
job_id=logger.job_id,
stage_label=logger.stage_label,
percent=str(index / float(len(paths)))))
return 0
|
koduj-z-klasa/python101 | docs/pyqt/todopw/baza_z5.py | Python | mit | 2,242 | 0 | # -*- coding: utf-8 -*-
from peewee import *
from datetime import datetime
baza = SqliteDatabase('adresy.db')
class BazaModel(Model): # klasa bazowa
class Meta:
database = baza
class Osoba(BazaModel):
login = CharField(null=False, unique=True)
haslo = CharField()
class Meta:
order_by = ('login',)
class Zadanie(BazaModel):
tresc = TextField(null=False)
datad = DateTimeField(default=datetime.now)
wykonane = BooleanField(default=False)
osoba = ForeignKeyField(Osoba, related_name='zadania')
class Meta:
order_by = ('datad',)
def polacz():
baza.connect() # nawiązujemy połączenie z bazą
baza.create_tables([Osoba, Zadanie], True) # tworzymy tabele
ladujDane() # wstawiamy początkowe dane
return True
def loguj(login, haslo):
try:
osoba, created = Osoba.get_or_create(login=login, haslo=haslo)
return osoba
except IntegrityError:
return None
def ladujDane():
""" Przygotowanie początkowych danych testowych """
if Osoba.select().count() > 0:
return
osoby = ('adam', 'ewa')
zadania = ('Pierwsze zadanie', 'Drugie zadanie', 'Trzecie zadanie')
for login in osoby:
o = Osoba(login=login, haslo='123')
o.save()
for tresc in zadania:
z = Zadanie(tresc=tresc, osoba=o)
z.save()
baza.commit()
baza.close()
def czytajDane(osoba):
""" Pobranie zadań danego użytkownika z bazy """
zadania = [] # lista zadań
wpisy = Zadanie.select().where(Zadanie.osoba == osoba)
for z in wpisy:
zadania.append([
z.id, # identyfikator zadania
z.tresc, # | treść zadania
'{0:%Y-%m-%d %H:%M:%S}'.format(z.datad), # data dodania
z.wykonane, # bool: cz | y wykonane?
False]) # bool: czy usunąć?
return zadania
def dodajZadanie(osoba, tresc):
""" Dodawanie nowego zadania """
zadanie = Zadanie(tresc=tresc, osoba=osoba)
zadanie.save()
return [
zadanie.id,
zadanie.tresc,
'{0:%Y-%m-%d %H:%M:%S}'.format(zadanie.datad),
zadanie.wykonane,
False]
pola = ['Id', 'Zadanie', 'Dodano', 'Zrobione', 'Usuń']
|
stanford-rc/shine | lib/Shine/Lustre/Router.py | Python | gpl-2.0 | 3,044 | 0.003614 | # Router.py -- Shine Lustre Router
# Copyright (C) 2010-2013 CEA
#
# This file is part of shine
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
"""
Classes for Shine framework to manage Lustre LNET routers.
"""
import os
from Shine.Lustre.Component import Component, ComponentError, \
MOUNTED, OFFLINE, TARGET_ERROR, RUNTIME_ERROR
from Shine.Lustre.Actions.StartRouter import StartRouter
from Shine.Lustre.Actions.StopRouter import StopRouter
class Router(Component):
"""
Manages a LNET ro | uter in Shine framework.
"""
TYPE = 'router'
DISPLAY_ORDER = 1
START_ORDER = 1
#
# Text form for different router states.
#
# Could be nearly merged with Target state_text_map if | MOUNTED value
# becomes the same.
STATE_TEXT_MAP = {
None: "unknown",
OFFLINE: "offline",
TARGET_ERROR: "ERROR",
MOUNTED: "online",
RUNTIME_ERROR: "CHECK FAILURE"
}
def longtext(self):
"""
Return the routeur server name.
"""
return "router on %s" % self.server
def lustre_check(self):
"""
Check Router health at Lustre level.
Check LNET routing capabilities and change object state
based on the results.
"""
# LNET is not loaded
if not os.path.isfile("/proc/sys/lnet/routes"):
self.state = OFFLINE
return
# Read routing information
try:
routes = open("/proc/sys/lnet/routes")
# read only first line
state = routes.readline().strip().lower()
except:
self.state = RUNTIME_ERROR
raise ComponentError(self, "Could not read routing information")
# routing info tells this is ok?
if state == "routing enabled":
self.state = MOUNTED
elif state == "routing disabled":
self.state = TARGET_ERROR
raise ComponentError(self, "Misconfigured router")
else:
self.state = RUNTIME_ERROR
raise ComponentError(self, "Bad routing status")
#
# Client actions
#
def start(self, **kwargs):
"""Start a Lustre router."""
return StartRouter(self, **kwargs)
def stop(self, **kwargs):
"""Stop a Lustre router."""
return StopRouter(self, **kwargs)
|
cliffano/swaggy-jenkins | clients/python-experimental/generated/openapi_client/model/pipeline_step_impl.py | Python | mit | 3,115 | 0.001284 | # coding: utf-8
"""
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification # noqa: E501
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
import typing # noqa: F401
from frozendict import frozendict # noqa: F401
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from openapi_client.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
class PipelineStepImpl(
DictSchema
):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
_class = StrSchema
@classmethod
@property
def _links(cls) -> typing.Type['PipelineStepImpllinks']:
return PipelineStepImpllinks
displayName = StrSchema
durationInMillis = IntSchema
id = StrSchema
@classmethod
@property
def input(cls) -> typing.Type['InputStepImpl']:
return InputStepImpl
result = StrSchema
startTime = StrSchema
state = StrSchema
def __new__(
cls,
*args: typing.Union[dict, frozendict, ],
_class: typing.Union[_class, Unset] = unset,
_links: typing.Union['PipelineStepImpllinks', Unset] = unset,
displayName: typing.Union[displayName, Unset] = unset,
durationInMillis: typing.Union[durationInMillis, Unset] = unset,
id: typing.Union[id, Unset] = unset,
input: typing.Union['InputStepImpl', Unset] = unset,
result: typing.Union[result, Unse | t] = unset,
startTime: typ | ing.Union[startTime, Unset] = unset,
state: typing.Union[state, Unset] = unset,
_instantiation_metadata: typing.Optional[InstantiationMetadata] = None,
**kwargs: typing.Type[Schema],
) -> 'PipelineStepImpl':
return super().__new__(
cls,
*args,
_class=_class,
_links=_links,
displayName=displayName,
durationInMillis=durationInMillis,
id=id,
input=input,
result=result,
startTime=startTime,
state=state,
_instantiation_metadata=_instantiation_metadata,
**kwargs,
)
from openapi_client.model.input_step_impl import InputStepImpl
from openapi_client.model.pipeline_step_impllinks import PipelineStepImpllinks
|
xandors/dino | dino/__main__.py | Python | apache-2.0 | 941 | 0.001063 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
dino.__main__
~~~~~~~~~~~~~~~~~~~~~
The main entry point for the command line interface.
Invoke as ``dino`` (if installed)
or ``python -m dino`` (no install required).
"""
from __future__ import absolute_import, unicode_literals
import logging
import sys
from dino.log import configure_stream
logger = logging.getLogger(__name__)
def cli():
"""Add s | ome useful functionality here or import from a submodule."""
# configure root logger to print to STDERR
configure_stream(level='DEBUG')
# launch the command line interface
logger.debug('Booting up command line interface')
# ...oops, it wasn't implemented yet!
logger.error('Please implement the co | mmand line interface!')
raise NotImplementedError('Dino Dependency Injection CLI not implemented yet')
if __name__ == '__main__':
# exit using whatever exit code the CLI returned
sys.exit(cli())
|
pisun2/python | static.py | Python | apache-2.0 | 711 | 0.007032 | python manage.py collectstatic
python manage.py runserver --nostatic
urlpatterns += patterns('',
(r'^static/suit/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.DJANGO_SUIT_TEMPLAT | E}),
)
urlpatterns += patterns('',
(r'^static/admin/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.DJANGO_ADMIN_TEMPLATE}),
)
SITE_PATH = os.path.dirname(__file__)
REPO_ROOT = os.path.normpath(os.path.join(S | ITE_PATH, '..'))
MEDIA_ROOT = os.path.join(REPO_ROOT, 'public/media')
DJANGO_SUIT_TEMPLATE = os.path.join(REPO_ROOT, 'static/suit')
DJANGO_EDITOR = os.path.join(REPO_ROOT, 'static/django_summernote')
DJANGO_ADMIN_TEMPLATE = os.path.join(REPO_ROOT, 'static/admin')
|
zatricion/Streams | ExamplesElementaryOperations/ExamplesOpNoState.py | Python | mit | 4,241 | 0.006602 | """This module contains examples of the op() function
where:
op(f,x) returns a stream where x is a stream, and f
is an operator on lists, i.e., f is a function from
a list to a list. These lists are of lists of arbitrary
objects other than streams and agents.
Function f must be stateless, i.e., for any lists u, v:
f(u.extend(v)) = f(u).extend(f(v))
(Stateful functions are given in OpStateful.py with
examples in ExamplesOpWithState.py.)
Let f be a stateless operator on lists and let x be a stream.
If at some point, the value of stream x is a list u then at
that point, the value of stream op(f,x) is the list f(u).
If at a later point, the value of stream x is the list:
u.extend(v) then, at that point the value of stream op(f,x)
is f(u).extend(f(v)).
As a specific example, consider the following f():
def f(lst): return [w * w for w in lst]
If at some point in time, the value of x is [3, 7],
then at that point the value of op(f,x) is f([3, 7])
or [9, 49]. If at a later point, the value of x is
[3, 7, 0, 11, 5] then the value of op(f,x) at that point
is f([3, 7, 0, 11, 5]) or [9, 49, 0, 121, 25].
"""
if __name__ == '__main__':
if __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ) )
from Agent import *
from ListOperators import *
from PrintFunctions import print_streams_recent
def example_1():
print "example_1"
print "op(f, x): f is a function from a list to a list"
print "x is a stream \n"
# FUNCTIONS FROM LIST TO LIST
# This example uses the following list operators:
# functions from a list to a list.
# f, g, h, r
# Example A: function using list comprehension
def f(lst): return [w*w for w in lst]
# Example B: function using filter
threshold = 6
def predicate(w):
return w > threshold
def g(lst):
return filter(predicate, lst)
# Example C: function using map
# Raise each element of the list to the n-th power.
n = 3
def power(w):
return w**n
def h(lst):
return map(power, lst)
# Example D: function using another list comprehension
# Discard any element of x that is not a
# multiple of a parameter n, and divide the
# elements that are multiples of n by n.
n = 3
def r(lst):
result = []
for w in lst:
if w%n == 0: result.append(w/n)
return result
# EXAMPLES OF OPERATIONS | ON STREAMS
# The input stream for these examples
x = Stream('x')
print 'x is the input stream.'
print 'a is a stream consisting of the squares of the input'
print 'b is the stream consisting of values that exceed 6'
print 'c is the stream consisting of the third powers of the input'
print 'd is th | e stream consisting of values that are multiples of 3 divided by 3'
print 'newa is the same as a. It is defined in a more succinct fashion.'
print 'newb has squares that exceed 6.'
print ''
# The output streams a, b, c, d obtained by
# applying the list operators f, g, h, r to
# stream x.
a = op(f, x)
b = op(g, x)
c = op(h, x)
d = op(r, x)
# You can also define a function only on streams.
# You can do this using functools in Python or
# by simple encapsulation as shown below.
def F(x): return op(f,x)
def G(x): return op(g,x)
newa = F(x)
newb = G(F(x))
# The advantage is that F is a function only
# of streams. So, function composition looks cleaner
# as in G(F(x))
# Name the output streams to label the output
# so that reading the output is easier.
a.set_name('a')
newa.set_name('newa')
b.set_name('b')
newb.set_name('newb')
c.set_name('c')
d.set_name('d')
# At this point x is the empty stream:
# its value is []
x.extend([3, 7])
# Now the value of x is [3, 7]
print "FIRST STEP"
print_streams_recent([x, a, b, c, d, newa, newb])
print ""
x.extend([0, 11, 15])
# Now the value of x is [3, 7, 0, 11, 15]
print "SECOND STEP"
print_streams_recent([x, a, b, c, d, newa, newb])
def main():
example_1()
if __name__ == '__main__':
main()
|
darrenwatt/steam_tracker | create_hour_per_day_chart.py | Python | gpl-2.0 | 1,342 | 0.003726 | import os
import plotly.plotly as py
from plotly.graph_objs import *
import config
py.sign_in(config.plotly_user, config.plotly_pass)
| dates = []
count_dates = []
c | ounts = []
hours = []
with open(os.path.normpath(config.filename), 'r') as f:
rows = f.readlines()
# get summary of dates in file
for line in rows:
lines = line.strip().split(',')
just_date = lines[0].split(' ')
dates.append(just_date[0])
bins = sorted(list(set(dates)))
# count occurrences per day
for line in rows:
lines = line.strip().split(',')
just_date = lines[0].split(' ')
if lines[1] == ' 1':
# gaming happened
count_dates.append(just_date[0])
# sort date events into bins
for bin in bins:
counts.append(count_dates.count(bin))
# normalise 5 min slots into actual hours
for count in counts:
hours.append(count/float(12))
trace1 = Bar(x=bins,y=hours)
data = Data([trace1])
layout = Layout(
title = config.plotly_graph_title,
xaxis=XAxis(
title='Date',
autorange=True
),
yaxis=YAxis(
title='Number of Hours',
autorange=True
)
)
fig = Figure(data=data, layout=layout)
# do the graph magic
plot_url = py.plot(fig, filename=config.plotly_url, fileopt=config.plotly_fileopt, auto_open= config.plotly_auto_open)
|
jawilson/home-assistant | tests/components/hassio/test_handler.py | Python | apache-2.0 | 7,540 | 0.000928 | """The tests for the hassio component."""
import aiohttp
import pytest
from homeassistant.components.hassio.handler import HassioAPIError
async def test_api_ping(hassio_handler, aioclient_mock):
"""Test setup with API ping."""
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"})
assert await hassio_handler.is_connected()
assert aioclient_mock.call_count == 1
async def test_api_ping_error(hassio_handler, aioclient_mock):
"""Test setup with API ping error."""
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "error"})
assert not (await hassio_handler.is_connected())
assert aioclient_mock.call_count == 1
async def test_api_ping_exeption(hassio_handler, aioclient_mock):
"""Test setup with API ping exception."""
aioclient_mock.get("http://127.0.0.1/supervisor/ping", exc=aiohttp.ClientError())
assert not (await hassio_handler.is_connected())
assert aioclient_mock.call_count == 1
async def test_api_info(hassio_handler | , aioclient_mock):
"""Test setup with API generic info."""
aioclient_mock.get(
"http://127.0.0.1/info",
json={
"result": "ok",
"data": {"supervisor": "222", "homeassistant": "0.110.0", "hassos": None},
},
)
data = await hassio_handler.get_info()
assert aioclient_mock.call_count = | = 1
assert data["hassos"] is None
assert data["homeassistant"] == "0.110.0"
assert data["supervisor"] == "222"
async def test_api_info_error(hassio_handler, aioclient_mock):
"""Test setup with API Home Assistant info error."""
aioclient_mock.get(
"http://127.0.0.1/info", json={"result": "error", "message": None}
)
with pytest.raises(HassioAPIError):
await hassio_handler.get_info()
assert aioclient_mock.call_count == 1
async def test_api_host_info(hassio_handler, aioclient_mock):
"""Test setup with API Host info."""
aioclient_mock.get(
"http://127.0.0.1/host/info",
json={
"result": "ok",
"data": {
"chassis": "vm",
"operating_system": "Debian GNU/Linux 10 (buster)",
"kernel": "4.19.0-6-amd64",
},
},
)
data = await hassio_handler.get_host_info()
assert aioclient_mock.call_count == 1
assert data["chassis"] == "vm"
assert data["kernel"] == "4.19.0-6-amd64"
assert data["operating_system"] == "Debian GNU/Linux 10 (buster)"
async def test_api_supervisor_info(hassio_handler, aioclient_mock):
"""Test setup with API Supervisor info."""
aioclient_mock.get(
"http://127.0.0.1/supervisor/info",
json={
"result": "ok",
"data": {"supported": True, "version": "2020.11.1", "channel": "stable"},
},
)
data = await hassio_handler.get_supervisor_info()
assert aioclient_mock.call_count == 1
assert data["supported"]
assert data["version"] == "2020.11.1"
assert data["channel"] == "stable"
async def test_api_os_info(hassio_handler, aioclient_mock):
"""Test setup with API OS info."""
aioclient_mock.get(
"http://127.0.0.1/os/info",
json={
"result": "ok",
"data": {"board": "odroid-n2", "version": "2020.11.1"},
},
)
data = await hassio_handler.get_os_info()
assert aioclient_mock.call_count == 1
assert data["board"] == "odroid-n2"
assert data["version"] == "2020.11.1"
async def test_api_host_info_error(hassio_handler, aioclient_mock):
"""Test setup with API Home Assistant info error."""
aioclient_mock.get(
"http://127.0.0.1/host/info", json={"result": "error", "message": None}
)
with pytest.raises(HassioAPIError):
await hassio_handler.get_host_info()
assert aioclient_mock.call_count == 1
async def test_api_core_info(hassio_handler, aioclient_mock):
"""Test setup with API Home Assistant Core info."""
aioclient_mock.get(
"http://127.0.0.1/core/info",
json={"result": "ok", "data": {"version_latest": "1.0.0"}},
)
data = await hassio_handler.get_core_info()
assert aioclient_mock.call_count == 1
assert data["version_latest"] == "1.0.0"
async def test_api_core_info_error(hassio_handler, aioclient_mock):
"""Test setup with API Home Assistant Core info error."""
aioclient_mock.get(
"http://127.0.0.1/core/info", json={"result": "error", "message": None}
)
with pytest.raises(HassioAPIError):
await hassio_handler.get_core_info()
assert aioclient_mock.call_count == 1
async def test_api_homeassistant_stop(hassio_handler, aioclient_mock):
"""Test setup with API Home Assistant stop."""
aioclient_mock.post("http://127.0.0.1/homeassistant/stop", json={"result": "ok"})
assert await hassio_handler.stop_homeassistant()
assert aioclient_mock.call_count == 1
async def test_api_homeassistant_restart(hassio_handler, aioclient_mock):
"""Test setup with API Home Assistant restart."""
aioclient_mock.post("http://127.0.0.1/homeassistant/restart", json={"result": "ok"})
assert await hassio_handler.restart_homeassistant()
assert aioclient_mock.call_count == 1
async def test_api_addon_info(hassio_handler, aioclient_mock):
"""Test setup with API Add-on info."""
aioclient_mock.get(
"http://127.0.0.1/addons/test/info",
json={"result": "ok", "data": {"name": "bla"}},
)
data = await hassio_handler.get_addon_info("test")
assert data["name"] == "bla"
assert aioclient_mock.call_count == 1
async def test_api_addon_stats(hassio_handler, aioclient_mock):
"""Test setup with API Add-on stats."""
aioclient_mock.get(
"http://127.0.0.1/addons/test/stats",
json={"result": "ok", "data": {"memory_percent": 0.01}},
)
data = await hassio_handler.get_addon_stats("test")
assert data["memory_percent"] == 0.01
assert aioclient_mock.call_count == 1
async def test_api_discovery_message(hassio_handler, aioclient_mock):
"""Test setup with API discovery message."""
aioclient_mock.get(
"http://127.0.0.1/discovery/test",
json={"result": "ok", "data": {"service": "mqtt"}},
)
data = await hassio_handler.get_discovery_message("test")
assert data["service"] == "mqtt"
assert aioclient_mock.call_count == 1
async def test_api_retrieve_discovery(hassio_handler, aioclient_mock):
"""Test setup with API discovery message."""
aioclient_mock.get(
"http://127.0.0.1/discovery",
json={"result": "ok", "data": {"discovery": [{"service": "mqtt"}]}},
)
data = await hassio_handler.retrieve_discovery_messages()
assert data["discovery"][-1]["service"] == "mqtt"
assert aioclient_mock.call_count == 1
async def test_api_ingress_panels(hassio_handler, aioclient_mock):
"""Test setup with API Ingress panels."""
aioclient_mock.get(
"http://127.0.0.1/ingress/panels",
json={
"result": "ok",
"data": {
"panels": {
"slug": {
"enable": True,
"title": "Test",
"icon": "mdi:test",
"admin": False,
}
}
},
},
)
data = await hassio_handler.get_ingress_panels()
assert aioclient_mock.call_count == 1
assert data["panels"]
assert "slug" in data["panels"]
|
googleapis/python-container | samples/generated_samples/container_v1_generated_cluster_manager_set_network_policy_sync.py | Python | apache-2.0 | 1,448 | 0.000691 | # -*- coding: utf-8 -* | -
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR | CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for SetNetworkPolicy
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-container
# [START container_v1_generated_ClusterManager_SetNetworkPolicy_sync]
from google.cloud import container_v1
def sample_set_network_policy():
    """Call ClusterManager.SetNetworkPolicy and print the response.

    NOTE(review): the request is constructed with no fields; a real call
    needs the cluster identifier and the network policy filled in.
    """
    # Create a client
    client = container_v1.ClusterManagerClient()
    # Initialize request argument(s)
    request = container_v1.SetNetworkPolicyRequest(
    )
    # Make the request
    response = client.set_network_policy(request=request)
    # Handle the response
    print(response)
# [END container_v1_generated_ClusterManager_SetNetworkPolicy_sync]
|
rahlk/WarnPlan | warnplan/commons/tools/axe/where2.py | Python | mit | 10,028 | 0.028022 | """
# A Better Where
WHERE2 is a near-linear time top-down clustering alogithm.
WHERE2 updated an older where with new Python tricks.
## Standard Header Stuff
"""
from __future__ import division,print_function
import sys
sys.dont_write_bytecode = True
from lib import *
from nasa93 import *
"""
## Dimensionality Reduction with Fastmp
Project data in N dimensions down to a single dimension connecting
two distant points. Divide that data at the median of those projections.
"""
def fastmap(m,data):
  """Divide data into two halves using distance to two distant items.

  Picks two far-apart poles (west, east), projects every item onto the
  west-east line with the cosine rule, and splits the projections at the
  median.  Returns (wests, west, easts, east, c) where c is the pole
  distance.
  """
  import random
  random.seed(1)  # fixed seed: the initial pick below is repeatable across runs
  one = any(data) # 1) pick anything -- NOTE(review): 'any' here is lib's random picker, not the builtin
  west = furthest(m,one,data) # 2) west is as far as you can go from anything
  east = furthest(m,west,data) # 3) east is as far as you can go from west
  c = dist(m,west,east)
  # now find everyone's distance
  lst = []
  for one in data:
    a = dist(m,one,west)
    b = dist(m,one,east)
    x = (a*a + c*c - b*b)/(2*c) # cosine rule
    y = max(0, a**2 - x**2)**0.5 # not used, here for a demo
    lst += [(x,one)]
  lst = sorted(lst)  # order by projection onto the west-east line
  mid = len(lst)//2
  wests = map(second,lst[:mid])
  easts = map(second,lst[mid:])
  return wests,west, easts,east,c
def gt(x,y): return x > y
def lt(x,y): return x < y
"""
In the above:
+ _m_ is some model that generates candidate
solutions that we wish to niche.
+ _(west,east)_ are not _the_ most distant points
(that would require _N*N) distance
calculations). But they are at least very distant
to each other.
This code needs some helper functions. _Dist_ uses
the standard Euclidean measure. Note that you tune
what it uses to define the niches (decisions or
objectives) using the _what_ parameter:
"""
def dist(m,i,j,
         what = lambda m: m.decisions):
  """Euclidean distance 0 <= d <= 1 between decisions.

  'what' selects which columns participate (decisions by default);
  each column value is min-max normalized before differencing.
  """
  # NOTE(review): the normalizer starts at len(i.cells) and then also
  # accumulates the column weights -- confirm this mixed denominator is intended.
  n = len(i.cells)
  deltas = 0
  for c in what(m):
    n1 = norm(m, c, i.cells[c])
    n2 = norm(m, c, j.cells[c])
    inc = (n1-n2)**2
    deltas += inc
    n += abs(m.w[c])
  return deltas**0.5 / n**0.5
"""
The _Dist_ function normalizes all the raw values zero to one.
"""
def norm(m, c, val):
  """Min-max scale *val* for column *c* of model *m* into roughly [0, 1).

  A small epsilon in the denominator guards against division by zero
  when a column's lo and hi bounds coincide.
  """
  lo, hi = m.lo[c], m.hi[c]
  return (val - lo) / (hi - lo + 0.0001)
"""
Now we can define _furthest_:
"""
def furthest(m,i,all,
             init = 0,
             better = gt):
  """Find which member of 'all' is furthest from 'i'.

  'better' and 'init' parameterize the search direction: with (gt, 0)
  this maximizes distance; closest() reuses it with (lt, huge) to minimize.
  """
  out,d= i,init
  for j in all:
    if i == j: continue  # never pick the probe item itself
    tmp = dist(m,i,j)
    if better(tmp,d):
      out,d = j,tmp
  return out
"""
And of course, _closest_:
"""
def closest(m,i,all):
  """Return the member of 'all' nearest to 'i' (furthest with inverted compare)."""
  return furthest(m,i,all,init=10**32,better=lt)
"""
## WHERE2 = Recursive Fastmap
WHERE2 finds everyone's else's distance from the poles
and divide the data.dat on the mean point of those
distances. This all stops if:
+ Any division has _tooFew_ solutions (say,
less than _sqrt_ of the total number of
solutions).
+ Something has gone horribly wrong and you are
recursing _tooDeep_
This code is controlled by the options in [_The_ settings](settingspy). For
example, if _The.pruning_ is true, we may ignore
some sub-tree (this process is discussed, later on).
Also, if _The.verbose_ is true, the _show_
function prints out a little tree showing the
progress (and to print indents in that tree, we use
the string _The.b4_). For example, here's WHERE2
dividing 93 examples from NASA93.
---| _where |-----------------
93
|.. 46
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. 47
|.. |.. 23
|.. |.. |.. 11
|.. |.. |.. |.. 5.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. 24
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
|.. |.. |.. 12
|.. |.. |.. |.. 6.
|.. |.. |.. |.. 6.
WHERE2 returns clusters, where each cluster contains
multiple solutions.
"""
def where2(m, data, lvl=0, up=None):
  """Recursive fastmap clustering: split 'data' into a binary tree of niches.

  Recursion stops when a split would be too small (The.what.minSize) or
  too deep (The.what.depthMax); leaves carry the data in node.val.
  """
  node = o(val=None,_up=up,_kids=[])
  def tooDeep(): return lvl > The.what.depthMax
  def tooFew() : return len(data) < The.what.minSize
  def show(suffix):
    # optional progress tree, controlled by The.verbose
    if The.verbose:
      print(The.what.b4*lvl,len(data),
            suffix,' ; ',id(node) % 1000,sep='')
  if tooDeep() or tooFew():
    show(".")
    node.val = data  # leaf: keep the cluster members
  else:
    show("")
    wests,west, easts,east,c = fastmap(m,data)
    node.update(c=c,east=east,west=west)
    # maybePrune can cut a dominated half (GALE-style pruning)
    goLeft, goRight = maybePrune(m,lvl,west,east)
    if goLeft:
      node._kids += [where2(m, wests, lvl+1, node)]
    if goRight:
      node._kids += [where2(m, easts, lvl+1, node)]
  return node
"""
## An Experimental Extensions
Lately I've been experimenting with a system that
prunes as it divides the data.dat. GALE checks for
domination between the poles and ignores data.dat in
halves with a dominated pole. This means that for
_N_ solutions we only ever have to evaluate
_2*log(N)_ of them- which is useful if each
evaluation takes a long time.
The niches found in this way
contain non-dominated poles; i.e. they are
approximations to the Pareto frontier.
Preliminary results show that this is a useful
approach but you should treat those results with a
grain of salt.
In any case, this code supports that pruning as an
optional extra (and is enabled using the
_slots.pruning_ flag). In summary, this code says if
the scores for the poles are more different that
_slots.wriggle_ and one pole has a better score than
the other, then ignore the other pole.
"""
def maybePrune(m,lvl,west,east):
  """Usually, go left then right, unless dominated.

  With The.prune enabled and below depth The.what.depthMin, skip the half
  whose pole scores clearly worse (difference above The.wriggle).
  """
  goLeft, goRight = True,True # default
  if The.prune and lvl >= The.what.depthMin:
    sw = scores(m, west)
    se = scores(m, east)
    if abs(sw - se) > The.wriggle: # big enough to consider
      if se > sw: goLeft = False # no left
      if sw > se: goRight = False # no right
  return goLeft, goRight
"""
Note that I do not allow pruning until we have
descended at least _slots.depthMin_ into the tree.
### Model-specific Stuff
WHERE2 talks to models via the the following model-specific variables:
+ _m.cols_: list of indices in a list
+ _m.names_: a list of names for each column.
+ _m.decisions_: the subset of cols relating to decisions.
+ _m.obectives_: the subset of cols relating to objectives.
+ _m.eval(m,eg)_: function for computing variables from _eg_.
+ _m.lo[c]_ : the lowest value in column _c_.
+ _m.hi[c]_ : the highest value in column _c_.
+ _m.w[c]_: the weight for each column. Usually equal to one.
If an objective and if we are minimizing that objective, then the weight is negative.
### Model-general stuff
Using the model-specific stuff, WHERE2 defines some
useful general functions.
"""
def some(m,x) :
  "with variable x of model m, pick one value at random"
  # 'by' comes from lib; presumably returns a random fraction of its argument -- TODO confirm
  return m.lo[x] + by(m.hi[x] - m.lo[x])
def scores(m,it):
  """Score an individual: weighted, normalized distance over objectives.

  Evaluates the individual lazily (m.eval runs once; it.scored caches).
  Objectives with negative weight are being minimized, so their normalized
  value is flipped before aggregation.
  """
  if not it.scored:
    m.eval(m,it)
    new, w = 0, 0
    for c in m.objectives:
      val = it.cells[c]
      w += abs(m.w[c])
      tmp = norm(m,c,val)
      if m.w[c] < 0:
        tmp = 1 - tmp  # minimized objective: lower raw value is better
      new += (tmp**2)
    it.score = (new**0.5) / (w**0.5)
    it.scored = True
  return it.score
"""
## Tree Code
Tools for manipulating the tree returned by _where2_.
### Primitive: Walk the nodes
"""
def nodes(tree, seen=None, steps=0):
  """Depth-first generator over a where2 tree.

  Yields (node, depth) pairs, visiting each node at most once; *seen*
  accumulates ids of already-yielded nodes across the recursion.
  """
  seen = [] if seen is None else seen
  if not tree:
    return
  if id(tree) in seen:
    return
  seen.append(id(tree))
  yield tree, steps
  for child in tree._kids:
    for descendant, depth in nodes(child, seen, steps + 1):
      yield descendant, depth
"""
### Return nodes that are leaves
"""
def leaves(tree, seen=None, steps=0):
  """Yield (node, depth) for every childless node reachable from *tree*."""
  for current, depth in nodes(tree, seen, steps):
    if current._kids:
      continue
    yield current, depth
"""
### Return nodes nearest to furthest
"""
def neighbors(leaf,seen=None,steps=-1):
"""Walk the tree from 'leaf' increasingly
distant leaves. """
if seen is None: seen=[]
for down,steps1 in leaves(leaf,seen,steps+1):
yield down,steps1
if leaf:
for up,steps1 i |
whitzhu/kolibri | kolibri/logger/permissions.py | Python | mit | 989 | 0.004044 | from kolibri.auth.permissions.base import BasePermissions
class AnonymousUsersCanWriteAnonymousLogs(BasePermissions):
    """
    Permissions class that allows anonymous users to create logs with no associated user.
    """
    # NOTE: this block was syntactically broken by stray '|' characters; restored.

    def user_can_create_object(self, user, obj):
        # Only anonymous users may create, and only logs not attached to any user.
        return user.is_anonymous() and not obj.user

    def user_can_read_object(self, user, obj):
        # Anonymous logs are write-only through this permission class.
        return False

    def user_can_update_object(self, user, obj):
        # this one is a bit worrying, since anybody could update anonymous logs, but at least only if they have the ID
        # (and this is needed, in order to allow a ContentSessionLog to be updated within a session -- in theory,
        # we could add date checking in here to not allow further updating after a couple of days)
        return user.is_anonymous() and not obj.user

    def user_can_delete_object(self, user, obj):
        # Deletion is never permitted, even for the anonymous creator.
        return False

    def readable_by_user_filter(self, user, queryset):
        # Nothing written via this class is readable by anyone.
        return queryset.none()
|
scottkmaxwell/papa | tests/executables/echo_client.py | Python | mit | 429 | 0 | import sys
import socket
from papa.utils import cast_string, send_with_retry | , recv_with_retry
__author__ = 'Scott Maxwell'
if len(sys.argv) != 2:
sys.stderr.write(' | Need one port number\n')
sys.exit(1)
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect(('127.0.0.1', int(sys.argv[1])))
send_with_retry(sock, b'howdy\n')
data = recv_with_retry(sock)
sys.stdout.write(cast_string(data))
sock.close()
|
DavidMcDonald1993/ghsom | parameter_tests_edges.py | Python | gpl-2.0 | 7,042 | 0.013206 |
# coding: utf-8
# In[1]:
import os
from shutil import copyfile
import subprocess
from save_embedded_graph27 import main_binary as embed_main
from spearmint_ghsom import main as ghsom_main
import numpy as np
import pickle
from time import time
def save_obj(obj, name):
    """Pickle *obj* to the file '<name>.pkl' using the highest protocol."""
    target = name + '.pkl'
    with open(target, 'wb') as handle:
        pickle.dump(obj, handle, pickle.HIGHEST_PROTOCOL)
def load_obj(name):
    """Unpickle and return the object stored at '<name>.pkl'."""
    source = name + '.pkl'
    with open(source, 'rb') as handle:
        return pickle.load(handle)
#root dir
os.chdir("C:\Miniconda3\Jupyter\GHSOM_simplex_dsd")
#save directory
dir = os.path.abspath("parameter_tests_edges")
#number of times to repeat
num_repeats = 30
#number of nodes in the graph
N = 64
#make save directory
if not os.path.isdir(dir):
os.mkdir(dir)
#change to dir
os.chdir(dir)
#network file names -- output of network generator
network = "network.dat"
first_level = "community.dat"
#community labels
labels = 'firstlevelcommunity'
#mixing factors
mu = 0.1
num_edges_ls = [256, 512, 1024]
parameter_settings = [0.5, 0.6, 0.7, 0.8, 0.9, 1][::-1]
overall_nmi_scores = np.zeros((len(num_edges_ls), len(parameter_settings)))
for i in range(len(num_edges_ls)):
#number of edges
num_edges = num_edges_ls[i]
#create directory
dir_string = os.path.join(dir, str(num_edges))
if not os.path.isdir(dir_string):
os.mkdir(dir_string)
#change working directory
os.chdir(dir_string)
for j in range(len(parameter_settings)):
#setti | ng fo e_sg
p = parameter_se | ttings[j]
#ghsom parameters
params = {'w': 0.0001,
'eta': 0.0001,
'sigma': 1,
'e_sg': p,
'e_en': 0.8}
#create directory
dir_string_p = os.path.join(dir_string, str(p))
if not os.path.isdir(dir_string_p):
os.mkdir(dir_string_p)
#change working directory
os.chdir(dir_string_p)
if os.path.isfile('nmi_scores.csv'):
print 'already completed {}/{}, loading scores and continuing'.format(k1, p)
nmi_scores = np.genfromtxt('nmi_scores.csv', delimiter=',')
overall_nmi_scores[i,j] = np.mean(nmi_scores, axis=0)
continue
#copy executable
ex = "benchmark.exe"
if not os.path.isfile(ex):
source = "C:\\Users\\davem\\Documents\\PhD\\Benchmark Graph Generators\\binary_networks\\benchmark.exe"
copyfile(source, ex)
#record NMI scores
if not os.path.isfile('nmi_scores.pkl'):
print 'creating new nmi scores array'
nmi_scores = np.zeros(num_repeats)
else:
print 'loading nmi score progress'
nmi_scores = load_obj('nmi_scores')
#record running times
if not os.path.isfile('running_times.pkl'):
print 'creating new running time array'
running_times = np.zeros(num_repeats)
else:
print 'loading running time progress'
running_times = load_obj('running_times')
print
#generate networks
for r in range(1, num_repeats+1):
#number of communities
num_communities = np.random.randint(1,5)
#number of nodes in micro community
minc = np.floor(float(N) / num_communities)
maxc = np.ceil(float(N) / num_communities)
#average number of edges
k = float(num_edges) / N
#max number of edges
maxk = 2 * k
#make benchmark parameter file
filename = "benchmark_flags_{}_{}_{}.dat".format(num_edges,p,r)
if not os.path.isfile(filename):
print 'number of edges: {}'.format(num_edges)
print 'number of communities: {}'.format(num_communities)
print '-N {} -k {} -maxk {} -minc {} -maxc {} -mu {}'.format(N, k, maxk, minc, maxc, mu)
with open(filename,"w") as f:
f.write("-N {} -k {} -maxk {} -minc {} -maxc {} -mu {}".format(N, k, maxk, minc, maxc, mu))
print 'written flag file: {}'.format(filename)
#cmd strings
change_dir_cmd = "cd {}".format(dir_string_p)
generate_network_cmd = "benchmark -f {}".format(filename)
#output of cmd
output_file = open("cmd_output.out", 'w')
network_rename = "{}_{}".format(r,network)
first_level_rename = "{}_{}".format(r,first_level)
gml_filename = 'embedded_network_{}.gml'.format(r)
if not os.path.isfile(network_rename):
process = subprocess.Popen(change_dir_cmd + " && " + generate_network_cmd,
stdout=output_file,
stderr=output_file,
shell=True)
process.wait()
print 'generated graph {}'.format(r)
os.rename(network, network_rename)
os.rename(first_level, first_level_rename)
print 'renamed graph {}'.format(r)
if not os.path.isfile(gml_filename):
##embed graph
embed_main(network_rename, first_level_rename)
print 'embedded graph {} as {} in {}'.format(r, gml_filename, os.getcwd())
##score for this network
if not np.all(nmi_scores[r-1]):
start_time = time()
print 'starting ghsom for: {}/{}/{}'.format(num_edges, p, gml_filename)
nmi_score, communities_detected = ghsom_main(params, gml_filename, labels)
nmi_scores[r-1] = nmi_score
running_time = time() - start_time
print 'running time of algorithm: {}'.format(running_time)
running_times[r-1] = running_time
#save
save_obj(nmi_scores, 'nmi_scores')
save_obj(running_times, 'running_times')
print 'saved nmi score for network {}: {}'.format(gml_filename, nmi_score)
print
##output nmi scores to csv file
print 'writing nmi scores and running times to file'
np.savetxt('nmi_scores.csv',nmi_scores,delimiter=',')
np.savetxt('running_times.csv',running_times,delimiter=',')
print
#odd to overall list
overall_nmi_scores[i,j] = np.mean(nmi_scores, axis=0)
print 'DONE'
print 'OVERALL NMI SCORES'
print overall_nmi_scores
# In[3]:
for scores in overall_nmi_scores:
print scores
idx = np.argsort(scores)[::-1]
print parameter_settings[idx[0]]
|
jgruhl/djtokeninput | lib/djtokeninput/__init__.py | Python | mit | 111 | 0 | #!/usr/bin/env pyth | on
from djtokeninput.field | s import TokenField
from djtokeninput.widgets import TokenWidget
|
frankcash/censorship-analyser | dnscompare.py | Python | bsd-3-clause | 3,151 | 0.008569 | # -*- coding: utf-8 -*-
from ooni.utils import log
from twisted.python import usage
from twisted.internet import defer
from ooni.templates import dnst
class UsageOptions(usage.Options):
    """Command-line options accepted by the DNS lookup test."""
    # Fixed typo in user-facing help text: 'Speficy' -> 'Specify'.
    optParameters = [
        ['target', 't', None, 'Specify a single hostname to query.'],
        ['expected', 'e', None, 'Specify file containing expected lookup results'],
    ]
class DNSLookup(dnst.DNSTest):
    """Compare A-record lookups from the host's resolvers against expected IPs.

    Every nameserver in /etc/resolv.conf is queried for the target hostname;
    the test FAILs when any returned address falls outside the expected set.
    Google's 8.8.8.8 resolver is queried separately to provide control data.

    NOTE: stray '|' characters had corrupted two statements below; restored.
    """
    name = "DNSLookupTest"
    version = 0.1
    usageOptions = UsageOptions
    def setUp(self):
        self.expected_results = []
        self.dns_servers = []
        # Target hostname: per-input value, then --target, then the default.
        if self.input:
            self.hostname = self.input
        elif self.localOptions['target']:
            self.hostname = self.localOptions['target']
        else:
            self.hostname = "torproject.org"
        if self.localOptions['expected']:
            # One expected IP address per line.
            with open(self.localOptions['expected']) as f:
                for line in f:
                    self.expected_results.append(line.strip())
        else:
            # Known A records for torproject.org at the time of writing.
            self.expected_results = [
                '154.35.132.70',
                '38.229.72.14',
                '38.229.72.16',
                '82.195.75.101',
                '86.59.30.40',
                '93.95.227.222'
            ]
        self.report['expected_results'] = self.expected_results
        # Collect the system's configured resolvers.
        with open('/etc/resolv.conf') as f:
            for line in f:
                if line.startswith('nameserver'):
                    self.dns_servers.append(line.split(' ')[1].strip())
        self.report['dns_servers'] = self.dns_servers
    def verify_results(self, results):
        """Return True when every resolved address is in the expected set."""
        for result in results:
            if result not in self.expected_results:
                return False
        return True
    @defer.inlineCallbacks
    def test_dns_comparison(self):
        """
        Performs A lookup on specified host and matches the results
        against a set of expected results. When not specified, host and
        expected results default to "torproject.org" and
        ['38.229.72.14', '38.229.72.16', '82.195.75.101', '86.59.30.40', '93.95.227.222'].
        """
        for s in self.dns_servers:
            dnsServer = (s, 53)
            results = yield self.performALookup(self.hostname, dnsServer)
            if results:
                if self.verify_results(results):
                    self.report['TestStatus'] = 'OK'
                else:
                    self.report['TestStatus'] = 'FAILED'
                    self.report['TestException'] = 'unexpected results'
    @defer.inlineCallbacks
    def test_control_results(self):
        """
        Googles 8.8.8.8 server is queried, in order to generate
        control data.
        """
        results = yield self.performALookup(self.hostname, ("8.8.8.8", 53))
        if results:
            self.report['control_results'] = results
|
robhowley/treetl | tests/test_parent_data_param.py | Python | mit | 1,534 | 0.006519 |
import unittest
class TestParentDataParams(unittest.TestCase):
    """Verify that parent jobs' transformed data is passed into child transforms.

    Builds a small ETL graph (A, B independent; C needs A+B; D needs A+C)
    and checks each job's loaded output against the expected values.

    NOTE: stray '|' characters had corrupted two class statements below; restored.
    """
    def setUp(self):
        from treetl import Job
        # JobA..JobD are expected to produce 1..4 respectively.
        self.expected_results = { jn: i+1 for i, jn in enumerate([ 'JobA', 'JobB', 'JobC', 'JobD' ]) }
        self.actual_results = { }
        def update_actual_results(job):
            self.actual_results[job.__class__.__name__] = job.transformed_data
        class LoadToDict(Job):
            # record each job's output in actual_results during the load phase
            def load(self, **kwargs):
                update_actual_results(self)
        class JobA(LoadToDict):
            def transform(self, **kwargs):
                self.transformed_data = 1
        class JobB(LoadToDict):
            def transform(self, **kwargs):
                self.transformed_data = 2
        @Job.dependency(b_data=JobB, a_data=JobA)
        class JobC(LoadToDict):
            # parents' outputs arrive as keyword args: 1 + 2 == 3
            def transform(self, a_data=None, b_data=None, **kwargs):
                self.transformed_data = a_data + b_data
        @Job.dependency(a_data=JobA, c_data=JobC)
        class JobD(LoadToDict):
            # 1 + 3 == 4
            def transform(self, a_data=None, c_data=None, **kwargs):
                self.transformed_data = a_data + c_data
        # deliberately out of dependency order; the runner must sort them
        self.jobs = [ JobD(), JobA(), JobC(), JobB() ]
    def test_parent_data_params(self):
        from treetl import JobRunner
        JobRunner(self.jobs).run()
        self.assertDictEqual(
            d1=self.expected_results,
            d2=self.actual_results,
            msg='Error in transformed data loaded to dict'
        )
if __name__ == '__main__':
unittest.main()
|
nwalters512/the-blue-alliance | helpers/event_simulator.py | Python | mit | 15,162 | 0.001649 | from collections import defaultdict
import copy
import datetime
import json
from appengine_fixture_loader.loader import load_fixture
from google.appengine.ext import ndb
from helpers.event_details_manipulator import EventDetailsManipulator
from helpers.match_helper import MatchHelper
from helpers.match_manipulator import MatchManipulator
from models.event import Event
from models.event_details import EventDetails
from models.match import Match
class EventSimulator(object):
"""
Steps through an event in time. At step = 0, only the Event exists:
(step 0) Add all unplayed qual matches
(step 1, substep n) Add results of each of the n qual matches +
rankings being updated (if has_event_details)
(step 2) Add alliance selections (if has_event_details)
(step 3) Add unplayed QF matches
(step 4, substep n) Add results of each of the n QF matches +
update SF matches with advancing alliances (if not batch_advance) +
update alliance selection backups (if has_event_details)
(step 5) Add unplayed SF matches (if batch_advance)
(step 6, substep n) Add results of each of the n SF matches +
update F matches with advancing alliances (if not batch_advance) +
update alliance selection backups (if has_event_details)
(step 7) Add unplayed F matches (if batch_advance)
(step 8, substep n) Add results of each of the n F matches +
update alliance selection backups (if has_event_details)
"""
def __init__(self, has_event_details=True, batch_advance=False):
self._step = 0
self._substep = 0
# whether to update rankings and alliance selections
self._has_event_details = has_event_details
# whether to update next playoff level all at once, or as winners are determined
self._batch_advance = batch_advance
# Load and save complete data
load_fixture('test_data/fixtures/2016nytr_event_team_status.json',
kind={'EventDetails': EventDetails, 'Event': Event, 'Match': Match},
post_processor=self._event_key_adder)
event = Event.get_by_id('2016nytr')
# Add 3rd matches that never got played
unplayed_matches = [
Match(
id='2016nytr_qf1m3',
year=2016,
event=event.key,
comp_level='qf',
set_number=1,
match_number=3,
alliances_json=json.dumps({
'red': {
'teams': ['frc3990', 'frc359', 'frc4508'],
'score': -1,
},
'blue': {
'teams': ['frc3044', 'frc4930', 'frc4481'],
'score': -1,
}
}),
time=datetime.datetime(2016, 3, 19, 18, 34),
),
Match(
id='2016nytr_qf3m3',
year=2016,
event=event.key,
comp_level='qf',
set_number=3,
match_number=3,
alliances_json=json.dumps({
'red': {
'teams': ['frc20', 'frc5254', 'frc229'],
'score': -1,
},
'blue': {
'teams': ['frc3003', 'frc358', 'frc527'],
'score': -1,
}
}),
time=datetime.datetime(2016, 3, 19, 18, 48),
),
Match(
id='2016nytr_sf1m3',
year=2016,
event=event.key,
comp_level='sf',
set_number=1,
match_number=3,
alliances_json=json.dumps({
'red': {
'teams': ['frc3990', 'frc359', 'frc4508'],
'score': -1,
},
'blue': {
'teams': ['frc5240', 'frc3419', 'frc663'],
'score': -1,
}
}),
time=datetime.datetime(2016, 3, 19, 19, 42),
)
]
self._event_details = event.details
self._alliance_selections_without_backup = copy.deepcopy(event.details.alliance_selections)
self._alliance_selections_without_backup[1]['backup'] = None
self._played_matches = MatchHelper.organizeMatches(event.matches)
self._all_matches = MatchHelper.organizeMatches(event.matches + unplayed_matches)
# Delete data
event.details.key.delete()
ndb.delete_multi([match.key for match in event.matches])
ndb.get_context().clear_cache()
# Used to keep track of non-batch advancement
self._advancement_alliances = defaultdict(dict)
def _event_key_adder(self, obj):
obj.event = ndb.Key(Event, '2016nytr')
def _update_rankings(self):
"""
Generates and saves fake rankings
"""
event = Event.get_by_id('2016nytr')
team_wins = defaultdict(int)
team_losses = defaultdict(int)
team_ties = defaultdict(int)
teams = set()
for match in event.matches:
if match.comp_level == 'qm':
for alliance in ['red', 'blue']:
for team in match.alliances[alliance]['teams']: |
teams.add(team)
if match.has_been_played:
if alliance == match.winning_alliance:
team_wins[team] += 1
elif match.winning_alliance == '':
team_ties[team] += 1
| else:
team_losses[team] += 1
rankings = []
for team in sorted(teams):
wins = team_wins[team]
losses = team_losses[team]
ties = team_ties[team]
rankings.append({
'team_key': team,
'record': {
'wins': wins,
'losses': losses,
'ties': ties,
},
'matches_played': wins + losses + ties,
'dq': 0,
'sort_orders': [2 * wins + ties, 0, 0, 0, 0],
'qual_average': None,
})
rankings = sorted(rankings, key=lambda r: -r['sort_orders'][0])
for i, ranking in enumerate(rankings):
ranking['rank'] = i + 1
EventDetailsManipulator.createOrUpdate(EventDetails(
id='2016nytr',
rankings2=rankings,
))
def step(self):
event = Event.get_by_id('2016nytr')
if self._step == 0: # Qual match schedule added
for match in copy.deepcopy(self._all_matches['qm']):
for alliance in ['red', 'blue']:
match.alliances[alliance]['score'] = -1
match.alliances_json = json.dumps(match.alliances)
match.score_breakdown_json = None
match.actual_time = None
MatchManipulator.createOrUpdate(match)
self._step += 1
elif self._step == 1: # After each qual match
MatchManipulator.createOrUpdate(self._played_matches['qm'][self._substep])
if self._substep < len(self._played_matches['qm']) - 1:
self._substep += 1
else:
self._step += 1
self._substep = 0
EventDetailsManipulator.createOrUpdate(EventDetails(id='2016nytr'))
elif self._step == 2: # After alliance selections
EventDetailsManipulator.createOrUpdate(EventDetails(
id='2016nytr',
alliance_selections=self._alliance_selections_without_backup
))
self._step += 1
elif self._step == 3: # QF schedule added
for match in copy.deepcopy(self._all_matches['qf']):
for alliance in ['red', 'blue']:
match.alliances[alliance]['score'] = -1
match. |
dssg/wikienergy | disaggregator/build/pandas/pandas/tools/plotting.py | Python | mit | 116,674 | 0.000917 | # being a bit too dynamic
# pylint: disable=E1101
import datetime
import warnings
import re
from math import ceil
from collections import namedtuple
from contextlib import contextmanager
from distutils.version import LooseVersion
import numpy as np
from pandas.util.decorators import cache_readonly, deprecate_kwarg
import pandas.core.common as com
from pandas.core.generic import _shared_docs, _shared_doc_kwargs
from pandas.core.index import Index, MultiIndex
from pandas.core.series import Series, remove_na
from pandas.tseries.index import DatetimeIndex
from pandas.tseries.period import PeriodIndex, Period
import pandas.tseries.frequencies as frequencies
from pandas.tseries.offsets import DateOffset
from pandas.compat import range, lrange, lmap, map, zip, string_types
import pandas.compat as compat
from pandas.util.decorators import Appender
try: # mpl optional
import pandas.tseries.converter as conv
conv.register() # needs to override so set_xlim works with str/number
except ImportError:
pass
# Extracted from https://gist.github.com/huyng/816622
# this is the rcParams set when setting display.with_mpl_style
# to True.
mpl_stylesheet = {
'axes.axisbelow': True,
'axes.color_cycle': ['#348ABD',
'#7A68A6',
'#A60628',
'#467821',
'#CF4457',
'#188487',
'#E24A33'],
'axes.edgecolor': '#bcbcbc',
'axes.facecolor': '#eeeeee',
'axes.grid': True,
'axes.labelcolor': '#555555',
'axes.labelsize': 'large',
'axes.linewidth': 1.0,
'axes.titlesize': 'x-large',
'figure.edgecolor': 'white',
'figure.facecolor': 'white',
'figure.figsize': (6.0, 4.0),
'figure.subplot.hspace': 0.5,
'font.family': 'monospace',
'font.monospace': ['Andale Mono',
'Nimbus Mono L',
'Courier New',
'Courier',
'Fixed',
'Terminal',
'monospace'],
'font.size': 10,
'interactive': True,
'keymap.all_axes': ['a'],
'keymap.back': ['left', 'c', 'backspace'],
'keymap.forward': ['right', 'v'],
'keymap.fullscreen': ['f'],
'keymap.grid': ['g'],
'keymap.home': | ['h', 'r', 'home'],
'keymap.pan': ['p'],
'keymap.save': ['s'],
'keymap.xscale': ['L', 'k'],
'keymap.yscale': ['l'],
'keymap.zoom': ['o'],
'l | egend.fancybox': True,
'lines.antialiased': True,
'lines.linewidth': 1.0,
'patch.antialiased': True,
'patch.edgecolor': '#EEEEEE',
'patch.facecolor': '#348ABD',
'patch.linewidth': 0.5,
'toolbar': 'toolbar2',
'xtick.color': '#555555',
'xtick.direction': 'in',
'xtick.major.pad': 6.0,
'xtick.major.size': 0.0,
'xtick.minor.pad': 6.0,
'xtick.minor.size': 0.0,
'ytick.color': '#555555',
'ytick.direction': 'in',
'ytick.major.pad': 6.0,
'ytick.major.size': 0.0,
'ytick.minor.pad': 6.0,
'ytick.minor.size': 0.0
}
def _get_standard_kind(kind):
return {'density': 'kde'}.get(kind, kind)
def _get_standard_colors(num_colors=None, colormap=None, color_type='default',
                         color=None):
    """Build a list of exactly *num_colors* colors for plotting.

    Precedence: explicit *color* wins over *colormap* (with a warning when
    both are given); otherwise *colormap* is sampled evenly; otherwise
    *color_type* selects the matplotlib default cycle or per-column
    deterministic "random" colors.  The result is recycled/truncated so its
    length matches *num_colors*.
    """
    import matplotlib.pyplot as plt
    if color is None and colormap is not None:
        if isinstance(colormap, compat.string_types):
            import matplotlib.cm as cm
            cmap = colormap
            colormap = cm.get_cmap(colormap)
            if colormap is None:
                raise ValueError("Colormap {0} is not recognized".format(cmap))
        # sample the colormap at evenly spaced points in [0, 1]
        colors = lmap(colormap, np.linspace(0, 1, num=num_colors))
    elif color is not None:
        if colormap is not None:
            warnings.warn("'color' and 'colormap' cannot be used "
                          "simultaneously. Using 'color'")
        colors = color
    else:
        if color_type == 'default':
            # need to call list() on the result to copy so we don't
            # modify the global rcParams below
            colors = list(plt.rcParams.get('axes.color_cycle',
                                           list('bgrcmyk')))
            if isinstance(colors, compat.string_types):
                colors = list(colors)
        elif color_type == 'random':
            import random
            def random_color(column):
                # seeding on the column index makes "random" colors reproducible
                random.seed(column)
                return [random.random() for _ in range(3)]
            colors = lmap(random_color, lrange(num_colors))
        else:
            raise NotImplementedError
    if len(colors) != num_colors:
        # recycle the palette whole-number times, then pad with a partial pass
        multiple = num_colors//len(colors) - 1
        mod = num_colors % len(colors)
        colors += multiple * colors
        colors += colors[:mod]
    return colors
class _Options(dict):
"""
Stores pandas plotting options.
Allows for parameter aliasing so you can just use parameter names that are
the same as the plot function parameters, but is stored in a canonical
format that makes it easy to breakdown into groups later
"""
# alias so the names are same as plotting method parameter names
_ALIASES = {'x_compat': 'xaxis.compat'}
_DEFAULT_KEYS = ['xaxis.compat']
def __init__(self):
self['xaxis.compat'] = False
def __getitem__(self, key):
key = self._get_canonical_key(key)
if key not in self:
raise ValueError('%s is not a valid pandas plotting option' % key)
return super(_Options, self).__getitem__(key)
def __setitem__(self, key, value):
key = self._get_canonical_key(key)
return super(_Options, self).__setitem__(key, value)
def __delitem__(self, key):
key = self._get_canonical_key(key)
if key in self._DEFAULT_KEYS:
raise ValueError('Cannot remove default parameter %s' % key)
return super(_Options, self).__delitem__(key)
def __contains__(self, key):
key = self._get_canonical_key(key)
return super(_Options, self).__contains__(key)
def reset(self):
"""
Reset the option store to its initial state
Returns
-------
None
"""
self.__init__()
def _get_canonical_key(self, key):
return self._ALIASES.get(key, key)
@contextmanager
def use(self, key, value):
"""
Temporarily set a parameter value using the with statement.
Aliasing allowed.
"""
old_value = self[key]
try:
self[key] = value
yield self
finally:
self[key] = old_value
plot_params = _Options()
def scatter_matrix(frame, alpha=0.5, figsize=None, ax=None, grid=False,
diagonal='hist', marker='.', density_kwds=None,
hist_kwds=None, range_padding=0.05, **kwds):
"""
Draw a matrix of scatter plots.
Parameters
----------
frame : DataFrame
alpha : float, optional
amount of transparency applied
figsize : (float,float), optional
a tuple (width, height) in inches
ax : Matplotlib axis object, optional
grid : bool, optional
setting this to True will show the grid
diagonal : {'hist', 'kde'}
pick between 'kde' and 'hist' for
either Kernel Density Estimation or Histogram
plot in the diagonal
marker : str, optional
Matplotlib marker type, default '.'
hist_kwds : other plotting keyword arguments
To be passed to hist function
density_kwds : other plotting keyword arguments
To be passed to kernel density estimate plot
range_padding : float, optional
relative extension of axis range in x and y
with respect to (x_max - x_min) or (y_max - y_min),
default 0.05
kwds : other plotting keyword arguments
To be passed to scatter function
Examples
--------
>>> df = DataFrame(np.random.randn(1000, 4), columns=['A','B','C','D'])
>>> scatter_matrix(df, alpha=0.2)
"""
import matplotlib.pyplot as plt
from matplotlib.artist import setp
df = frame._get_numeric_data()
n = df.columns.size
naxes = n * n
fig, axes = _subplots(naxes=naxes, figsize=figsize, ax=ax,
squeeze=False)
# no gaps between subplots
fig.subplots_a |
fridex/fabric8-analytics-worker | alembic/versions/e2762a61d34c_upstream_url_can_be_null.py | Python | gpl-3.0 | 1,411 | 0.001417 | """Upstream URL can be null
Revision ID: e2762a61d34c
Revises: f5c853b83d41
Create Date: 2017-07-31 08:41:39.811488
"""
# revision identifiers, used by Alembic.
revision = 'e2762a61d34c'
down_revision = 'f5c853b83d41'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply the migration: make the monitored upstream URL nullable and
    tighten/loosen api_requests columns accordingly.

    NOTE: stray '|' characters had corrupted two statements below; restored.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('api_requests', 'api_name',
                    existing_type=sa.VARCHAR(length=256),
                    nullable=False)
    op.alter_column('api_requests', 'request_digest',
                    existing_type=sa.VARCHAR(length=128),
                    nullable=True)
    op.alter_column('monitored_upstreams', 'url',
                    existing_type=sa.VARCHAR(length=255),
                    nullable=True)
    # ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('monitored_upstreams', 'url',
existing_type=sa.VARCHAR(length=255),
nullable=False)
op.alter_column('api_requests', 'request_digest',
existing_type=sa.VARCHAR(length=128),
nullable=False)
op.alter_column('api_requests', 'api_name',
existing_type=sa.VARCHAR(length=256),
nullable=True)
# ### end Alembic commands ###
|
robinson96/GRAPE | stashy/stashy/admin/users.py | Python | bsd-3-clause | 3,086 | 0.00324 | from ..helpers import ResourceBase, FilteredIterableResource
from ..errors import ok_or_error, response_or_error
from ..compat import update_doc
class | Users(ResourceBase, FilteredIterableResource):
@response_or_error
def add(self, name, password, displayName, emailAddress, addToDefaultGroup=True):
"""
Add a user, returns a dictionary containing information about the newly created user
"""
| data = dict(name=name,
password=password,
displayName=displayName,
emailAddress=emailAddress,
addToDefaultGroup=addToDefaultGroup)
return self._client.post(self.url(), data)
@ok_or_error
def delete(self, user):
"""
Delete a user.
"""
return self._client.delete(self.url(), params=dict(name=user))
@response_or_error
def update(self, name, displayName=None, emailAddress=None):
"""
Update the user information, and return the updated user info.
None is used as a sentinel value, use empty string if you mean to clear.
"""
data = dict(name=name)
if displayName is not None:
data['displayName'] = displayName
if data is not None:
data['emailAddress'] = emailAddress
return self._client.put(self.url(), data)
@ok_or_error
def credentials(self, name, new_password):
"""
Update a user's password.
"""
data = dict(name=name, password=new_password, passwordConfirm=new_password)
return self._client.put(self.url(), data)
@ok_or_error
def add_group(self, user, group):
"""
Add the given user to the given user.
"""
return self._client.post(self.url("/add-group"), dict(context=user, itemName=group))
@ok_or_error
def remove_group(self, user, group):
"""
Remove the given user from the given group.
"""
return self._client.post(self.url("/remove-group"), dict(context=user, itemName=group))
def more_members(self, user, filter=None):
"""
Retrieves a list of groups the specified user is a member of.
filter: if specified only groups with names containing the supplied string will be returned
"""
params = dict(context=user)
if filter:
params['filter'] = filter
return self.paginate("/more-members", params)
def more_non_members(self, user, filter=None):
"""
Retrieves a list of groups that the specified user is not a member of
filter: if specified only groups with names containing the supplied string will be returned
"""
params = dict(context=user)
if filter:
params['filter'] = filter
return self.paginate("/more-non-members", params)
update_doc(Users.all, """
Returns an iterator that will walk all the users, paginating as necessary.
filter: return only users with usernames, display name or email addresses containing the supplied string
""")
|
Carreau/sphinx_numfig | setup.py | Python | bsd-3-clause | 1,529 | 0.001308 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
readme = open('README.rst').read()
history = open('HISTORY.rst').read().replace('.. :changelog:', '')
requirem | ents = [
# TODO: put package requirements here
]
test_requirements = [
# TODO: p | ut package test requirements here
]
setup(
name='sphinx_numfig',
version='0.1.0',
description='Python Boilerplate contains all the boilerplate you need to create a Python package.',
long_description=readme + '\n\n' + history,
author='Matthias Bussonnier',
author_email='bussonniermatthias@gmail.com',
url='https://github.com/Carreau/sphinx_numfig',
packages=[
'sphinx_numfig',
],
package_dir={'sphinx_numfig':
'sphinx_numfig'},
include_package_data=True,
install_requires=requirements,
license="BSD",
zip_safe=False,
keywords='sphinx_numfig',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
],
test_suite='tests',
tests_require=test_requirements
) |
rouault/mapnik | tests/python_tests/ogr_test.py | Python | lgpl-2.1 | 4,110 | 0.009978 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from nose.tools import eq_,assert_almost_equal,raises
from utilities import execution_path, run_all
import os, mapnik
def setup():
# All of the paths used are relative, if we run the tests
# from another directory we need to chdir()
os.chdir(execution_path('.'))
if 'ogr' in mapnik.DatasourceCache.plugin_names():
# Shapefile initialization
def test_shapefile_init():
s = mapnik.Ogr(file='../../demo/data/boundaries.shp',layer_by_index=0)
e = s.envelope()
assert_almost_equal(e.minx, -11121.6896651, places=7)
assert_almost_equal(e.miny, -724724.216526, places=6)
assert_almost_equal(e.maxx, 2463000.67866, places=5)
assert_almost_equal(e.maxy, 1649661.267, places=3)
# Shapefile properties
def test_shapefile_properties():
ds = mapnik.Ogr(file='../../demo/data/boundaries.shp',layer_by_index=0)
f = ds.features_at_point(ds.envelope().center(), 0.001).features[0]
eq_(ds.geometry_type(),mapnik.DataGeometryType.Polygon)
eq_(f['CGNS_FID'], u'6f733341ba2011d892e2080020a0f4c9')
eq_(f['COUNTRY'], u'CAN')
eq_(f['F_CODE'], u'FA001')
eq_(f['NAME_EN'], u'Quebec')
eq_(f['Shape_Area'], 1512185733150.0)
eq_(f['Shape_Leng'], 19218883.724300001)
# NOTE: encoding is latin1 but gdal >= 1.9 should now expose utf8 encoded features
# See SHAPE_ENCODING for overriding: http://gdal.org/ogr/drv_shapefile.html
# Failure for the NOM_FR field is expected for older gdal
#eq_(f['NOM_FR'], u'Qu\xe9bec')
#eq_(f['NOM_FR'], u'Québec')
@raises(RuntimeError)
def test_that_nonexistant_query_field_throws(**kwargs):
ds = mapnik.Ogr(file='../data/shp/world_merc.shp',layer_by_index=0)
eq_(len(ds.fields()),11)
eq_(ds.fields(),['FIPS', 'ISO2', 'ISO3', 'UN', 'NAME', 'AREA', 'POP2005', 'REGION', 'SUBREGION', 'LON', 'LAT'])
eq_(ds.field_types(),['str', 'str', 'str', 'int', 'str', 'int', 'int', 'int', 'int', 'float', 'float'])
query = mapnik.Query(ds.envelope())
for fld in ds.fields():
query.add_property_name(fld)
# also add an invalid one, triggering throw
query.add_property_name('bogus')
ds.features(query)
# disabled because OGR pr | ints an annoying error: ERROR 1: Invalid Point object. Missing 'coordinates' member.
#def test_handling_of_null_features():
# ds = mapnik.Ogr(file='../data/json/null_feature.geojson',layer_by_index=0)
# fs = ds.all_features()
# eq_(len(fs),1)
# OGR plugin extent parameter
def test_ogr_extent_parameter():
ds = mapnik.Ogr(file='../data/shp/world_me | rc.shp',layer_by_index=0,extent='-1,-1,1,1')
e = ds.envelope()
eq_(e.minx,-1)
eq_(e.miny,-1)
eq_(e.maxx,1)
eq_(e.maxy,1)
def test_ogr_reading_gpx_waypoint():
ds = mapnik.Ogr(file='../data/gpx/empty.gpx',layer='waypoints')
e = ds.envelope()
eq_(e.minx,-122)
eq_(e.miny,48)
eq_(e.maxx,-122)
eq_(e.maxy,48)
def test_ogr_empty_data_should_not_throw():
default_logging_severity = mapnik.logger.get_severity()
mapnik.logger.set_severity(mapnik.severity_type.None)
# use logger to silence expected warnings
for layer in ['routes', 'tracks', 'route_points', 'track_points']:
ds = mapnik.Ogr(file='../data/gpx/empty.gpx',layer=layer)
e = ds.envelope()
eq_(e.minx,0)
eq_(e.miny,0)
eq_(e.maxx,0)
eq_(e.maxy,0)
mapnik.logger.set_severity(default_logging_severity)
# disabled because OGR prints an annoying error: ERROR 1: Invalid Point object. Missing 'coordinates' member.
#def test_handling_of_null_features():
# ds = mapnik.Ogr(file='../data/json/null_feature.geojson',layer_by_index=0)
# fs = ds.all_features()
# eq_(len(fs),1)
if __name__ == "__main__":
setup()
exit(run_all(eval(x) for x in dir() if x.startswith("test_")))
|
ccxt/ccxt | python/ccxt/binancecoinm.py | Python | mit | 1,253 | 0.000798 | # -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.binance import binance
class binancecoinm(binance):
def describe(self):
return self.deep_extend(super(binancecoinm, self).describe(), {
'id': 'binancecoinm',
'name': 'Binance COIN-M',
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/117738721-668c8d80-b205-11eb-8c49-3fad84c4a07f.jpg',
'doc': [
'https://binance-docs.github.io/apidocs/delivery/en/',
'https://binance-docs.github.io/apidocs/ | spot/en',
],
},
'options': {
'defaultType': 'delivery',
'leverageBrackets': None,
},
})
def transfer_in(self, code, amount, params={}):
# transfer from spot wallet to coinm futures wallet
return self.futuresTransfer(code, amount, 3, params)
def transfer_out(self, code, amount, params={}):
# transfer from coinm futures wallet to spot wallet
return self.futuresTransfer(code, a | mount, 4, params)
|
seerjk/reboot06 | 01/exec04.py | Python | mit | 231 | 0 | deposit = 10000
year = 0
interests = 0.0325
# in | terests = 0.10
money = deposit
total_income = 2 * deposit
while money < total_income:
year += 1
money = money*(1+inte | rests)
# print money
print "Need %d years." % year
|
MITPERG/oilsands-mop | run.py | Python | mit | 74 | 0.013514 | # | !/usr/bin/env python
from app impor | t app
app.run(debug=True, port=5000) |
zsh2401/PYCLT | pyclt/res/__init__.py | Python | gpl-3.0 | 211 | 0.015075 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
from pyclt.res.languages import *
import json
import os
def getText(language_type):
'''获取语言字符'' | '
if language_type | =="zh_cn":
return zh_cn()
|
ifduyue/sentry | src/sentry/integrations/__init__.py | Python | bsd-3-clause | 350 | 0 | from __future__ import absolute_import
from .analytics import * # NOQA
from .base import * # | NOQA
from .manager import IntegrationManager # NOQA
default_manager = IntegrationManager()
all = default_manager.all
get = d | efault_manager.get
exists = default_manager.exists
register = default_manager.register
unregister = default_manager.unregister
|
GhostshipSoftware/avaloria | src/utils/dummyrunner/memplot.py | Python | bsd-3-clause | 3,601 | 0.006109 | """
Script that saves memory and idmapper data over time.
Data will be saved to game/logs/memoryusage.log. Note that
the script will append to this file if it already exists.
Call this module directly to plot the log (requires matplotlib and numpy).
"""
import os, sys
import time
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))))
os.environ['DJANGO_SETTINGS_MODULE'] = 'game.settings'
import ev
from src.utils.idmapper import base as _idmapper
LOGFILE = "logs/memoryusage.log"
INTERVAL = 30 # log every 30 seconds
class Memplot(ev.Script):
def at_script_creation(self):
self.key = "memplot"
self.desc = "Save server memory stats to file"
self.start_delay = False
self.persistent = True
self.interva | l = INTERVAL
self.db.starttime = time.time()
def at_repeat(self):
pid = os.getpid()
rmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, | "rss")).read()) / 1000.0 # resident memory
vmem = float(os.popen('ps -p %d -o %s | tail -1' % (pid, "vsz")).read()) / 1000.0 # virtual memory
total_num, cachedict = _idmapper.cache_size()
t0 = (time.time() - self.db.starttime) / 60.0 # save in minutes
with open(LOGFILE, "a") as f:
f.write("%s, %s, %s, %s\n" % (t0, rmem, vmem, int(total_num)))
if __name__ == "__main__":
# plot output from the file
from matplotlib import pyplot as pp
import numpy
data = numpy.genfromtxt("../../../game/" + LOGFILE, delimiter=",")
secs = data[:,0]
rmem = data[:,1]
vmem = data[:,2]
nobj = data[:,3]
# calculate derivative of obj creation
#oderiv = (0.5*(nobj[2:] - nobj[:-2]) / (secs[2:] - secs[:-2])).copy()
#oderiv = (0.5*(rmem[2:] - rmem[:-2]) / (secs[2:] - secs[:-2])).copy()
fig = pp.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("1000 bots (normal players with light building)")
ax1.set_xlabel("Time (mins)")
ax1.set_ylabel("Memory usage (MB)")
ax1.plot(secs, rmem, "r", label="RMEM", lw=2)
ax1.plot(secs, vmem, "b", label="VMEM", lw=2)
ax1.legend(loc="upper left")
ax2 = ax1.twinx()
ax2.plot(secs, nobj, "g--", label="objs in cache", lw=2)
#ax2.plot(secs[:-2], oderiv/60.0, "g--", label="Objs/second", lw=2)
#ax2.plot(secs[:-2], oderiv, "g--", label="Objs/second", lw=2)
ax2.set_ylabel("Number of objects")
ax2.legend(loc="lower right")
ax2.annotate("First 500 bots\nconnecting", xy=(10, 4000))
ax2.annotate("Next 500 bots\nconnecting", xy=(350,10000))
#ax2.annotate("@reload", xy=(185,600))
# # plot mem vs cachesize
# nobj, rmem, vmem = nobj[:262].copy(), rmem[:262].copy(), vmem[:262].copy()
#
# fig = pp.figure()
# ax1 = fig.add_subplot(111)
# ax1.set_title("Memory usage per cache size")
# ax1.set_xlabel("Cache size (number of objects)")
# ax1.set_ylabel("Memory usage (MB)")
# ax1.plot(nobj, rmem, "r", label="RMEM", lw=2)
# ax1.plot(nobj, vmem, "b", label="VMEM", lw=2)
#
## # empirical estimate of memory usage: rmem = 35.0 + 0.0157 * Ncache
## # Ncache = int((rmem - 35.0) / 0.0157) (rmem in MB)
#
# rderiv_aver = 0.0157
# fig = pp.figure()
# ax1 = fig.add_subplot(111)
# ax1.set_title("Relation between memory and cache size")
# ax1.set_xlabel("Memory usage (MB)")
# ax1.set_ylabel("Idmapper Cache Size (number of objects)")
# rmem = numpy.linspace(35, 2000, 2000)
# nobjs = numpy.array([int((mem - 35.0) / 0.0157) for mem in rmem])
# ax1.plot(rmem, nobjs, "r", lw=2)
pp.show()
|
adhoc-dev/odoo-argentina | l10n_ar_account_withholding/models/account_move.py | Python | agpl-3.0 | 1,785 | 0 | from odoo import models, fields
class AccountMove(models.Model):
_inherit = "account.move"
def _get_tax_factor(self):
tax_factor = super()._get_tax_factor()
doc_letter = self.l10n_latam_document_type_id.l10n_ar_letter
# if we receive B invoices, then we take out 21 of vat
# this use of case if when company is except on vat for eg.
if tax_factor == 1.0 and doc_letter == 'B':
tax_factor = 1.0 / 1.21
return tax_factor
def get_taxes_values(self):
"""
Hacemos esto para disponer de fecha de factura y cia para calcular
impuesto con código python (por ej. para ARBA).
Aparentemente no se puede cambiar el contexto a cosas que se llaman
desde un onchange (ver https://github.com/odoo/odoo/issues/7472)
entonces usamos este artilugio
"""
invoice_date = self.invoice_date or fields.Date.context_today(self)
# hacemos try porque al llamarse desde acciones de servidor | da error
try:
self.env.context.invoice_date = invoice_date
self.env.context.invoice_company = self.compa | ny_id
except Exception:
pass
return super().get_taxes_values()
class AccountMoveLine(models.Model):
_inherit = "account.move.line"
def _compute_price(self):
# ver nota en get_taxes_values
invoice = self.move_id
invoice_date = invoice.invoice_date or fields.Date.context_today(self)
# hacemos try porque al llamarse desde acciones de servidor da error
try:
self.env.context.invoice_date = invoice_date
self.env.context.invoice_company = self.company_id
except Exception:
pass
return super()._compute_price()
|
jor-/matrix-decomposition | matrix/__init__.py | Python | agpl-3.0 | 1,947 | 0.003082 | # *** submodules *** #
from matrix import constants, decompositions, errors, approximation, nearest
# *** functions *** #
from matrix.calculate import (is_positive_semidefinite, is_positive_definite, is_invertible,
decompose, solve)
# *** constants *** #
from matrix.constants import (
DECOMPOSITION_TYPES,
LDL_DECOMPOSITION_TYPE, LDL_DECOMPOSITION_COMPRESSED_TYPE, LL_DECOMPOSITION_TYPE,
UNIVERSAL_PERMUTATION_METHODS, SPARSE_ONLY_PERMUTATION_METHODS,
NO_PERMUTATION_METHOD,
DECREASING_DIAGONAL_VALUES_PERMUTATION_METHOD, INCREASING_DIAGONAL_VALUES_PERMUTATION_METHOD,
DECREASING_ABSOLUTE_DIAGONAL_VALUES_PERMUTATION_METHOD,
INCREASING_ABSOLUTE_DIAGONAL_VALUES_PERMUTATION_METHOD)
DECOMPOSITION_TYPES = DECOMPOSITION_TYPES
""" Supported types of decompositions. """
UNIVERSAL_PERMUTATION_METHODS = UNIVERSAL_PERMUTATION_METHODS
""" Supported permutation methods for decompose dense and sparse matrices. """
SPARSE_ONLY_PERMUTATION_METHODS = SPARSE_ONLY_PERMUTATION_METHODS
""" Supported permutation methods only | for sparse matrices. """
# *** version *** #
from ._version import get_versions
__ | version__ = get_versions()['version']
del get_versions
# *** logging *** #
import logging
logger = logging.getLogger(__name__)
del logging
# *** deprecated *** #
def __getattr__(name):
deprecated_names = ['decomposition', 'positive_definite_matrix',
'positive_semidefinite_matrix', 'APPROXIMATION_ONLY_PERMUTATION_METHODS']
if name in deprecated_names:
import warnings
warnings.warn(f'"matrix.{name}" is deprecated. Take a look at'
' "matrix.approximation.positive_semidefinite" instead.',
DeprecationWarning, stacklevel=2)
import matrix.approximate
return matrix.approximate.__getattribute__(name)
raise AttributeError(f'Module {__name__} has no attribute {name}.')
|
google/telluride_decoding | test/regression_data_test.py | Python | apache-2.0 | 3,840 | 0.001563 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for telluride_decoding.regression_data."""
import os
from absl import flags
from absl.testing import absltest
from telluride_decoding import brain_data
from telluride_decoding import regression_data
import tensorflow.compat.v2 as tf
# Note these tests do NOT test the data download cdoe. These are hard to test,
# only run occasionally, and are obvious when they don't work in real use.
class TellurideDataTest(absltest.TestCase):
def setUp(self):
super(TellurideDataTest, self).setUp()
self._test_data_dir = os.path.join(
flags.FLAGS.test_srcdir, '__main__',
'test_data/')
def test_data_ingestion(self):
cache_dir = os.path.join(self._test_data_dir, 'telluride4')
tmp_dir = self.create_tempdir().full_path
tf_dir = os.path.join(tmp_dir, 'telluride4_tf')
# Create the data object and make sure we have the downloaded archive file.
rd = regression_data.RegressionDataTelluride4()
self.assertTrue(rd.is_data_local(cache_dir))
# Now ingest the data, making sure it's not present at start, then present.
self.assertFalse(rd.is_data_ingested(tmp_dir))
rd.ingest_data(cache_dir, tf_dir, 128)
self.assertTrue(rd.is_data_ingested(tf_dir))
# Check the data files.
test_file = os.path.join(tf_dir, 'trial_01.tfrecords')
features = br | ain_data.discover_feature_shapes(test_file)
print('Telluride features:', features)
self.assertIn('eeg', features)
self.assertEqual(features['eeg'].shape, [63])
self.assertIn('intensity', features)
self.assertEqual(features['intensity'].shape, [1])
self.assertEqual(brain_data.count_tfrecords(test_file), (8297, False))
class JensMemoryDataTest(absltest.TestCase):
def setUp(self):
super(JensMemoryDataTest, self).setUp()
self._test_data_di | r = os.path.join(
flags.FLAGS.test_srcdir, '__main__',
'test_data/')
def test_data_ingestion(self):
cache_dir = os.path.join(self._test_data_dir, 'jens_memory')
tmp_dir = self.create_tempdir().full_path
tf_dir = os.path.join(tmp_dir, 'jens_memory')
num_subjects = 1 # Only 1 of 22 subjects loaded for test.
num_trials = 5 # That one subject has been shortened to 5/40 trials.
# Create the data object and make sure we have the downloaded archive file.
rd = regression_data.RegressionDataJensMemory()
self.assertTrue(rd.is_data_local(cache_dir, num_subjects))
# Now ingest the data, making sure it's not present at start, then present.
self.assertFalse(rd.is_data_ingested(tmp_dir, num_subjects))
rd.ingest_data(cache_dir, tf_dir, 128)
self.assertTrue(rd.is_data_ingested(tf_dir, num_subjects, num_trials))
# Check the data files.
test_file = os.path.join(tf_dir, 'subject_01', 'trial_01.tfrecords')
features = brain_data.discover_feature_shapes(test_file)
self.assertIn('eeg', features)
self.assertEqual(features['eeg'].shape, [69])
self.assertIn('intensity', features)
self.assertEqual(features['intensity'].shape, [1])
self.assertEqual(brain_data.count_tfrecords(test_file), (7442, False))
if __name__ == '__main__':
tf.compat.v1.enable_v2_behavior()
absltest.main()
|
PeteTheAutomator/ACServerManager | session/migrations/0013_auto_20160904_1252.py | Python | mit | 858 | 0.001166 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('session', '0012_auto_20160904_1130'),
]
oper | ations = [
migrations.AlterField(
model_name='serversetting',
name='minorating_grade',
field=models.CharField(default=b'ABCN', help | _text=b"Minorating Grade required to join this server's sessions (driver proficiency - see http://www.minorating.com/Grades for details)", max_length=8, choices=[(b'A', b'A - exemplary'), (b'AB', b'AB - clean racer (or better)'), (b'ABC', b'ABC - rookie (or better)'), (b'ABCN', b'ABCN - rookie or new/unlisted racers (or better)'), (b'ABCDN', b'ABCDN - dirty racers welcome'), (b'ABCDNW', b'ABCNW - anybody (including wreckers)')]),
),
]
|
ish/wsgiapptools | wsgiapptools/flash.py | Python | bsd-3-clause | 3,252 | 0.002153 | """
"Flash" messaging support.
A "flash" message is a message displayed on a web page that is removed next
request.
"""
__all__ = ['add_message', 'get_messages', 'get_flash',
'flash_middleware_factory']
import itertools
import webob
from wsgiapptools import cookies
ENVIRON_KEY = 'wsgiapptools.flash'
COOKIE_NAME = 'flash'
def add_message(environ, message, type=None):
"""
Add the flash message to the Flash manager in the WSGI environ."
"""
return get_flash(environ).add_message(message, type)
def get_messages(environ):
"""
Get the flasg messages from the Flash manager in the WSGI environ.
"""
return get_flash(environ).get_messages()
def get_flash(environ):
"""
Get the flash manager from the environ.
"""
return environ[ENVIRON_KEY]
class Flash(object):
"""
Flash message manager, associated with a WSGI environ.
"""
def __init__(self, environ):
| self.request = webob.Request(environ)
self.flashes = []
def add_message(self, message, type=None):
"""
Add a new flas | h message.
Note: this can be called multiple times to set multiple messages. The
messages can be retrieved, using get_messages below, and will be returned
in the order they were added.
"""
if type is None:
type = ''
self.flashes.append('%s:%s'% (type, message))
def get_messages(self):
"""
Retrieve flash messages found in the request's cookies, returning them as a
list of (type, message) tuples and deleting the cookies.
"""
messages = []
cookies_mgr = cookies.get_cookies(self.request.environ)
for i in itertools.count():
cookie_name = '%s.%d'% (COOKIE_NAME, i)
# Try to find the next message. Leave the loop if it does not exist.
message = self.request.cookies.get(cookie_name)
if not message:
break
# Remove the cookie, presumably it will be displayed shortly.
cookies_mgr.delete_cookie(cookie_name)
# Parse and yield the message.
try:
type, message = message.split(':', 1)
except ValueError:
# Skip an unparseable cookie value.
pass
else:
messages.append((type or None, message))
return messages
def flash_middleware_factory(app):
"""
Create a flash middleware WSGI application around the given WSGI
application.
"""
def middleware(environ, start_response):
def _start_response(status, response_headers, exc_info=None):
# Iterate the new flash messages in the WSGI, setting a 'flash'
# cookie for each one.
flash = environ[ENVIRON_KEY]
cookies_mgr = cookies.get_cookies(environ)
for i, flash in enumerate(flash.flashes):
cookies_mgr.set_cookie(('%s.%d'% (COOKIE_NAME, i), flash))
# Call wrapped app's start_response.
return start_response(status, response_headers, exc_info)
environ[ENVIRON_KEY] = Flash(environ)
return app(environ, _start_response)
return middleware
|
hjanime/bcbio-nextgen | bcbio/rnaseq/variation.py | Python | mit | 3,793 | 0.001318 | import os
from bcbio.utils import file_exists
import bcbio.pipeline.datadict as dd
from bcbio.ngsalign.postalign import dedup_bam
from bcbio.distributed.transaction import file_transaction
from bcbio import broad, bam
def rnaseq_gatk_variant_calling(data):
data = dd.set_deduped_bam(data, dedup_bam(dd.get_work_bam(data), data))
data = gatk_splitreads(data)
data = gatk_rnaseq_calling(data)
return data
def gatk_splitreads(data):
"""
use GATK to split reads with Ns in the CIGAR string, hard clipping regions
that end up in introns
"""
broad_runner = broad.runner_from_config(dd.get_config(data))
ref_file = dd.get_ref_file(data)
deduped_bam = dd.get_deduped_bam(data)
base, ext = os.path.splitext(deduped_bam)
split_bam = base + ".splitN" + ext
if dd.get_quality_format(data) == "illumina":
quality_flag = ["--fix_misencoded_quality_scores", "-fixMisencodedQuals"]
else:
quality_flag = []
if file_exists(split_bam):
data = dd.set_split_bam(data, split_bam)
return data
with file_transaction(split_bam) as tx_split_bam:
params = ["-T", "SplitNCigarReads",
| "-R", ref_file,
"-I", deduped_bam,
"-o", tx_split_bam,
"-rf", "ReassignOneMappingQuality",
"-RMQF", "255",
"-RMQT", "60",
"-rf", "UnmappedRead",
"-U", "ALLOW_N_CIGAR_READS"] + quality_flag
broad_runner.run_gatk(params)
bam.index(split_bam, dd.get_confi | g(data))
data = dd.set_split_bam(data, split_bam)
return data
def gatk_rnaseq_calling(data):
"""
use GATK to perform variant calling on RNA-seq data
"""
broad_runner = broad.runner_from_config(dd.get_config(data))
ref_file = dd.get_ref_file(data)
split_bam = dd.get_split_bam(data)
out_file = os.path.splitext(split_bam)[0] + ".gvcf"
num_cores = dd.get_num_cores(data)
if file_exists(out_file):
data = dd.set_vrn_file(data, out_file)
return data
with file_transaction(out_file) as tx_out_file:
params = ["-T", "HaplotypeCaller",
"-R", ref_file,
"-I", split_bam,
"-o", tx_out_file,
"-nct", str(num_cores),
"--emitRefConfidence", "GVCF",
"--variant_index_type", "LINEAR",
"--variant_index_parameter", "128000",
"-dontUseSoftClippedBases",
"-stand_call_conf", "20.0",
"-stand_emit_conf", "20.0"]
broad_runner.run_gatk(params)
data = dd.set_vrn_file(data, out_file)
return data
def gatk_joint_calling(data, vrn_files, ref_file, out_file=None):
if out_file is None:
out_file = os.path.join("variation", "combined.vcf")
if not file_exists(out_file):
out_file = _run_genotype_gvcfs(data, vrn_files, ref_file, out_file)
return out_file
def _run_genotype_gvcfs(data, vrn_files, ref_file, out_file):
if not file_exists(out_file):
broad_runner = broad.runner_from_config(data["config"])
with file_transaction(data, out_file) as tx_out_file:
params = ["-T", "GenotypeGVCFs",
"-R", ref_file, "-o", tx_out_file]
for vrn_file in vrn_files:
params += ["--variant", vrn_file]
broad_runner.new_resources("gatk-haplotype")
cores = dd.get_cores(data)
if cores > 1:
params += ["-nt", str(cores)]
memscale = {"magnitude": 0.9 * cores, "direction": "increase"}
else:
memscale = None
broad_runner.run_gatk(params, memscale=memscale)
return out_file
|
felixcarmona/coveragit | run.py | Python | mit | 128 | 0 | #!/usr/bin/ | env python
from coveragit.application.console import Application
if __name__ == "__main__":
Application( | ).run()
|
USGSDenverPychron/pychron | pychron/extraction_line/extraction_line_manager.py | Python | apache-2.0 | 34,407 | 0.000872 | # ===============================================================================
# Copyright 2011 Jake Ross
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
import logging
import time
from socket import gethostbyname, gethostname
from threading import Thread
# =============enthought library imports=======================
from apptools.preferences.preference_binding import bind_preference
from pyface.timer.do_later import do_after
from traits.api import (
Instance,
List,
Any,
| Bool,
on_trait_change,
Str,
Int,
Dict,
File,
Float,
Enum,
)
from pychron.canvas.canvas_editor import CanvasEditor
from pychron.core.file_listener import FileListener
from pychron.core.ui.gui import invoke_in_main_thread
from pychron.core.wait.wait_group import WaitGroup
from pychron.envisage.consoleable import Consoleable
from pychron.extraction_line import LOG_LEVEL_NAMES, LOG_LEVELS
from pychron.extraction_line.explanation.extraction_line_explanation import (
Ex | tractionLineExplanation,
)
from pychron.extraction_line.extraction_line_canvas import ExtractionLineCanvas
from pychron.extraction_line.graph.extraction_line_graph import ExtractionLineGraph
from pychron.extraction_line.sample_changer import SampleChanger
from pychron.globals import globalv
from pychron.hardware.core.i_core_device import ICoreDevice
from pychron.managers.manager import Manager
from pychron.monitors.system_monitor import SystemMonitor
from pychron.pychron_constants import NULL_STR
MANAGERS = {
"manometer_manager": (
"pychron.extraction_line.manometer_manager",
"ManometerManager",
),
"cryo_manager": ("pychron.extraction_line.cryo_manager", "CryoManager"),
"gauge_manager": ("pychron.extraction_line.gauge_manager", "GaugeManager"),
"heater_manager": ("pychron.extraction_line.heater_manager", "HeaterManager"),
"pump_manager": ("pychron.extraction_line.pump_manager", "PumpManager"),
}
class ExtractionLineManager(Manager, Consoleable):
"""
Manager for interacting with the extraction line
contains reference to valve manager, gauge manager and laser manager
"""
canvas = Instance(ExtractionLineCanvas)
canvases = List
plugin_canvases = List
explanation = Instance(ExtractionLineExplanation, ())
monitor = Instance(SystemMonitor)
switch_manager = Any
gauge_manager = Any
cryo_manager = Any
multiplexer_manager = Any
manometer_manager = Any
pump_manager = Any
heater_manager = Any
network = Instance(ExtractionLineGraph)
readback_items = List
runscript = None
learner = None
mode = "normal"
valve_state_frequency = Int
valve_lock_frequency = Int
check_master_owner = Bool
use_network = Bool
display_volume = Bool
volume_key = Str
sample_changer = Instance(SampleChanger)
link_valve_actuation_dict = Dict
canvas_path = File
canvas_config_path = File
use_hardware_update = Bool
hardware_update_period = Float
file_listener = None
wait_group = Instance(WaitGroup, ())
console_bgcolor = "black"
_active = False
_update_status_flag = None
_monitoring_valve_status = False
canvas_editor = Instance(CanvasEditor, ())
logging_level = Enum(LOG_LEVEL_NAMES)
def set_extract_state(self, *args, **kw):
pass
def activate(self):
self._active = True
self._load_additional_canvases()
self._activate_hook()
self.reload_canvas()
devs = self.application.get_services(ICoreDevice)
self.devices = devs
def deactivate(self):
for t in ("gauge", "heater", "pump"):
self.info("start {} scans".format(t))
man = getattr(self, "{}_manager".format(t))
man.stop_scans()
if self.monitor:
self.monitor.stop()
self._active = False
self._deactivate_hook()
def bind_preferences(self):
prefid = "pychron.extraction_line"
attrs = (
"canvas_path",
"canvas_config_path",
"use_hardware_update",
"hardware_update_period",
"check_master_owner",
"use_network",
"logging_level",
)
for attr in attrs:
try:
bind_preference(self, attr, "{}.{}".format(prefid, attr))
except BaseException as e:
print("fffffffff", attr, e)
bind_preference(
self.network, "inherit_state", "{}.inherit_state".format(prefid)
)
self.console_bind_preferences("{}.console".format(prefid))
for t in ("gauge", "heater", "pump"):
man = getattr(self, "{}_manager".format(t))
if man:
bind_preference(
man,
"period",
"{}.{}_update_period".format(prefid, t),
)
bind_preference(
man,
"update_enabled",
"{}.{}_update_enabled".format(prefid, t),
)
if self.canvas:
bind_preference(
self.canvas.canvas2D,
"display_volume",
"{}.display_volume".format(prefid),
)
bind_preference(
self.canvas.canvas2D, "volume_key", "{}.volume_key".format(prefid)
)
def link_valve_actuation(self, name, func, remove=False):
if remove:
try:
del self.link_valve_actuation_dict[name]
except KeyError:
self.debug(
'could not remove "{}". not in dict {}'.format(
name, ",".join(list(self.link_valve_actuation_dict.keys()))
)
)
else:
self.debug(
'adding name="{}", func="{}" to link_valve_actuation_dict'.format(
name, func.__name__
)
)
self.link_valve_actuation_dict[name] = func
def enable_auto_reload(self):
self.file_listener = fm = FileListener(
path=self.canvas_path, callback=self.reload_canvas
)
def disable_auto_reload(self):
if self.file_listener:
self.file_listener.stop()
def do_sample_loading(self):
"""
1. isolate chamber
2.
:return:
"""
sc = self._sample_changer_factory()
if sc:
if self.confirmation_dialog("Ready to Isolate Chamber"):
self._handle_console_message(("===== Isolate Chamber =====", "maroon"))
if not sc.isolate_chamber():
return
else:
return
if self.confirmation_dialog("Ready to Evacuate Chamber"):
self._handle_console_message(("===== Evacuate Chamber =====", "maroon"))
err = sc.check_evacuation()
if err:
name = sc.chamber
msg = "Are you sure you want to evacuate the {} chamber. {}".format(
name, err
)
if not self.confirmation_dialog(msg):
return
if not sc.evacuate_chamber():
return
else:
return
if self.confirmation_dialog("Ready to Finish Sample Change"):
self._handle_console_message(
("===== Finish Sample Change =====", "maroon")
)
|
iancze/Starfish | Starfish/spectrum.py | Python | bsd-3-clause | 9,193 | 0.000979 | import h5py
import numpy as np
from dataclasses import dataclass
from nptyping import NDArray
from typing import Optional
@dataclass
class Order:
    """
    A data class to hold astronomical spectra orders.

    Parameters
    ----------
    _wave : numpy.ndarray
        The full wavelength array
    _flux : numpy.ndarray
        The full flux array
    _sigma : numpy.ndarray, optional
        The full sigma array. If None, will default to all 0s. Default is None
    mask : numpy.ndarray, optional
        The full mask. If None, will default to all Trues. Default is None

    Attributes
    ----------
    name : str
    """

    # NOTE(review): the docstring advertises a ``name`` attribute, but no
    # such field is defined on this class -- confirm whether it was intended.
    _wave: NDArray[float]
    _flux: NDArray[float]
    _sigma: Optional[NDArray[float]] = None
    mask: Optional[NDArray[bool]] = None

    def __post_init__(self):
        # Defaults that depend on the other fields' shapes cannot be plain
        # dataclass defaults, so fill them in after construction.
        if self._sigma is None:
            self._sigma = np.zeros_like(self._flux)
        if self.mask is None:
            self.mask = np.ones_like(self._wave, dtype=bool)

    @property
    def wave(self):
        """
        numpy.ndarray : The masked wavelength array
        """
        return self._wave[self.mask]

    @property
    def flux(self):
        """
        numpy.ndarray : The masked flux array
        """
        return self._flux[self.mask]

    @property
    def sigma(self):
        """
        numpy.ndarray : The masked flux uncertainty array
        """
        return self._sigma[self.mask]

    def __len__(self):
        # Length is the *full* (unmasked) pixel count.
        return len(self._wave)
class Spectrum:
"""
Object to store astronomical spectra.
Parameters
----------
waves : 1D or 2D array-like
wavelength in Angtsrom
fluxes : 1D or 2D array-like
flux (in f_lam)
sigmas : 1D or 2D array-like, optional
Poisson noise (in f_lam). If not specified, will be zeros. Default is None
masks : 1D or 2D array-like, optional
Mask to blot out bad pixels or emission regions. Must be castable to boolean. If None, will create a mask of all True. Default is None
name : str, optional
The name of this spectrum. Default is "Spectrum"
Note
----
If the waves, fluxes, and sigmas are provided as 1D arrays (say for a single order), they will be converted to 2D arrays with length 1 in the 0-axis.
Warning
-------
For now, the Spectrum waves, fluxes, sigmas, and masks must be a rectangular grid. No ragged Echelle orders allowed.
Attributes
----------
name : str
The name of the spectrum
"""
def __init__(self, waves, fluxes, sigmas=None, masks=None, name="Spectrum"):
waves = np.atleast_2d(waves)
fluxes = np.atleast_2d(fluxes)
if sigmas is not None:
sigmas = np.atleast_2d(sigmas)
else:
sigmas = np.ones_like(fluxes)
if masks is not None:
masks = np.atleast_2d(masks).astype(bool)
else:
masks = np.ones_like(waves, dtype=bool)
assert fluxes.shape == waves.shape, "flux array incompatible shape."
assert sigmas.shape == waves.shape, "sigma array incompatible shape."
assert masks.shape == waves.shape, "mask array incompatible shape."
self.orders = []
for i in range(len(waves)):
self.orders.append(Order(waves[i], fluxes[i], sigmas[i], masks[i]))
self.name = name
    def __getitem__(self, index: int):
        """Return the :class:`Order` at position ``index``."""
        return self.orders[index]
    def __setitem__(self, index: int, order: Order):
        """Replace the order at ``index``; its length must match the grid."""
        if len(order) != len(self.orders[0]):
            raise ValueError("Invalid order length; no ragged spectra allowed")
        self.orders[index] = order
    def __len__(self):
        """Return the number of orders in the spectrum."""
        return len(self.orders)
    def __iter__(self):
        # NOTE(review): iteration state is stored on the instance, so nested
        # or concurrent iteration over the same Spectrum will interfere.
        self._n = 0
        return self
    def __next__(self):
        """Yield successive orders using the cursor set by ``__iter__``."""
        if self._n < len(self.orders):
            # advance the cursor and return the order it pointed at
            n, self._n = self._n, self._n + 1
            return self.orders[n]
        else:
            raise StopIteration
# Masked properties
@property
def waves(self) -> np.ndarray:
"""
numpy.ndarray : The 2 dimensional masked wavelength arrays
"""
waves = [o.wave for o in self.orders]
return np.asarray(waves)
@property
def fluxes(self) -> np.ndarray:
"""
numpy.ndarray : The 2 dimensional masked flux arrays
"""
fluxes = [o.flux for o in self.orders]
return np.asarray(fluxes)
@property
def sigmas(self) -> np.ndarray:
"""
numpy.ndarray : The 2 dimensional masked flux uncertainty arrays
"""
sigmas = | [o.sigma for o in self.orders]
return np.asarray(sigmas)
# Unmasked properties
    @property
    def _waves(self) -> np.ndarray:
        """numpy.ndarray : The full (unmasked) 2-D wavelength arrays."""
        _waves = [o._wave for o in self.orders]
        return np.asarray(_waves)
    @property
    def _fluxes(self) -> np.ndarray:
        """numpy.ndarray : The full (unmasked) 2-D flux arrays."""
        _fluxes = [o._flux for o in self.orders]
        return np.asarray(_fluxes)
    @property
    def _sigmas(self) -> np.ndarray:
        """numpy.ndarray : The full (unmasked) 2-D uncertainty arrays."""
        _sigmas = [o._sigma for o in self.orders]
        return np.asarray(_sigmas)
@property
def masks(self) -> np.ndarray:
"""
np.ndarray: The full 2-dimensional boolean masks
"""
waves = [o.wave for o in self.orders]
return np.asarray(waves)
    @property
    def shape(self):
        """
        numpy.ndarray: The shape of the spectrum, *(norders, npixels)*

        :setter: Tries to reshape the data into a new arrangement of orders and pixels following numpy reshaping rules.
        """
        # npixels comes from Order.__len__, i.e. the full (unmasked) width.
        return (len(self), len(self.orders[0]))
    @shape.setter
    def shape(self, shape):
        # Delegate to ``reshape`` and adopt the resulting object's state.
        new = self.reshape(shape)
        self.__dict__.update(new.__dict__)
    def reshape(self, shape):
        """
        Reshape the spectrum to the new shape. Obeys the same rules that numpy reshaping does. Note this is not done in-place.

        Parameters
        ----------
        shape : tuple
            The new shape of the spectrum. Must abide by numpy reshaping rules.

        Returns
        -------
        Spectrum
            The reshaped spectrum
        """
        # NOTE(review): verify that ``self.masks`` really yields boolean
        # masks here -- as written, that property collects ``o.wave``.
        waves = self._waves.reshape(shape)
        fluxes = self._fluxes.reshape(shape)
        sigmas = self._sigmas.reshape(shape)
        masks = self.masks.reshape(shape)
        return self.__class__(waves, fluxes, sigmas, masks, name=self.name)
    @classmethod
    def load(cls, filename):
        """
        Load a spectrum from an hdf5 file

        Parameters
        ----------
        filename : str or path-like
            The path to the HDF5 file.

        See Also
        --------
        :meth:`save`
        """
        with h5py.File(filename, "r") as base:
            # ``name`` is stored as a file attribute and may be absent.
            if "name" in base.attrs:
                name = base.attrs["name"]
            else:
                name = None
            # ``[:]`` reads each dataset fully into memory before file close.
            waves = base["waves"][:]
            fluxes = base["fluxes"][:]
            sigmas = base["sigmas"][:]
            masks = base["masks"][:]
        return cls(waves, fluxes, sigmas, masks, name=name)
    def save(self, filename):
        """
        Takes the current DataSpectrum and writes it to an HDF5 file.

        Parameters
        ----------
        filename: str or path-like
            The filename to write to. Will not create any missing directories.

        See Also
        --------
        :meth:`load`
        """
        # NOTE(review): this writes the *masked* arrays (``self.waves`` etc.),
        # which can be ragged for nontrivial masks, and ``self.masks``
        # currently collects ``o.wave`` -- confirm the round-trip with load().
        with h5py.File(filename, "w") as base:
            base.create_dataset("waves", data=self.waves, compression=9)
            base.create_dataset("fluxes", data=self.fluxes, compression=9)
            base.create_dataset("sigmas", data=self.sigmas, compression=9)
            base.create_dataset("masks", data=self.masks, compression=9)
            if self.name is not None:
                base.attrs["name"] = self.name
def plot(self, ax=None, **kwargs):
"""
Plot all the orders of the spectrum
Parameters
----------
ax : matplotlib.Axes, optional
If provided, will plot on this axis. Otherwise, will create a new axis, by
default None
Returns
-------
matplotlib.Axes
The axis that was plotted on
"""
|
iulian787/spack | var/spack/repos/builtin/packages/bash-completion/package.py | Python | lgpl-2.1 | 1,869 | 0.00214 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class BashCompletion(AutotoolsPackage):
    """Programmable completion functions for bash."""

    homepage = "https://github.com/scop/bash-completion"
    url = "https://github.com/scop/bash-completion/archive/2.3.tar.gz"
    git = "https://github.com/scop/bash-completion.git"

    version('develop', branch='master')
    version('2.7', sha256='dba2b88c363178622b61258f35d82df64dc8d279359f599e3b93eac0375a416c')
    version('2.3', sha256='d92fcef5f6e3bbc68a84f0a7b063a1cd07b4000cc6e275cd1ff83863ab3b322a')

    # Build dependencies
    depends_on('automake', type='build')
    depends_on('autoconf', type='build')
    depends_on('libtool', type='build')

    # Other dependencies
    depends_on('bash@4.1:', type='run')

    @run_before('install')
    def create_install_directory(self):
        # Ensure the completions directory exists before `make install`
        # copies files into it.
        mkdirp(join_path(self.prefix.share, 'bash-completion', 'completions'))

    @run_after('install')
    def show_message_to_user(self):
        # Print shell-setup instructions: bash-completion only takes effect
        # once it is sourced from the user's ~/.bash_profile.
        prefix = self.prefix
        # Guidelines for individual user as provided by the author at
        # https://github.com/scop/bash-completion
        print('=====================================================')
        print('Bash completion has been installed. To use it, please')
        print('include the following lines in your ~/.bash_profile :')
        print('')
        print('# Use bash-completion, if available')
        print('[[ $PS1 && -f %s/share/bash-completion/bash_completion ]] && \ ' % prefix)  # NOQA: ignore=E501
        print('    . %s/share/bash-completion/bash_completion' % prefix)
        print('')
        print('=====================================================')
|
lavish205/olympia | src/olympia/compat/views.py | Python | bsd-3-clause | 4,194 | 0.000238 | import json
import re
from django import http
from django.db.models import Count
from django.db.transaction import non_atomic_requests
from django.shortcuts import redirect
from django.views.decorators.csrf import csrf_exempt
from olympia import amo
from olympia.addons.decorators import owner_or_unlisted_reviewer
from olympia.addons.models import Addon
from olympia.amo.decorators import post_required
from olympia.amo.utils import paginate, render
from olympia.search.utils import floor_version
from olympia.versions.compare import version_dict as vdict
from .forms import AppVerForm
from .models import CompatReport
@csrf_exempt
@post_required
@non_atomic_requests
def incoming(request):
    """Accept a JSON compatibility-report POST and persist it.

    Returns 204 on success and 400 for malformed JSON or unknown fields.
    """
    # Turn camelCase into snake_case.
    def snake_case(s):
        # FIX: use raw strings -- '\g' is an invalid string escape and the
        # pattern/replacement only worked by accident without them.
        return re.sub(r'[A-Z]+', r'_\g<0>', s).lower()

    try:
        data = [(snake_case(k), v)
                for k, v in json.loads(request.body).items()]
    except Exception:
        return http.HttpResponseBadRequest()

    # Build up a new report.
    report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', ''))
    fields = [field.name for field in CompatReport._meta.get_fields()]
    for key, value in data:
        if key in fields:
            setattr(report, key, value)
        else:
            # Reject payloads containing keys the model does not define.
            return http.HttpResponseBadRequest()
    report.save()
    return http.HttpResponse(status=204)
@non_atomic_requests
def reporter(request):
    """Resolve a search query to an add-on and redirect to its report page,
    or render the search form with the user's own add-ons."""
    query = request.GET.get('guid')
    if query:
        qs = None
        # Progressively looser lookups; an empty queryset is falsy, so each
        # fallback only runs when the previous one matched nothing.
        if query.isdigit():
            qs = Addon.objects.filter(id=query)
        if not qs:
            qs = Addon.objects.filter(slug=query)
        if not qs:
            qs = Addon.objects.filter(guid=query)
        if not qs and len(query) > 4:
            # Last resort: partial GUID match against the reports themselves.
            qs = CompatReport.objects.filter(guid__startswith=query)
        if qs:
            guid = qs[0].guid
            addon = Addon.objects.get(guid=guid)
            if (addon.has_listed_versions() or
                    owner_or_unlisted_reviewer(request, addon)):
                return redirect('compat.reporter_detail', guid)
    addons = (Addon.objects.filter(authors=request.user)
              if request.user.is_authenticated() else [])
    return render(request, 'compat/reporter.html',
                  dict(query=query, addons=addons))
@non_atomic_requests
def reporter_detail(request, guid):
    """Show compatibility reports for one add-on GUID with optional
    app-version and works-properly filters."""
    try:
        addon = Addon.objects.get(guid=guid)
    except Addon.DoesNotExist:
        addon = None
    name = addon.name if addon else guid
    qs = CompatReport.objects.filter(guid=guid)

    # Users without ownership/review rights must not see unlisted data.
    show_listed_only = addon and not owner_or_unlisted_reviewer(request, addon)
    if (addon and not addon.has_listed_versions() and show_listed_only):
        # Not authorized? Let's pretend this addon simply doesn't exist.
        name = guid
        qs = CompatReport.objects.none()
    elif show_listed_only:
        unlisted_versions = addon.versions.filter(
            channel=amo.RELEASE_CHANNEL_UNLISTED).values_list(
            'version', flat=True)
        qs = qs.exclude(version__in=unlisted_versions)

    form = AppVerForm(request.GET)
    if request.GET and form.is_valid() and form.cleaned_data['appver']:
        # Apply filters only if we have a good app/version combination.
        version = form.cleaned_data['appver']
        ver = vdict(floor_version(version))['major']  # 3.6 => 3

        # Ideally we'd have a `version_int` column to do strict version
        # comparing, but that's overkill for basic version filtering here.
        qs = qs.filter(app_guid=amo.FIREFOX.guid,
                       app_version__startswith=str(ver) + '.')

    # Success/failure tallies for the whole (unfiltered) report set.
    works_ = dict(qs.values_list('works_properly').annotate(Count('id')))
    works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)}

    works_properly = request.GET.get('works_properly')
    if works_properly:
        qs = qs.filter(works_properly=works_properly)
    reports = paginate(request, qs.order_by('-created'), 100)

    return render(request, 'compat/reporter_detail.html',
                  dict(reports=reports, works=works,
                       works_properly=works_properly,
                       name=name, guid=guid, form=form))
|
grahamking/lintswitch | setup.py | Python | gpl-3.0 | 1,119 | 0 | """To install: sudo python setup.py install
"""
import os
from setuptools import setup, find_packages
def read(fname):
    """Return the contents of *fname*, resolved relative to this file.

    FIX: use a context manager so the file handle is closed promptly
    (the original left it open until garbage collection).
    """
    with open(os.path.join(os.path.dirname(__file__), fname)) as f:
        return f.read()
VERSION | = __import__('lintswitch').__version__
setup(
name='lintswitch',
version=VERSION,
author='Graham King',
author_email='graham@gkgk.org',
description='Lint your Python in real-time',
long_description=read('README.md'),
packages=find_packages(),
package_data={'lintswitch': ['index.html']},
entry_points={
'console_scripts': ['lintswitch=lintswitch.main:main']
},
url='https://github.com/gra | hamking/lintswitch',
install_requires=['setuptools'],
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Quality Assurance'
]
)
|
felixonmars/app | lib/vendor/php-rql/docs/scripts/validate_examples.py | Python | gpl-2.0 | 5,917 | 0.003042 | import os
import sys
import yaml
import re
from subprocess import Popen, PIPE, call
from random import randrange
from threading import Thread
import SocketServer
import struct
import pdb
sys.path.insert(0, '/usr/local/lib/python2.6/dist-packages/rethinkdb')
import ql2_pb2 as p
# tree of YAML documents defining documentation
src_dir = sys.argv[1]

commands = []
# Walk the src files to compile all sections and commands
# NOTE(review): yaml.load without an explicit Loader can construct arbitrary
# Python objects; acceptable only because the doc sources are trusted.
# ``file()`` is the Python 2 builtin equivalent of ``open()``.
for root, dirs, file_names in os.walk(src_dir):
    for file_name in file_names:
        docs = yaml.load(file(os.path.join(root, file_name)))
        if 'commands' in docs:
            commands.extend(docs['commands'])
def validate_for(lang, port):
    """Generate ``build/test.<lang>`` from every documented example for
    ``lang``, run it with that language's interpreter against the stub
    server listening on ``port``, and exit the process if the run fails."""
    test_file_name = 'build/test.%s' % lang
    with open(test_file_name, 'w') as out:
        # Language-specific prelude: connect to the stub server on ``port``.
        if lang == 'py':
            out.write("""
from sys import path
path.insert(0, '../../../drivers/python')
import rethinkdb as r
conn = r.connect(port=%d)
print 'Running python validation.'
""" % port)
        elif lang == 'js':
            out.write("""
var r = require("../../../../drivers/javascript/build/rethinkdb");
var callback = (function() { });
var cur = {next:(function(){}), hasNext:(function(){}), each:(function(){}), toArray:(function(){})};
r.connect({port:%d}, function(err, conn) {
console.log("Running Javascript validation.");
""" % port)
        elif lang == 'rb':
            out.write("""
$LOAD_PATH.unshift('../../../drivers/ruby/lib')
require 'rethinkdb.rb'
include RethinkDB::Shortcuts
conn = r.connect('localhost', %d)
puts 'Running Ruby validation.'
""" % port)
        elif lang == 'ph':
            out.write("""
<?php
error_reporting(-1);
set_include_path("../src");
require_once("rdb/rdb.php");
$conn = r\\connect('localhost', %d);
echo 'Running PHP validation.\n';
""" % port)
        for command in commands:
            section_name = command['section']
            command_name = command['tag']
            for i,example in enumerate(command['examples']):
                # The tag is appended as a trailing comment on every emitted
                # line so failures can be traced back to their doc example.
                test_tag = section_name+"-"+command_name+"-"+str(i)
                test_case = example['code']
                # Per-language example bodies may be given as a dict.
                if isinstance(test_case, dict):
                    if lang in test_case:
                        test_case = test_case[lang]
                    else:
                        test_case = None
                if 'validate' in example and not example['validate']:
                    test_case = None
                    skip_validation = True
                else:
                    skip_validation = False
                # Check for an override of this test case
                if lang in command:
                    if isinstance(command[lang], bool) and not command[lang]:
                        test_case = None
                    elif isinstance(command[lang], dict):
                        override = command[lang]
                        if 'examples' in override:
                            if i in override['examples']:
                                example_override = override['examples'][i]
                                if len(example_override) == 0:
                                    test_case = None
                                elif 'code' in example_override:
                                    test_case = example_override['code']
                                if 'validate' in example_override:
                                    if not example_override['validate']:
                                        test_case = None
                                elif skip_validation:
                                    test_case = None
                comment = '#'
                if lang == 'js':
                    comment = '//'
                if lang == 'ph':
                    comment = '//'
                if test_case != None:
                    test_case = re.sub("\n", " %s %s\n" % (comment, test_tag), test_case)
                    out.write("%s %s %s\n" % (test_case, comment, test_tag))
        # Language-specific epilogue.
        if lang == 'js':
            out.write("console.log('Javascript validation complete.');\n");
            out.write("conn.close()})")
        if lang == 'py':
            out.write("print 'Python validation complete.'");
        if lang == 'rb':
            out.write("puts 'Ruby validation complete.'");
        if lang == 'ph':
            out.write("echo 'PHP validation complete.\n';\n");
            out.write("?>");
    if lang == 'py':
        interpreter = 'python'
    elif lang == 'js':
        interpreter = 'node'
    elif lang == 'rb':
        interpreter = 'ruby'
    elif lang == 'ph':
        interpreter = 'php5'
    ret = call([interpreter, test_file_name])
    # NOTE(review): ``is not 0`` relies on CPython small-int caching;
    # ``ret != 0`` is the robust spelling.
    if ret is not 0:
        sys.exit(1)
class BlackHoleRDBHandler(SocketServer.BaseRequestHandler):
    """Minimal fake RethinkDB server: answers every query with a null atom,
    so documentation examples can be executed without a real database."""

    def _recv_exact(self, n):
        """Read exactly ``n`` bytes from the connection.

        FIX: ``socket.recv`` may legally return fewer bytes than requested;
        the original assumed one recv delivered a whole header/message.
        Returns a short (possibly empty) buffer only on EOF.
        """
        chunks = []
        remaining = n
        while remaining:
            chunk = self.request.recv(remaining)
            if not chunk:
                break
            chunks.append(chunk)
            remaining -= len(chunk)
        return b''.join(chunks)

    def handle(self):
        # The client begins by sending a 4-byte protocol magic number.
        magic = self.request.recv(4)
        while True:
            header = self._recv_exact(4)
            if len(header) == 0:
                break
            # Little-endian length prefix followed by a serialized Query.
            (length,) = struct.unpack("<L", header)
            data = self._recv_exact(length)
            query = p.Query()
            query.ParseFromString(data)
            # Echo the token back with a SUCCESS_ATOM containing R_NULL.
            response = p.Response()
            response.token = query.token
            response.type = p.Response.SUCCESS_ATOM
            datum = response.response.add()
            datum.type = p.Datum.R_NULL
            response_protobuf = response.SerializeToString()
            response_header = struct.pack("<L", len(response_protobuf))
            self.request.sendall(response_header + response_protobuf)
def validate():
    """Start the black-hole server on a random port, run each language's
    validation against it, and tear the server down afterwards."""
    # Setup void server
    port = randrange(1025, 65535)
    server = SocketServer.TCPServer(('localhost', port), BlackHoleRDBHandler)
    t = Thread(target=server.serve_forever)
    t.daemon = True  # don't keep the process alive if teardown is skipped
    t.start()
    try:
        #validate_for('py', port)
        #validate_for('js', port)
        #validate_for('rb', port)
        validate_for('ph', port)
    finally:
        server.shutdown()
        # FIX: shutdown() only stops serve_forever; close the listening
        # socket and wait for the server thread to finish.
        server.server_close()
        t.join()
# Run the validation when the script is executed.
validate()
|
alexlo03/ansible | lib/ansible/modules/network/avi/avi_trafficcloneprofile.py | Python | gpl-3.0 | 4,161 | 0.000961 | #!/usr/bin/python
#
# @author: Gaurav Rastogi (grastogi@avinetworks.com)
# Eric Anderson (eanderson@avinetworks.com)
# module_check: supported
#
# Copyright: (c) 2017 Gaurav Rastogi, <grastogi@avinetworks.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_trafficcloneprofile
author: Gaurav Rastogi (grastogi@avinetworks.com)
short_description: Module for setup of TrafficCloneProfile Avi RESTful Object
description:
- This module is used to configure TrafficCloneProfile object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default | method for object update is HTTP | PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
clone_servers:
description:
- Field introduced in 17.1.1.
cloud_ref:
description:
- It is a reference to an object of type cloud.
- Field introduced in 17.1.1.
name:
description:
- Name for the traffic clone profile.
- Field introduced in 17.1.1.
required: true
preserve_client_ip:
description:
- Specifies if client ip needs to be preserved to clone destination.
- Field introduced in 17.1.1.
- Default value when not specified in API or module is interpreted by Avi Controller as False.
type: bool
tenant_ref:
description:
- It is a reference to an object of type tenant.
- Field introduced in 17.1.1.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Uuid of the traffic clone profile.
- Field introduced in 17.1.1.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Example to create TrafficCloneProfile object
avi_trafficcloneprofile:
controller: 10.10.25.42
username: admin
password: something
state: present
name: sample_trafficcloneprofile
"""
RETURN = '''
obj:
description: TrafficCloneProfile (api/trafficcloneprofile) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
    from ansible.module_utils.network.avi.avi import (
        avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
    # The Avi SDK helpers are optional; main() reports a friendly error
    # when they are missing instead of crashing at import time.
    HAS_AVI = False
def main():
    """Ansible entry point: build the argument spec and delegate CRUD of
    the trafficcloneprofile object to the shared Avi API helper."""
    argument_specs = dict(
        state=dict(default='present',
                   choices=['absent', 'present']),
        avi_api_update_method=dict(default='put',
                                   choices=['put', 'patch']),
        avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
        clone_servers=dict(type='list',),
        cloud_ref=dict(type='str',),
        name=dict(type='str', required=True),
        preserve_client_ip=dict(type='bool',),
        tenant_ref=dict(type='str',),
        url=dict(type='str',),
        uuid=dict(type='str',),
    )
    # Merge in the controller/credential options common to all Avi modules.
    argument_specs.update(avi_common_argument_spec())
    module = AnsibleModule(
        argument_spec=argument_specs, supports_check_mode=True)
    if not HAS_AVI:
        return module.fail_json(msg=(
            'Avi python API SDK (avisdk>=17.1) is not installed. '
            'For more details visit https://github.com/avinetworks/sdk.'))
    return avi_ansible_api(module, 'trafficcloneprofile',
                           set([]))
# Standard Ansible module entry point.
if __name__ == '__main__':
    main()
|
yasserglez/programming-problems | cracking_the_coding_interview/chapter_04/first_common_ancestor.py | Python | mit | 1,764 | 0 | # Interview Question 4.7
class Node(object):
    """Binary tree node that keeps a back-reference to its parent.

    Assigning ``left``/``right`` automatically sets the child's ``parent``.
    """

    def __init__(self, value):
        self.value = value
        # BUG FIX: the original assigned the class object ``Node`` instead
        # of ``None``, so a fresh node's parent was the class itself.
        self.parent = None
        self._left = self._right = None

    @property
    def left(self):
        """The left child, or None."""
        return self._left

    @left.setter
    def left(self, node):
        self._left = node
        self._left.parent = self

    @property
    def right(self):
        """The right child, or None."""
        return self._right

    @right.setter
    def right(self, node):
        self._right = node
        self._right.parent = self

    def is_ancestor(self, other):
        """Return True if ``other`` is in the subtree rooted at ``self``
        (a node counts as its own ancestor)."""
        if self is other:
            return True
        else:
            is_ancestor = False
            if self.left is not None:
                is_ancestor = self.left.is_ancestor(other)
            if not is_ancestor and self.right is not None:
                is_ancestor = self.right.is_ancestor(other)
            return is_ancestor
def first_common_ancestor(node1, node2):
    """Return the lowest node that is an ancestor of both arguments by
    climbing the two nodes toward the root in lockstep."""
    while True:
        if node1.is_ancestor(node2):
            return node1
        if node2.is_ancestor(node1):
            return node2
        assert node1.parent is not None
        assert node2.parent is not None
        node1, node2 = node1.parent, node2.parent
if __name__ == '__main__':
    # Build a small demo tree and print the first common ancestor of a
    # few node pairs.
    n1 = root = Node(1)
    n3 = root.left = Node(3)
    n2 = root.left.right = Node(2)
    n4 = root.right = Node(4)
    n6 = root.right.left = Node(6)
    n5 = root.right.right = Node(5)
    n7 = root.right.right.left = Node(7)

    print(first_common_ancestor(n1, n1).value)
    print(first_common_ancestor(n3, n2).value)
    print(first_common_ancestor(n6, n4).value)
    print(first_common_ancestor(n2, n6).value)
    print(first_common_ancestor(n6, n7).value)
stvstnfrd/edx-platform | common/lib/xmodule/xmodule/assetstore/tests/test_asset_xml.py | Python | agpl-3.0 | 3,718 | 0.001345 | """
Test for asset XML generation / parsing.
"""
import unittest
import pytest
from contracts import ContractNotRespected
from lxml import etree
from opaque_keys.edx.locator import CourseLocator
from path import Path as path
from six.moves import zip
from xmodule.assetstore import AssetMetadata
from xmodule.modulestore.tests.test_assetstore import AssetStoreTestData
class TestAssetXml(unittest.TestCase):
    """
    Tests for storing/querying course asset metadata.
    """

    def setUp(self):
        # Build one AssetMetadata per row of the shared test data and
        # compile an XML-schema validator for the asset export format.
        super(TestAssetXml, self).setUp()  # lint-amnesty, pylint: disable=super-with-arguments
        xsd_filename = "assets.xsd"

        self.course_id = CourseLocator('org1', 'course1', 'run1')

        self.course_assets = []
        for asset in AssetStoreTestData.all_asset_data:
            # The first field is the asset name; the rest become kwargs.
            asset_dict = dict(list(zip(AssetStoreTestData.asset_fields[1:], asset[1:])))
            asset_md = AssetMetadata(self.course_id.make_asset_key('asset', asset[0]), **asset_dict)
            self.course_assets.append(asset_md)

        # Read in the XML schema definition and make a validator.
        xsd_path = path(__file__).realpath().parent / xsd_filename
        with open(xsd_path, 'rb') as f:
            schema_root = etree.XML(f.read())
        schema = etree.XMLSchema(schema_root)
        self.xmlparser = etree.XMLParser(schema=schema)

    def test_export_single_asset_to_from_xml(self):
        """
        Export a single AssetMetadata to XML and verify the structure and fields.
        """
        asset_md = self.course_assets[0]
        root = etree.Element("assets")
        asset = etree.SubElement(root, "asset")
        asset_md.to_xml(asset)
        # If this line does *not* raise, the XML is valid.
        etree.fromstring(etree.tostring(root), self.xmlparser)
        new_asset_key = self.course_id.make_asset_key('tmp', 'tmp')
        new_asset_md = AssetMetadata(new_asset_key)
        new_asset_md.from_xml(asset)
        # Compare asset_md to new_asset_md.
        for attr in AssetMetadata.XML_ATTRS:
            if attr in AssetMetadata.XML_ONLY_ATTRS:
                continue
            orig_value = getattr(asset_md, attr)
            new_value = getattr(new_asset_md, attr)
            assert orig_value == new_value

    def test_export_with_None_value(self):
        """
        Export and import a single AssetMetadata to XML with a None created_by field, without causing an exception.
        """
        asset_md = AssetMetadata(
            self.course_id.make_asset_key('asset', 'none_value'),
            created_by=None,
        )
        asset = etree.Element("asset")
        asset_md.to_xml(asset)
        asset_md.from_xml(asset)

    def test_export_all_assets_to_xml(self):
        """
        Export all AssetMetadatas to XML and verify the structure and fields.
        """
        root = etree.Element("assets")
        AssetMetadata.add_all_assets_as_xml(root, self.course_assets)
        # If this line does *not* raise, the XML is valid.
        etree.fromstring(etree.tostring(root), self.xmlparser)

    def test_wrong_node_type_all(self):
        """
        Ensure full asset sections with the wrong tag are detected.
        """
        root = etree.Element("glassets")
        with pytest.raises(ContractNotRespected):
            AssetMetadata.add_all_assets_as_xml(root, self.course_assets)

    def test_wrong_node_type_single(self):
        """
        Ensure single asset blocks with the wrong tag are detected.
        """
        asset_md = self.course_assets[0]
        root = etree.Element("assets")
        asset = etree.SubElement(root, "smashset")
        with pytest.raises(ContractNotRespected):
            asset_md.to_xml(asset)
|
Dishwishy/beets | beets/ui/__init__.py | Python | mit | 40,331 | 0 | # This file is part of beets.
# Copyright 2015, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""This module contains all of the core logic for beets' command-line
interface. To invoke the CLI, just call beets.ui.main(). The actual
CLI commands are implemented in the ui.commands module.
"""
from __future__ import (division, absolute_import, print_function,
unicode_literals)
import locale
import optparse
import textwrap
import sys
from difflib import SequenceMatcher
import sqlite3
import errno
import re
import struct
import traceback
import os.path
from beets import logging
from beets import library
from beets import plugins
from beets import util
from beets.util.functemplate import Template
from beets import config
from beets.util import confit
from beets.autotag import mb
from beets.dbcore import query as db_query
# On Windows platforms, use colorama to support "ANSI" terminal colors.
# NOTE(review): this compares against a *bytes* literal; on Python 3
# sys.platform is str, so the branch only fires on Python 2 -- confirm.
if sys.platform == b'win32':
    try:
        import colorama
    except ImportError:
        # colorama is optional; without it ANSI codes are emitted raw.
        pass
    else:
        colorama.init()

log = logging.getLogger('beets')
if not log.handlers:
    log.addHandler(logging.StreamHandler())
log.propagate = False  # Don't propagate to root handler.

# Shorthand keys that expand to full queries.
PF_KEY_QUERIES = {
    'comp': 'comp:true',
    'singleton': 'singleton:true',
}
class UserError(Exception):
    """UI exception raised for nonrecoverable, user-facing errors.

    Commands should raise this in order to display the error message and
    abort the current operation.
    """
# Utilities.
def _out_encoding():
    """Get the encoding to use for *outputting* strings to the console.
    """
    # Configured override?
    encoding = config['terminal_encoding'].get()
    if encoding:
        return encoding

    # For testing: When sys.stdout is a StringIO under the test harness,
    # it doesn't have an `encoding` attribute. Just use UTF-8.
    if not hasattr(sys.stdout, 'encoding'):
        return 'utf8'

    # Python's guessed output stream encoding, or UTF-8 as a fallback
    # (e.g., when piped to a file).
    return sys.stdout.encoding or 'utf8'
def _arg_encoding():
"""Get the encoding for command-line arguments (and other OS
locale-sensitive strings).
"""
try:
return locale.getdefaultlocale()[1] or 'utf8'
except ValueError:
# Invalid locale environment variable setting. To avoid
# failing entirely for no good reason, assume UTF-8.
return 'utf8'
def decargs(arglist):
    """Given a list of command-line argument bytestrings, attempts to
    decode them to Unicode strings.
    """
    # Hoist the encoding lookup: the original called _arg_encoding() once
    # per argument even though the result is the same for all of them.
    encoding = _arg_encoding()
    return [s.decode(encoding) for s in arglist]
def print_(*strings, **kwargs):
    """Like print, but rather than raising an error when a character
    is not in the terminal's encoding's character set, just silently
    replaces it.

    If the arguments are strings then they're expected to share the same
    type: either bytes or unicode.

    The `end` keyword argument behaves similarly to the built-in `print`
    (it defaults to a newline). The value should have the same string
    type as the arguments.
    """
    end = kwargs.get('end')

    # Join with a separator/terminator of the same string type as the
    # arguments (Python 2 unicode vs. bytes).
    if not strings or isinstance(strings[0], unicode):
        txt = u' '.join(strings)
        txt += u'\n' if end is None else end
    else:
        txt = b' '.join(strings)
        txt += b'\n' if end is None else end

    # Always send bytes to the stdout stream.
    if isinstance(txt, unicode):
        txt = txt.encode(_out_encoding(), 'replace')

    sys.stdout.write(txt)
def input_(prompt=None):
    """Like `raw_input`, but decodes the result to a Unicode string.
    Raises a UserError if stdin is not available. The prompt is sent to
    stdout rather than stderr. A space is printed between the prompt and
    the input cursor.
    """
    # raw_input incorrectly sends prompts to stderr, not stdout, so we
    # use print_() explicitly to display prompts.
    # http://bugs.python.org/issue1927
    if prompt:
        print_(prompt, end=' ')

    try:
        resp = raw_input()
    except EOFError:
        raise UserError('stdin stream ended while input required')

    # Best-effort decode: bytes that don't fit the stdin encoding are
    # silently dropped ('ignore').
    return resp.decode(sys.stdin.encoding or 'utf8', 'ignore')
def input_options(options, require=False, prompt=None, fallback_prompt=None,
numrange=None, default=None, max_width=72):
"""Prompts a user for input. The sequence of `options` defines the
choices the user has. A single-letter shortcut is inferred for each
option; the user's choice is returned as that single, lower-case
letter. The options should be provided as lower-case strings unless
a particular shortcut is desired; in that case, only that letter
should be capitalized.
By default, the first option is the default. `default` can be provided to
override this. If `require` is provided, then there is no default. The
prompt and fallback prompt are also inferred but can be overridden.
If numrange is provided, it is a pair of `(high, low)` (both ints)
indicating that, in addition to `options`, the user may enter an
integer in that inclusive range.
`max_width` specifies the maximum number of columns in the
automatically generated prompt string.
"""
# Assign single letters to each option. Also capitalize the options
# to indicate the letter.
letters = {}
display_letters = []
capitalized = []
first = True
for option in options:
# Is a letter already capitalized?
for letter in option:
if letter.isalpha() and letter.upper() == letter:
found_letter = letter
break
else:
# Infer a letter.
for letter in option:
if not letter.isalpha():
continue # Don't use punctuation.
if letter not in letters:
found_letter = letter
break
else:
raise ValueError('no unambiguous lettering found')
letters[found_letter.lower()] = option
index = option.index(found_letter)
# Mark the option's shortcut letter for display.
if not require and (
(default is None and not numrange and first) or
(isinstance(default, basestring) and
found_letter.lower() == default.lower())):
# The f | irst option is the default; mark it.
show_letter = '[%s]' % found_letter.upper()
is_default = True
else:
show_letter = found_letter.upper()
is_default = False
# Colorize the letter shortcut.
show_letter = colorize('action_default' if is_default else 'action',
show_letter)
# Insert the highlighted letter back int | o the word.
capitalized.append(
option[:index] + show_letter + option[index + 1:]
)
display_letters.append(found_letter.upper())
first = False
# The default is just the first option if unspecified.
if require:
default = None
elif default is None:
if numrange:
default = numrange[0]
else:
default = display_letters[0].lower()
# Make a prompt if one is not provided.
if not prompt:
prompt_parts = []
prompt_part_lengths = []
if numrange:
if isinstance(default, int):
default_name = unicode(default)
default_name = colorize('action_default', default_name)
tmpl = '# selection (default %s)'
prompt_parts.append(tmpl % default_name)
prompt_part_lengths.append(len(tmpl % |
ArtyomSliusar/StorageOfKnowledge | storageofknowledge/manage.py | Python | gpl-3.0 | 364 | 0.002747 | #!/usr/bin/env python
import os
import sys

import dotenv

# Load environment variables from the project's .env file before the
# Django settings modules are imported.
dotenv.read_dotenv()


def main():
    """Run Django's management command line for this project."""
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'storageofknowledge.settings')
    os.environ.setdefault('DJANGO_CONFIGURATION', 'Settings')
    from configurations.management import execute_from_command_line
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
i3visio/osrframework | osrframework/wrappers/pending/bebee.py | Python | agpl-3.0 | 4,625 | 0.004758 | # !/usr/bin/python
# -*- coding: cp1252 -*-
#
##################################################################################
#
# Copyright 2016-2017 Félix Brezo and Yaiza Rubio (i3visio, contacto@i3visio.com)
#
# This program is part of OSRFramework. You can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##################################################################################
__author__ = "John Doe <johndoe@mailcatch.com>"
__version__ = "1.0"
import argparse
import json
import re
import sys
import urllib2
import osrframework.utils.browser as browser
from osrframework.utils.platforms import Platform
class Bebee(Platform):
    """
    A <Platform> object for Bebee.

    Only the "usufy" mode (username lookup) is enabled; the commented-out
    phonefy/searchfy lines document the template for the other modes.
    """
    def __init__(self):
        """
        Constructor. All configuration is static: mode flags, the lookup
        URL template, and the regular expressions used to scrape fields
        from a profile page.
        """
        self.platformName = "Bebee"
        self.tags = ["jobs"]

        ########################
        # Defining valid modes #
        ########################
        self.isValidMode = {}
        self.isValidMode["phonefy"] = False
        self.isValidMode["usufy"] = True
        self.isValidMode["searchfy"] = False

        ######################################
        # Search URL for the different modes #
        ######################################
        # Strings with the URL for each and every mode; "<usufy>" is
        # substituted with the queried nickname.
        self.url = {}
        #self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
        self.url["usufy"] = "https://bebee.com/bee/" + "<usufy>"
        #self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"

        ######################################
        # Whether the user needs credentials #
        ######################################
        self.needsCredentials = {}
        #self.needsCredentials["phonefy"] = False
        self.needsCredentials["usufy"] = False
        #self.needsCredentials["searchfy"] = False

        #################
        # Valid queries #
        #################
        # Regular expressions a query must match to be attempted at all.
        self.validQuery = {}
        # The regular expression '.+' will match any query
        #self.validQuery["phonefy"] = ".*"
        self.validQuery["usufy"] = ".+"
        #self.validQuery["searchfy"] = ".*"

        ###################
        # Not_found clues #
        ###################
        # Strings whose presence in the response means the profile does
        # not exist.
        self.notFoundText = {}
        #self.notFoundText["phonefy"] = []
        # NOTE(review): "https://.bebee.com" looks like a typo for
        # "https://www.bebee.com" — confirm against a live 404 page.
        self.notFoundText["usufy"] = ['<link rel="canonical" href="https://.bebee.com/bees/search">']
        #self.notFoundText["searchfy"] = []

        #########################
        # Fields to be searched #
        #########################
        # Each entry defines the delimiters ("start"/"end") around the
        # value to extract from the profile HTML.
        self.fieldsRegExp = {}

        # Definition of regular expressions to be searched in phonefy mode
        #self.fieldsRegExp["phonefy"] = {}
        # Example of fields:
        #self.fieldsRegExp["phonefy"]["i3visio.location"] = ""

        # Definition of regular expressions to be searched in usufy mode
        self.fieldsRegExp["usufy"] = {}
        # Example of fields:
        self.fieldsRegExp["usufy"]["i3visio.fullname"] = {"start": '<title>', "end": '- beBee</title>'}
        self.fieldsRegExp["usufy"]["i3visio.location"] = {"start": '<span itemprop="addressRegion">', "end": '</span>'}
        self.fieldsRegExp["usufy"]["i3visio.alias.googleplus"] = {"start": '<div><a rel="nofollow" class="color_corp_three" href="https://plus.google.com/u/0/', "end": '"'}
        self.fieldsRegExp["usufy"]["i3visio.alias.linkedin"] = {"start": '<div><a rel="nofollow" class="color_corp_three" href="http://br.linkedin.com/in/', "end": '"'}

        # Definition of regular expressions to be searched in searchfy mode
        #self.fieldsRegExp["searchfy"] = {}
        # Example of fields:
        #self.fieldsRegExp["searchfy"]["i3visio.location"] = ""

        ################
        # Fields found #
        ################
        # This attribute will be fed when running the program.
        self.foundFields = {}
|
iamahuman/angr | angr/knowledge_plugins/cfg/cfg_model.py | Python | bsd-2-clause | 15,139 | 0.003567 | # pylint:disable=no-member
import pickle
import logging
from collections import defaultdict
import networkx
from ...protos import cfg_pb2, primitives_pb2
from ...serializable import Serializable
from ...utils.enums_conv import cfg_jumpkind_to_pb, cfg_jumpkind_from_pb
from ...errors import AngrCFGError
from .cfg_node import CFGNode
from .memory_data import MemoryData
from ...misc.ux import once
l = logging.getLogger(name=__name__)
class CFGModel(Serializable):
"""
This class describes a Control Flow Graph for a specific range of code.
"""
__slots__ = ('ident', 'graph', 'jump_tables', 'memory_data', 'insn_addr_to_memory_data', 'references',
'_nodes_by_addr', '_nodes', '_cfg_manager', '_iropt_level', )
    def __init__(self, ident, cfg_manager=None):
        """
        :param ident:       Identifier string for this CFG model.
        :param cfg_manager: The owning CFGManager, or None for a model
                            detached from any knowledge base.
        """
        self.ident = ident
        self._cfg_manager = cfg_manager
        # Necessary settings
        self._iropt_level = None
        # The graph
        self.graph = networkx.DiGraph()
        # Jump tables
        self.jump_tables = { }
        # Memory references
        # A mapping between address and the actual data in memory
        self.memory_data = { }
        # A mapping between address of the instruction that's referencing the memory data and the memory data itself
        self.insn_addr_to_memory_data = { }
        # Lists of CFGNodes indexed by the address of each block. Don't serialize
        self._nodes_by_addr = defaultdict(list)
        # CFGNodes dict indexed by block ID. Don't serialize
        self._nodes = { }
#
# Properties
#
@property
def project(self):
if self._cfg_manager is None:
return None
return self._cfg_manager._kb._project
#
# Serialization
#
    @classmethod
    def _get_cmsg(cls):
        # Factory for the protobuf message type this model serializes to.
        return cfg_pb2.CFG()
    def serialize_to_cmessage(self):
        """Serialize this model to a cfg_pb2.CFG protobuf message:
        nodes, edges (with pickled auxiliary attributes), and memory data.

        :raises NotImplementedError: for CFGEmulated-based models.
        """
        if "Emulated" in self.ident:
            raise NotImplementedError("Serializing a CFGEmulated instance is currently not supported.")
        cmsg = self._get_cmsg()
        cmsg.ident = self.ident
        # nodes
        nodes = [ ]
        for n in self.graph.nodes():
            nodes.append(n.serialize_to_cmessage())
        cmsg.nodes.extend(nodes)
        # edges
        edges = [ ]
        for src, dst, data in self.graph.edges(data=True):
            edge = primitives_pb2.Edge()
            edge.src_ea = src.addr
            edge.dst_ea = dst.addr
            for k, v in data.items():
                if k == 'jumpkind':
                    edge.jumpkind = cfg_jumpkind_to_pb(v)
                elif k == 'ins_addr':
                    # -1 encodes None in the protobuf message.
                    edge.ins_addr = v if v is not None else -1
                elif k == 'stmt_idx':
                    # -1 encodes None in the protobuf message.
                    edge.stmt_idx = v if v is not None else -1
                else:
                    # Unknown edge attributes are pickled verbatim.
                    edge.data[k] = pickle.dumps(v)
            edges.append(edge)
        cmsg.edges.extend(edges)
        # memory data
        memory_data = [ ]
        for data in self.memory_data.values():
            memory_data.append(data.serialize_to_cmessage())
        cmsg.memory_data.extend(memory_data)
        return cmsg
@classmethod
def parse_from_cmessage(cls, cmsg, cfg_manager=None): # pylint:disable=arguments-differ
if cfg_manager is None:
# create a new model unassociated from any project
model = cls(cmsg.ident)
else:
model = cfg_manager.new_model(cmsg.ident)
# nodes
for node_pb2 in cmsg.nodes:
node = CFGNode.parse_from_cmessage(node_pb2, cfg=model)
model._nodes[node.block_id] = node
model._nodes_by_addr[node.addr].ap | pend(node)
model.graph.add_node(node)
if len(model._nodes_by_addr[node.block_id]) > 1:
if once("cfg_model_parse_from_cmessage many nodes at addr"):
l.warning("Importing a CFG with more than one node for a given address is currently unsupported. "
"The resulting graph may be broken.")
# edges
for edge_pb2 in cmsg.edges:
# more than one | node at a given address is unsupported, grab the first one
src = model._nodes_by_addr[edge_pb2.src_ea][0]
dst = model._nodes_by_addr[edge_pb2.dst_ea][0]
data = { }
for k, v in edge_pb2.data.items():
data[k] = pickle.loads(v)
data['jumpkind'] = cfg_jumpkind_from_pb(edge_pb2.jumpkind)
data['ins_addr'] = edge_pb2.ins_addr if edge_pb2.ins_addr != -1 else None
data['stmt_idx'] = edge_pb2.stmt_idx if edge_pb2.stmt_idx != -1 else None
model.graph.add_edge(src, dst, **data)
# memory data
for data_pb2 in cmsg.memory_data:
md = MemoryData.parse_from_cmessage(data_pb2)
model.memory_data[md.addr] = md
return model
#
# Other methods
#
    def copy(self):
        """Return a shallow copy of this model: the graph structure and the
        container dicts are duplicated, but node/data objects are shared
        with the original.
        """
        model = CFGModel(self.ident, cfg_manager=self._cfg_manager)
        model.graph = networkx.DiGraph(self.graph)
        model.jump_tables = self.jump_tables.copy()
        model.memory_data = self.memory_data.copy()
        model.insn_addr_to_memory_data = self.insn_addr_to_memory_data.copy()
        model._nodes_by_addr = self._nodes_by_addr.copy()
        model._nodes = self._nodes.copy()
        return model
#
# CFG View
#
def get_node(self, block_id):
"""
Get a single node from node key.
:param BlockID block_id: Block ID of the node.
:return: The CFGNode
:rtype: CFGNode
"""
if block_id in self._nodes:
return self._nodes[block_id]
return None
def get_any_node(self, addr, is_syscall=None, anyaddr=False, force_fastpath=False):
"""
Get an arbitrary CFGNode (without considering their contexts) from our graph.
:param int addr: Address of the beginning of the basic block. Set anyaddr to True to support arbitrary
address.
:param bool is_syscall: Whether you want to get the syscall node or any other node. This is due to the fact that
syscall SimProcedures have the same address as the targer it returns to.
None means get either, True means get a syscall node, False means get something that isn't
a syscall node.
:param bool anyaddr: If anyaddr is True, then addr doesn't have to be the beginning address of a basic
block. By default the entire graph.nodes() will be iterated, and the first node
containing the specific address is returned, which is slow. If you need to do many such
queries, you may first call `generate_index()` to create some indices that may speed up the
query.
:param bool force_fastpath: If force_fastpath is True, it will only perform a dict lookup in the _nodes_by_addr
dict.
:return: A CFGNode if there is any that satisfies given conditions, or None otherwise
"""
# fastpath: directly look in the nodes list
if not anyaddr:
try:
return self._nodes_by_addr[addr][0]
except (KeyError, IndexError):
pass
if force_fastpath:
return None
# slower path
#if self._node_lookup_index is not None:
# pass
# the slowest path
# try to show a warning first
# TODO: re-enable it once the segment tree is implemented
#if self._node_lookup_index_warned == False:
# l.warning('Calling get_any_node() with anyaddr=True is slow on large programs. '
# 'For better performance, you may first call generate_index() to generate some indices that may '
# 'speed the node lookup.')
# self._node_lookup_index_warned = True
for n in self.graph.nodes():
if self.ident == "CFGEmulate |
GzkV/bookstore_project | store/migrations/0001_initial.py | Python | mit | 839 | 0.002384 | # -*- coding: utf-8 -*-
# Generated by Django 1 | .9 on 2016-01-26 02:11
from __future__ import unicode_literals
import datetime
from django.db im | port migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Initial migration: creates the Book model/table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Book',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=200)),
                ('author', models.CharField(max_length=200)),
                ('description', models.TextField()),
                # NOTE(review): the default is frozen at migration-generation
                # time (2016-01-26), not evaluated per-row at insert time.
                # Presumably acceptable for an applied migration — editing it
                # retroactively would be unsafe.
                ('publish_date', models.DateField(default=datetime.datetime(2016, 1, 26, 2, 11, 14, 827928, tzinfo=utc))),
            ],
        ),
    ]
|
CraigHarris/gpdb | src/test/tinc/tincrepo/mpp/models/test/sql_related/test_sql_tc_42.py | Python | apache-2.0 | 1,931 | 0.001554 | """
Copyright (C) 2004-2015 Pivotal Software, Inc. All rights reserved.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the spec | ific language governing permissions and
limitations under the License.
"""
import unittest2 as unittest
from mpp.models.mpp_tc import _MPPMetaClassType
from mpp.models.mpp_tc import MPPDUT
from mpp.models import SQLTestCase, SQLTestCaseException
from mpp.models.sql_tc import __gpdbSQLTestCase__, __hawqSQLTestCase__
class MockMPPMetaClassTypeGPDB42(_MPPMetaClassType):
    """Metaclass mock pinning the device-under-test to GPDB 4.2."""
    # NOTE(review): executed at class-definition time, and it assigns onto the
    # *base* metaclass (_MPPMetaClassType.DUT) rather than shadowing DUT on
    # this subclass — so every metaclass sharing that base sees this DUT.
    _MPPMetaClassType.DUT = MPPDUT('gpdb', '4.2')
@unittest.skip('mock')
class MockSQLTestCaseGPDB42(SQLTestCase):
    """SQLTestCase whose metaclass pins the DUT to a mocked GPDB 4.2."""
    __metaclass__ = MockMPPMetaClassTypeGPDB42

    def test_do_stuff(self):
        # Verify the metaclass stamped the expected product/version.
        (product, version) = self.get_product_version()
        # BUGFIX: was `self.assertEquals(prodcut, 'gpdb')` — the misspelled
        # name would raise NameError if this (currently skipped) test ever
        # ran; assertEquals is also a deprecated alias of assertEqual.
        self.assertEqual(product, 'gpdb')
        self.assertEqual(version, '4.2')
class SQLTestCaseGPDB42Tests(unittest.TestCase):
    """Meta-tests for SQLTestCase behavior under a mocked GPDB 4.2 DUT."""

    def test_get_optimizer_mode_42(self):
        """
        Test whether get_optimizer_mode returns None for versions < 43
        """
        gpdb_test_case = MockSQLTestCaseGPDB42('test_do_stuff')
        # The metaclass should have tagged the class with the mocked DUT.
        self.assertEqual(gpdb_test_case.__class__.__product__, 'gpdb')
        self.assertEqual(gpdb_test_case.__class__.__version_string__, '4.2')
        # Product selection should pick the GPDB mixin, not the HAWQ one.
        self.assertTrue(isinstance(gpdb_test_case, __gpdbSQLTestCase__))
        self.assertFalse(isinstance(gpdb_test_case, __hawqSQLTestCase__))
        self.assertIsNone(MockSQLTestCaseGPDB42.get_global_optimizer_mode())
|
Liamc0950/EcosystemSimulator | utils.py | Python | gpl-2.0 | 5,066 | 0.010265 | import pygame, sys, random, math
from pygame.locals import *
import organisms
import globalVars
class Graphics:
    """Owns the pygame display surface for the simulation window."""
    def __init__(self):
        # 1080x820: the world area plus the 100px-tall GUI pane that the GUI
        # class draws at y=720.
        self.screen = pygame.display.set_mode((1080, 820))
        pygame.display.set_caption('Ecosystem Simulator')
class GUI:
    """Bottom control pane containing the simulation-speed slider."""

    def __init__(self):
        self.sliderX = 150                        # slider handle x, clamped to [50, 250]
        self.mouseX = pygame.mouse.get_pos()[0]   # mouse x from the previous frame

    def render(self):
        """Draw the pane background, the label, the track and the handle."""
        screen = globalVars.graphics.screen
        # Pane background.
        pygame.draw.rect(screen, (214, 214, 214), pygame.Rect(0, 720, 1080, 100), 0)
        # Label text.
        label = pygame.font.Font(None, 20).render("Simulation Speed", 1, (10, 10, 10))
        screen.blit(label, (50, 730))
        # Slider track, then the draggable handle on top of it.
        pygame.draw.rect(screen, (255, 255, 255), pygame.Rect(50, 770, 200, 10), 0)
        pygame.draw.rect(screen, (100, 100, 100), pygame.Rect(self.sliderX, 760, 15, 30), 0)

    def sliderDrag(self):
        """While the left button is held over the handle, drag it and
        update the global simulation speed (0..200)."""
        if not pygame.mouse.get_pressed()[0]:
            return
        mouse_x, mouse_y = pygame.mouse.get_pos()
        delta = mouse_x - self.mouseX
        grabbed = abs(mouse_x - self.sliderX) <= 15 and abs(mouse_y - 760) <= 30
        if grabbed and 50 <= self.sliderX + delta <= 250:
            self.sliderX += delta
            globalVars.simSpeed = self.sliderX - 50

    def act(self):
        """Per-frame update: process dragging, remember the mouse x for the
        next frame's delta, then redraw."""
        self.sliderDrag()
        self.mouseX = pygame.mouse.get_pos()[0]
        self.render()
class HUD:
    """Overlay showing details of the organism the user clicked on."""

    def __init__(self, world):
        # World the HUD inspects for clickable organisms.
        self.world = world
        # False doubles as the "no selection" sentinel (act() checks != False).
        self.target = False

    def getTarget(self):
        """Set self.target to the first organism under a left-click, if any.
        Vegetation is checked before prey."""
        # pygame.event.get()
        if pygame.mouse.get_pressed()[0] == True:
            for veg in self.world.vegetation:
                if self.mouseClicked(veg):
                    self.target = veg
                    return
            for prey in self.world.prey:
                if self.mouseClicked(prey):
                    self.target = prey
                    return

    def render(self):
        """Draw the selected organism's stats in the top-left corner."""
        font = pygame.font.Font(None, 20)
        #Name
        name = font.render("Name: " + str(self.target), 1, (10, 10, 10))
        namepos = (25, 25)
        globalVars.graphics.screen.blit(name, namepos)
        #Position
        pos = font.render("Position: (" + str(self.target.x) + "," + str(self.target.y) + ")", 1, (10, 10, 10))
        pospos = (25, 45)
        globalVars.graphics.screen.blit(pos, pospos)
        #Age
        age = font.render("Age: " + str(self.target.age), 1, (10, 10, 10))
        agepos = (25, 85)
        globalVars.graphics.screen.blit(age, agepos)
        #Hunger/Food — these share line y=65 since a target is only one type.
        if type(self.target) is organisms.Prey:
            hunger = font.render("Hunger: " + str(self.target.hunger), 1, (10, 10, 10))
            hungpos = (25, 65)
            globalVars.graphics.screen.blit(hunger, hungpos)
        if type(self.target) is organisms.Vegetation:
            food = font.render("Food: " + str(self.target.food), 1, (10, 10, 10))
            foodpos = (25, 65)
            globalVars.graphics.screen.blit(food, foodpos)
        #Status (prey only)
        if type(self.target) is organisms.Prey:
            status = font.render("Status: " + str(self.target.status), 1, (10, 10, 10))
            statuspos = (25, 105)
            globalVars.graphics.screen.blit(status, statuspos)

    #Target must be a Prey or Vegetation object
    def mouseClicked(self, target):
        """Return True when the mouse cursor is within the target's
        clickable circle. NOTE(review): relies on target.circle_radius,
        while World uses .radius/.hunting_radius — confirm organisms
        define all three."""
        dx = pygame.mouse.get_pos()[0] - target.x
        dy = pygame.mouse.get_pos()[1] - target.y
        distance = math.hypot(dx, dy)
        if distance < target.circle_radius:
            return True
        else:
            return False

    def act(self):
        """Per-frame update: refresh the selection, then draw it if any."""
        self.getTarget()
        if self.target != False:
            self.render()
class World:
    """Container for every simulation entity, plus geometry helpers."""

    def __init__(self):
        self.prey = []
        self.predators = []
        self.vegetation = []
        self.water = []
        self.shelter = []
        self.time = 0

    def addPrey(self, prey):
        """Register a prey organism with the world."""
        self.prey.append(prey)

    def addVegetation(self, veg):
        """Register a vegetation patch with the world."""
        self.vegetation.append(veg)

    def removePrey(self, prey):
        """Remove a prey organism from the world."""
        self.prey.remove(prey)

    def removeVegetation(self, veg):
        """Remove a vegetation patch from the world."""
        self.vegetation.remove(veg)

    def getDistance(self, element1, element2):
        """Euclidean distance between the two elements' (x, y) centers."""
        dx = element1.x - element2.x
        dy = element1.y - element2.y
        return math.hypot(dx, dy)

    def collideByRadius(self, p1, p2):
        """True when p2's body circle intersects p1's hunting circle."""
        separation = math.hypot(p1.x - p2.x, p1.y - p2.y)
        return separation < p1.hunting_radius + p2.radius

    def vegetationInRadius(self, element):
        """All vegetation whose body circle overlaps element's hunting circle."""
        return [veg for veg in self.vegetation
                if self.collideByRadius(element, veg)]
|
txtbits/daw-python | primeros ejercicios/Ejercicios entradasalida/ejercicio2.py | Python | mit | 470 | 0.002128 | # Escribir un programa que pregunte al usuario dos números y luego muestre la suma, el producto | y la media de los dos números
numero = raw_input('Elige un numero ')
numero2 = raw_input('Elige otro numero ')
numero = int(numero)
numero2 = int(numero2)
print 'La suma de los numeros es: ', numero + numero2
print 'El producto de los numeros es: ', numero * numero2
print 'La media de los numeros es: ', (numero+numero2)/2.
raw_input('Pulse la tecla enter par | a finalizar')
|
yongfuyang/vnpy | vn.trader/ctaAlgo/strategyAtrRsi2.py | Python | mit | 12,056 | 0.003283 | # encoding: UTF-8
"""
一个ATR-RSI指标结合的交易策略,适合用在股指的1分钟和5分钟线上。
注意事项:
1. 作者不对交易盈利做任何保证,策略代码仅供参考
2. 本策略需要用到talib,没有安装的用户请先参考www.vnpy.org上的教程安装
3. 将IF0000_1min.csv用ctaHistoryData.py导入MongoDB后,直接运行本文件即可回测策略
"""
from ctaBase import *
from ctaTemplate import CtaTemplate
import talib
import numpy as np
########################################################################
class AtrRsiStrategy(CtaTemplate):
    """A minute-bar trading strategy combining the ATR and RSI indicators."""
    className = 'AtrRsiStrategy'
    author = u'用Python的交易员'

    # Strategy parameters
    atrLength = 22          # window length for the ATR indicator
    atrMaLength = 10        # window length for the ATR moving average
    rsiLength = 5           # window length for RSI
    rsiEntry = 16           # RSI entry offset from the 50 midline
    trailingPercent = 1.0   # trailing stop, in percent
    initDays = 10           # days of history replayed at initialization
    fixedSize = 1           # order size (risk)
    useTrailingStop = False # whether to use a trailing stop
    profitLock = 30         # profit lock level
    trailingStop = 20       # trailing stop distance

    # Strategy variables
    bar = None                  # current in-progress bar object
    barMinute = EMPTY_STRING    # minute of the bar being built

    bufferSize = 100                    # number of bars kept in the rolling buffers
    bufferCount = 0                     # bars cached so far
    highArray = np.zeros(bufferSize)    # rolling array of bar highs
    lowArray = np.zeros(bufferSize)     # rolling array of bar lows
    closeArray = np.zeros(bufferSize)   # rolling array of bar closes

    atrCount = 0                    # ATR values cached so far
    atrArray = np.zeros(bufferSize) # rolling array of ATR values
    atrValue = 0                    # latest ATR value
    atrMa = 0                       # moving average of ATR
    rsiValue = 0                    # latest RSI value
    rsiBuy = 0                      # RSI threshold for opening longs
    rsiSell = 0                     # RSI threshold for opening shorts
    intraTradeHigh = 0              # highest price while a position is held (for trailing stop)
    intraTradeLow = 0               # lowest price while a position is held (for trailing stop)

    orderList = []                  # active order IDs, for cancellation

    # Parameter names, exposed to the UI/engine
    paramList = ['name',
                 'className',
                 'author',
                 'vtSymbol',
                 'atrLength',
                 'atrMaLength',
                 'rsiLength',
                 'rsiEntry',
                 'trailingPercent']

    # Variable names, exposed to the UI/engine
    varList = ['inited',
               'trading',
               'pos',
               'atrValue',
               'atrMa',
               'rsiValue',
               'rsiBuy',
               'rsiSell']
#----------------------------------------------------------------------
    def __init__(self, ctaEngine, setting):
        """Constructor"""
        super(AtrRsiStrategy, self).__init__(ctaEngine, setting)

        # Mutable object attributes (typically lists/dicts) must be re-created
        # here: class-level mutables would be shared between strategy
        # instances and could cause subtle cross-instance state bugs. The
        # class-level declarations above exist only for readability.
        self.isPrePosHaved = False     # presumably: prior position already loaded — TODO confirm
        self.isAlreadyTraded = False   # presumably: this instance has already traded — TODO confirm
#----------------------------------------------------------------------
    def onInit(self):
        """Initialize the strategy (required override): set RSI thresholds
        and warm up indicators by replaying historical bars."""
        self.writeCtaLog(u'%s策略初始化' %self.name)

        # Derive the RSI entry thresholds from the midline offset.
        self.rsiBuy = 50 + self.rsiEntry
        self.rsiSell = 50 - self.rsiEntry

        # Load history and replay it through onBar to populate indicator state.
        initData = self.loadBar(self.initDays)
        for bar in initData:
            self.onBar(bar)

        self.putEvent()
#----------------------------------------------------------------------
    def onStart(self):
        """Start the strategy (required override)."""
        self.writeCtaLog(u'%s策略启动' %self.name)
        self.putEvent()
#----------------------------------------------------------------------
    def onStop(self):
        """Stop the strategy (required override)."""
        self.writeCtaLog(u'%s策略停止' %self.name)
        self.putEvent()
#----------------------------------------------------------------------
    def onTick(self, tick):
        """Handle a market tick (required override): aggregate ticks into
        1-minute bars and forward each completed bar to onBar()."""
        # Build the 1-minute bar.
        tickMinute = tick.datetime.minute

        if tickMinute != self.barMinute:
            # A new minute started: flush the finished bar, if any.
            if self.bar:
                self.onBar(self.bar)

            bar = CtaBarData()
            bar.vtSymbol = tick.vtSymbol
            bar.symbol = tick.symbol
            bar.exchange = tick.exchange

            bar.open = tick.lastPrice
            bar.high = tick.lastPrice
            bar.low = tick.lastPrice
            bar.close = tick.lastPrice

            bar.date = tick.date
            bar.time = tick.time
            bar.datetime = tick.datetime    # the bar's timestamp is its first tick's

            self.bar = bar                  # direct reference avoids an extra attribute lookup
            self.barMinute = tickMinute     # remember the current minute
        else:                               # otherwise keep accumulating into the current bar
            bar = self.bar                  # local alias, again for speed
            bar.high = max(bar.high, tick.lastPrice)
            bar.low = min(bar.low, tick.lastPrice)
            bar.close = tick.lastPrice
#----------------------------------------------------------------------
    def onBar(self, bar):
        """Handle a completed 1-minute bar (required override): update the
        indicator buffers, then generate entry/exit orders."""
        # Cancel all orders still pending from the previous bar
        # (both limit orders and stop orders).
        for orderID in self.orderList:
            self.cancelOrder(orderID)
        self.orderList = []

        # Shift the rolling buffers left by one and append the new bar.
        self.closeArray[0:self.bufferSize-1] = self.closeArray[1:self.bufferSize]
        self.highArray[0:self.bufferSize-1] = self.highArray[1:self.bufferSize]
        self.lowArray[0:self.bufferSize-1] = self.lowArray[1:self.bufferSize]

        self.closeArray[-1] = bar.close
        self.highArray[-1] = bar.high
        self.lowArray[-1] = bar.low

        self.bufferCount += 1
        if self.bufferCount < self.bufferSize:
            # Not enough bars yet to compute the indicators.
            return

        # Compute indicator values.
        self.atrValue = talib.ATR(self.highArray,
                                  self.lowArray,
                                  self.closeArray,
                                  self.atrLength)[-1]
        self.atrArray[0:self.bufferSize-1] = self.atrArray[1:self.bufferSize]
        self.atrArray[-1] = self.atrValue

        self.atrCount += 1
        if self.atrCount < self.bufferSize:
            # Not enough ATR samples yet for the ATR moving average.
            return

        self.atrMa = talib.MA(self.atrArray,
                              self.atrMaLength)[-1]
        self.rsiValue = talib.RSI(self.closeArray,
                                  self.rsiLength)[-1]

        # Decide whether to trade.
        # Currently flat.
        if self.pos == 0:
            self.intraTradeHigh = bar.high
            self.intraTradeLow = bar.low

            # ATR crossing above its moving average means short-term
            # volatility is rising — a trend is more likely, which suits
            # CTA-style entries.
            if self.atrValue > self.atrMa:
                # In a trend, RSI saturates in the overbought/oversold zone;
                # use that saturation as the entry signal.
                if self.rsiValue > self.rsiBuy:
                    # Cross the spread by 5 index points to ensure a fill.
                    self.buy(bar.close+5, self.fixedSize)
                elif self.rsiValue < self.rsiSell:
                    self.short(bar.close-5, self.fixedSize)

        # Holding a long position.
        # NOTE(review): comparing pos to +/-1 assumes fixedSize == 1 — confirm.
        elif self.pos == 1:
            # Track the highest price while long; reset the low watermark.
            self.intraTradeHigh = max(self.intraTradeHigh, bar.high)
            self.intraTradeLow = bar.low
            # Compute the long trailing stop.
            longStop = self.intraTradeHigh * (1-self.trailingPercent/100)
            # Place a local stop order and record its ID for later cancellation.
            orderID = self.sell(longStop, abs(self.pos), stop=True)
            self.orderList.append(orderID)

        # Holding a short position.
        elif self.pos == -1:
            self.intraTradeLow = min(self.intraTradeLow, bar.low)
            self.intraTradeHigh = bar.high
            shortStop = self.intraTradeLow * (1+self.trailingPercent/100)
            orderID = self.cover(shortStop, abs(self.pos), stop=True)
            self.orderList.append(orderID)

        # Publish a status-update event.
        self.putEvent()
#----------------------------------------------------------------------
    def onOrder(self, order):
        """Handle an order status update (required override). No-op here."""
        pass
#----------------------------------------------------------------------
    def onTrade(self, trade):
        """Handle a trade/fill event (required override). No-op here."""
        pass
#----------------------------------------------------------------------
def onPosition(self, pos):
if self.isPrePosHaved or self.isAlreadyTraded: # 还没有开过仓,或,还没有获取历史仓位
return
elif pos.position != 0:
if pos.direction == DIRECTION_LONG:
self.pos = pos.position
else:
self.pos = |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.