Dataset columns (name, dtype, observed length range or distinct values):

repo_name        string    lengths 5 to 92
path             string    lengths 4 to 221
copies           string    19 distinct values
size             string    lengths 4 to 6
content          string    lengths 766 to 896k
license          string    15 distinct values
hash             int64     -9,223,277,421,539,062,000 to 9,223,102,107B
line_mean        float64   6.51 to 99.9
line_max         int64     32 to 997
alpha_frac       float64   0.25 to 0.96
autogenerated    bool      1 class
ratio            float64   1.5 to 13.6
config_test      bool      2 classes
has_no_keywords  bool      2 classes
few_assignments  bool      1 class
hkff/AccLab
pyAAL/shell.py
1
5489
""" Shell Copyright (C) 2014 Walid Benghabrit This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ __author__ = 'walid' import AALCompiler from AALChecker import * from importlib import reload import os import re import curses from tools.hottie import hot from AALCompiler import AALCompilerListener # TODO : make it more user friendly self = None help_str = "Shell Help" +\ "\n - call(macro, args) " + "\t call a macro where /\n" +\ "\t\t\t *macro : is the name of the macro\n" +\ "\t\t\t *args : a list of string; << ex : [\"'args1'\", \"'args2'\", ...\"'argsN'\"] >>" +\ "\n - clauses() " + "\t show all declared clauses in the loaded aal program" +\ "\n - macros() " + "\t show all declared macros in the loaded aal program" +\ "\n - load(lib) " + "\t load the library lib" +\ "\n - quit / q " + "\t exit the shell" +\ "\n - help / h / man() " + "\t show this help" +\ "\n - self " + "\t the current compiler instance of the loaded aal program" +\ "\n - aalprog " + "\t the current loaded aal program " +\ "\n - man(arg) " + "\t print the help for the given arg" +\ "\n - hs(module) " + "\t hotswaping : reload the module" +\ "\n - r() " + "\t hot-swaping the shell" help_str = Color(help_str) COMMANDS = ['clauses()', 'macros()', 'quit', 'q', 'h', 'help', 'self', 'aalprog', 'man()', 'call', 'extra'] RE_SPACE = re.compile('.*\s+$', re.M) # Completer class class Completer(object): def complete(self, text, state): """Generic readline completion entry point.""" try: import readline except: print(Color("{autored}[ERROR] You need to install readline module to use the shell.{/red}\n" "Please visit {autogreen}https://pypi.python.org/pypi/readline{/green}\n")) sys.exit(-1) buffer = readline.get_line_buffer() line = readline.get_line_buffer().split() # show all commands if not line: return [c + ' ' for c in COMMANDS][state] # account for last argument ending in a space if RE_SPACE.match(buffer): line.append('') # resolve command to the implementation function cmd = line[0].strip() if cmd in COMMANDS: impl = getattr(self, 'complete_%s' % cmd) args = line[1:] if args: return (impl(args) + [None])[state] return [cmd + ' '][state] results = [c + ' ' for c in COMMANDS if c.startswith(cmd)] + [None] return results[state] # Man method def man(args=None): if args is None: print(help_str) else: print("printing manual for " + str(args.__class__)) arg_type = type(args) if isinstance(args, aalmmnode): print(args.man()) else: AALCompilerListener.man() # Interactive mode @hot def shell(listener): try: import readline except: print(Color("{autored}[ERROR] You need to install readline module to use the shell.{/red}\n" "Please visit {autogreen}https://pypi.python.org/pypi/readline{/green}\n")) sys.exit(-1) import shell, AALMetaModel, inspect # For hotswaping stop = False self = listener aalprog = self.aalprog comp = Completer() # we want to treat '/' as part of a word, so override the delimiters readline.set_completer_delims(' \t\n;') readline.parse_and_bind("tab: complete") 
readline.set_completer(comp.complete) # Load a lib on the current AAL file def load(lib): return self.load_lib(lib) # Call a macro on the loaded file def call(macro, args=None): if args is None: args = [] return self.macro_call(macro, args) # Get clauses def clauses(): return self.get_clauses() # Get macros def macros(): return self.get_macros() # Reload shell def r(): return reload(shell) # Reload a module def hs(module): res = reload(module) # Use hot swaping decoration on all AALMetaModel classes # NOTE : stop abusing introspection... for name, obj in inspect.getmembers(module): if inspect.isclass(obj): if "AALMetaModel" in str(obj): obj = hot(obj) return res # return #exec("from AALCompiler import AALCompilerListener"); while not stop: cmd = input("shell >") # cmd = sys.stdin.read() if cmd == "quit" or cmd == "q": stop = True elif cmd == "help" or cmd == "h": man() else: try: res = eval(cmd) if res is not None: print(res) except: print("Eval error !", sys.exc_info()[:2])
gpl-3.0
-8,510,460,452,724,908,000
31.473373
107
0.574891
false
3.779614
false
false
false
kenyansongithub/django-rog
setup.py
1
1360
__author__ = 'ndieks'
import os
from setuptools import setup
import setuptools

with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-rog',
    version='0.1',
    packages=setuptools.find_packages(),
    include_package_data=True,
    zip_safe=False,
    license='BSD License',
    description='A simple Django app to track activities of people from some location.',
    long_description=README,
    url='https://www.example.com/',
    author='danleyb2',
    author_email='ndieksman@gmail.com',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.4',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
bsd-2-clause
-8,937,582,572,829,403,000
34.789474
92
0.580882
false
4.197531
false
true
false
osuripple/lets
helpers/leaderboardHelper.py
1
2869
from common.log import logUtils as log from common.ripple import scoreUtils from objects import glob from common.ripple import userUtils def getRankInfo(userID, gameMode): """ Get userID's current rank, user above us and pp/score difference :param userID: user :param gameMode: gameMode number :return: {"nextUsername": "", "difference": 0, "currentRank": 0} """ data = {"nextUsername": "", "difference": 0, "currentRank": 0} k = "ripple:leaderboard:{}".format(scoreUtils.readableGameMode(gameMode)) position = userUtils.getGameRank(userID, gameMode) - 1 log.debug("Our position is {}".format(position)) if position is not None and position > 0: aboveUs = glob.redis.zrevrange(k, position - 1, position) log.debug("{} is above us".format(aboveUs)) if aboveUs is not None and len(aboveUs) > 0 and aboveUs[0].isdigit(): # Get our rank, next rank username and pp/score difference myScore = glob.redis.zscore(k, userID) otherScore = glob.redis.zscore(k, aboveUs[0]) nextUsername = userUtils.getUsername(aboveUs[0]) if nextUsername is not None and myScore is not None and otherScore is not None: data["nextUsername"] = nextUsername data["difference"] = int(myScore) - int(otherScore) else: position = 0 data["currentRank"] = position + 1 return data def update(userID, newScore, gameMode, *, relax=False): """ Update gamemode's leaderboard. Doesn't do anything if userID is banned/restricted. :param userID: user :param newScore: new score or pp :param gameMode: gameMode number :param relax: if True, update relax global leaderboard, otherwise update classic global leaderboard """ if userUtils.isAllowed(userID): log.debug("Updating leaderboard...") glob.redis.zadd( "ripple:leaderboard:{}{}".format(scoreUtils.readableGameMode(gameMode), ":relax" if relax else ""), str(userID), str(newScore) ) else: log.debug("Leaderboard update for user {} skipped (not allowed)".format(userID)) def updateCountry(userID, newScore, gameMode, *, relax=False): """ Update gamemode's country leaderboard. Doesn't do anything if userID is banned/restricted. :param userID: user, country is determined by the user :param newScore: new score or pp :param gameMode: gameMode number :param relax: if True, update relax country leaderboard, otherwise update classic country leaderboard :return: """ if userUtils.isAllowed(userID): country = userUtils.getCountry(userID) if country is not None and len(country) > 0 and country.lower() != "xx": log.debug("Updating {} country leaderboard...".format(country)) k = "ripple:leaderboard:{}:{}{}".format( scoreUtils.readableGameMode(gameMode), country.lower(), ":relax" if relax else "" ) glob.redis.zadd(k, str(userID), str(newScore)) else: log.debug("Country leaderboard update for user {} skipped (not allowed)".format(userID))
agpl-3.0
9,100,213,351,443,686,000
36.25974
102
0.721854
false
3.234498
false
false
false
wolcomm/djangolg
djangolg/dialects/base.py
1
2044
# Copyright 2017 Workonline Communications (Pty) Ltd. All rights reserved. # # The contents of this file are licensed under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with the # License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. """Base dialect class for djangolg.""" from __future__ import print_function from __future__ import unicode_literals import inspect import napalm from napalm_base import NetworkDriver class BaseDialect(object): """Device base dialect class.""" driver_class = None name = None description = None commands = {} def __init__(self): """Initialise new instance.""" if not isinstance(self.driver_class, NetworkDriver): if type(self).name: self.driver_class = napalm.get_network_driver(type(self).name) else: raise ValueError def get_command_syntax(self, method=None, option=None): """Get the dialect specific syntax for a given method as a lambda.""" from djangolg.methods.base import BaseMethod if not isinstance(method, BaseMethod): return ValueError syntax = None if method.name in self.commands: if option is not None: if option in self.commands[method.name]: syntax = self.commands[method.name][option] else: syntax = self.commands[method.name] if syntax: if inspect.isfunction(syntax): return syntax else: raise TypeError # pragma: no cover raise NotImplementedError # pragma: no cover
apache-2.0
8,075,817,260,728,690,000
33.644068
79
0.65362
false
4.433839
false
false
false
juancarlosqr/datascience
python/playground/distributed_systems/classes.py
1
3241
#!/usr/bin/env python import sqlalchemy from sqlalchemy import Column, ForeignKey, Integer, String, Float from sqlalchemy.orm import relationship, backref from sqlalchemy.ext.declarative import declarative_base Base = declarative_base() """ ****** New Class ****** """ class Servidor(Base): """docstring for Servidor""" __tablename__ = 'servidor' ser_id = Column(Integer, primary_key=True) ser_ip = Column(String) ser_nombre = Column(String) grupos = relationship("Grupo", back_populates="servidor", cascade="all, delete, delete-orphan") discos = relationship("DiscoDuro", back_populates="servidor_dis", cascade="all, delete, delete-orphan") def __init__(self, ser_ip, ser_nombre): self.ser_ip = ser_ip self.ser_nombre = ser_nombre def __repr__(self): return "<Servidor ('%s','%s')>" % (self.ser_ip, self.ser_nombre) """ ****** New Class ****** """ class Grupo(Base): """docstring for Servidor""" __tablename__ = 'grupo' gru_id = Column(Integer, primary_key=True) gru_grupo = Column(String) gru_groupid = Column(Integer) servidor_ser_id = Column(Integer, ForeignKey('servidor.ser_id')) servidor = relationship("Servidor", back_populates="grupos") usuarios = relationship("Usuario", back_populates="grupo", cascade="all, delete, delete-orphan") def __init__(self, gru_grupo, gru_groupid): self.gru_grupo = gru_grupo self.gru_groupid = gru_groupid def __repr__(self): return "<Grupo ('%s','%s')>" % (self.gru_grupo, self.gru_groupid) """ ****** New Class ****** """ class Usuario(Base): """docstring for Usuario""" __tablename__ = 'usuario' usu_id = Column(Integer, primary_key=True) usu_usuario = Column(String) usu_descripcion = Column(String) usu_directorio = Column(String) usu_shell = Column(String) grupo_gru_id = Column(Integer, ForeignKey('grupo.gru_id')) grupo = relationship("Grupo", back_populates="usuarios") def __init__(self, usu_usuario, usu_descripcion, usu_directorio, usu_shell): self.usu_usuario = usu_usuario self.usu_descripcion = usu_descripcion self.usu_directorio = usu_directorio self.usu_shell = usu_shell def __repr__(self): return "<Usuario ('%s','%s','%s','%s')>" % (self.usu_usuario, self.usu_descripcion, self.usu_directorio, usu_shell) """ ****** New Class ****** """ class DiscoDuro(Base): """docstring for DiscoDuro""" __tablename__ = 'disco_duro' dis_id = Column(Integer, primary_key=True) dis_nombre = Column(String) dis_tamano = Column(String) dis_usado = Column(String) dis_disponible = Column(String) dis_usado_porcen = Column(String) dis_montado = Column(String) servidor_ser_id = Column(Integer, ForeignKey('servidor.ser_id')) servidor_dis = relationship("Servidor", back_populates="discos") def __init__(self, dis_nombre, dis_tamano, dis_usado, dis_disponible, dis_usado_porcen, dis_montado ): self.dis_nombre = dis_nombre self.dis_tamano = dis_tamano self.dis_usado = dis_usado self.dis_disponible = dis_disponible self.dis_usado_porcen = dis_usado_porcen self.dis_montado = dis_montado def __repr__(self): return "<DiscoDuro ('%s','%s','%s','%s')>" % (self.dis_nombre, self.dis_tamano, self.dis_usado, self.dis_disponible)
mit
-4,709,690,366,457,514,000
33.849462
120
0.668929
false
2.825632
false
false
false
ChristopherHogan/cython
Cython/Compiler/UtilityCode.py
2
9261
from __future__ import absolute_import from .TreeFragment import parse_from_strings, StringParseContext from . import Symtab from . import Naming from . import Code class NonManglingModuleScope(Symtab.ModuleScope): cpp = False def __init__(self, prefix, *args, **kw): self.prefix = prefix self.cython_scope = None Symtab.ModuleScope.__init__(self, *args, **kw) def add_imported_entry(self, name, entry, pos): entry.used = True return super(NonManglingModuleScope, self).add_imported_entry(name, entry, pos) def mangle(self, prefix, name=None): if name: if prefix in (Naming.typeobj_prefix, Naming.func_prefix, Naming.var_prefix, Naming.pyfunc_prefix): # Functions, classes etc. gets a manually defined prefix easily # manually callable instead (the one passed to CythonUtilityCode) prefix = self.prefix return "%s%s" % (prefix, name) else: return Symtab.ModuleScope.mangle(self, prefix) class CythonUtilityCodeContext(StringParseContext): scope = None def find_module(self, module_name, relative_to=None, pos=None, need_pxd=True, absolute_fallback=True): if relative_to: raise AssertionError("Relative imports not supported in utility code.") if module_name != self.module_name: if module_name not in self.modules: raise AssertionError("Only the cython cimport is supported.") else: return self.modules[module_name] if self.scope is None: self.scope = NonManglingModuleScope( self.prefix, module_name, parent_module=None, context=self) return self.scope class CythonUtilityCode(Code.UtilityCodeBase): """ Utility code written in the Cython language itself. The @cname decorator can set the cname for a function, method of cdef class. Functions decorated with @cname('c_func_name') get the given cname. For cdef classes the rules are as follows: obj struct -> <cname>_obj obj type ptr -> <cname>_type methods -> <class_cname>_<method_cname> For methods the cname decorator is optional, but without the decorator the methods will not be prototyped. See Cython.Compiler.CythonScope and tests/run/cythonscope.pyx for examples. """ is_cython_utility = True def __init__(self, impl, name="__pyxutil", prefix="", requires=None, file=None, from_scope=None, context=None, compiler_directives=None, outer_module_scope=None): # 1) We need to delay the parsing/processing, so that all modules can be # imported without import loops # 2) The same utility code object can be used for multiple source files; # while the generated node trees can be altered in the compilation of a # single file. # Hence, delay any processing until later. 
context_types = {} if context is not None: from .PyrexTypes import BaseType for key, value in context.items(): if isinstance(value, BaseType): context[key] = key context_types[key] = value impl = Code.sub_tempita(impl, context, file, name) self.impl = impl self.name = name self.file = file self.prefix = prefix self.requires = requires or [] self.from_scope = from_scope self.outer_module_scope = outer_module_scope self.compiler_directives = compiler_directives self.context_types = context_types def __eq__(self, other): if isinstance(other, CythonUtilityCode): return self._equality_params() == other._equality_params() else: return False def _equality_params(self): outer_scope = self.outer_module_scope while isinstance(outer_scope, NonManglingModuleScope): outer_scope = outer_scope.outer_scope return self.impl, outer_scope, self.compiler_directives def __hash__(self): return hash(self.impl) def get_tree(self, entries_only=False, cython_scope=None): from .AnalysedTreeTransforms import AutoTestDictTransform # The AutoTestDictTransform creates the statement "__test__ = {}", # which when copied into the main ModuleNode overwrites # any __test__ in user code; not desired excludes = [AutoTestDictTransform] from . import Pipeline, ParseTreeTransforms context = CythonUtilityCodeContext( self.name, compiler_directives=self.compiler_directives) context.prefix = self.prefix context.cython_scope = cython_scope #context = StringParseContext(self.name) tree = parse_from_strings( self.name, self.impl, context=context, allow_struct_enum_decorator=True) pipeline = Pipeline.create_pipeline(context, 'pyx', exclude_classes=excludes) if entries_only: p = [] for t in pipeline: p.append(t) if isinstance(p, ParseTreeTransforms.AnalyseDeclarationsTransform): break pipeline = p transform = ParseTreeTransforms.CnameDirectivesTransform(context) # InterpretCompilerDirectives already does a cdef declarator check #before = ParseTreeTransforms.DecoratorTransform before = ParseTreeTransforms.InterpretCompilerDirectives pipeline = Pipeline.insert_into_pipeline(pipeline, transform, before=before) def merge_scope(scope): def merge_scope_transform(module_node): module_node.scope.merge_in(scope) return module_node return merge_scope_transform if self.from_scope: pipeline = Pipeline.insert_into_pipeline( pipeline, merge_scope(self.from_scope), before=ParseTreeTransforms.AnalyseDeclarationsTransform) for dep in self.requires: if isinstance(dep, CythonUtilityCode) and hasattr(dep, 'tree') and not cython_scope: pipeline = Pipeline.insert_into_pipeline( pipeline, merge_scope(dep.tree.scope), before=ParseTreeTransforms.AnalyseDeclarationsTransform) if self.outer_module_scope: # inject outer module between utility code module and builtin module def scope_transform(module_node): module_node.scope.outer_scope = self.outer_module_scope return module_node pipeline = Pipeline.insert_into_pipeline( pipeline, scope_transform, before=ParseTreeTransforms.AnalyseDeclarationsTransform) if self.context_types: # inject types into module scope def scope_transform(module_node): for name, type in self.context_types.items(): entry = module_node.scope.declare_type(name, type, None, visibility='extern') entry.in_cinclude = True return module_node pipeline = Pipeline.insert_into_pipeline( pipeline, scope_transform, before=ParseTreeTransforms.AnalyseDeclarationsTransform) (err, tree) = Pipeline.run_pipeline(pipeline, tree, printtree=False) assert not err, err self.tree = tree return tree def put_code(self, output): pass @classmethod def load_as_string(cls, 
util_code_name, from_file=None, **kwargs): """ Load a utility code as a string. Returns (proto, implementation) """ util = cls.load(util_code_name, from_file, **kwargs) return util.proto, util.impl # keep line numbers => no lstrip() def declare_in_scope(self, dest_scope, used=False, cython_scope=None, whitelist=None): """ Declare all entries from the utility code in dest_scope. Code will only be included for used entries. If module_name is given, declare the type entries with that name. """ tree = self.get_tree(entries_only=True, cython_scope=cython_scope) entries = tree.scope.entries entries.pop('__name__') entries.pop('__file__') entries.pop('__builtins__') entries.pop('__doc__') for entry in entries.values(): entry.utility_code_definition = self entry.used = used original_scope = tree.scope dest_scope.merge_in(original_scope, merge_unused=True, whitelist=whitelist) tree.scope = dest_scope for dep in self.requires: if dep.is_cython_utility: dep.declare_in_scope(dest_scope) return original_scope def declare_declarations_in_scope(declaration_string, env, private_type=True, *args, **kwargs): """ Declare some declarations given as Cython code in declaration_string in scope env. """ CythonUtilityCode(declaration_string, *args, **kwargs).declare_in_scope(env)
apache-2.0
8,189,437,940,946,829,000
38.075949
110
0.620451
false
4.407901
true
false
false
cschenck/blender_sim
cutil/video_creator.py
1
18026
#!/usr/bin/env python import os import cv2 import numpy as np import subprocess import tempfile import connor_util as cutil def draw_arrow(image, p, q, color, arrow_magnitude=9, thickness=1, line_type=8, shift=0): # adapted from http://mlikihazar.blogspot.com.au/2013/02/draw-arrow-opencv.html # draw arrow tail cv2.line(image, p, q, color, thickness, line_type, shift) # calc angle of the arrow angle = np.arctan2(p[1]-q[1], p[0]-q[0]) # starting point of first line of arrow head p = (int(q[0] + arrow_magnitude * np.cos(angle + np.pi/4)), int(q[1] + arrow_magnitude * np.sin(angle + np.pi/4))) # draw first half of arrow head cv2.line(image, p, q, color, thickness, line_type, shift) # starting point of second line of arrow head p = (int(q[0] + arrow_magnitude * np.cos(angle - np.pi/4)), int(q[1] + arrow_magnitude * np.sin(angle - np.pi/4))) # draw second half of arrow head cv2.line(image, p, q, color, thickness, line_type, shift) class VideoCreator: def __init__(self, width, height): self.width_ = width self.height_ = height self.frames = np.zeros((1,height,width,3), dtype=np.uint8) self.shift = 0 def length(self): return self.frames.shape[0]+self.shift def width(self): return self.frames.shape[2] def height(self): return self.frames.shape[1] def save(self, out_fp, codec='XVID', fps=30): writer = cv2.VideoWriter(out_fp, cv2.cv.CV_FOURCC(*codec), fps, (self.width(), self.height())) for t in range(self.length()): writer.write(self.frames[t,...]) writer.release() def saveMP4(self, out_fp, fps=30): tmp = tempfile.NamedTemporaryFile() self.save(tmp.name, fps=fps) command = "avconv -i %s -c:v libx264 -c:a copy %s" % (tmp.name, out_fp) subprocess.call(command.split()) def saveGif(self, out_fp, fps=30): import matplotlib.pyplot as plt import matplotlib.animation as animation fig = plt.figure() ax = fig.add_subplot(111) #ax = fig.add_axes([0,0,1.0,1.0]) ax.set_axis_off() fig.tight_layout() fig.set_size_inches(self.width()/100.0, self.height()/100.0, forward=True) fig.subplots_adjust(left=0, bottom=0, right=1, top=1, wspace=0, hspace=0) ims = map(lambda i: (ax.imshow(self.frames[i,...,::-1]),ax.set_title('')), range(0, self.frames.shape[0])) im_ani = animation.ArtistAnimation(fig, ims, interval=1000.0/fps, repeat_delay=0, blit=False) #plt.show() im_ani.save(out_fp, writer='imagemagick', savefig_kwargs={'bbox_inches':'tight'}) def savePartial(self, end, out_fp=None, codec='XVID', fps=30, finish=False): if out_fp is not None: self.writer = cv2.VideoWriter(out_fp, cv2.cv.CV_FOURCC(*codec), fps, (self.width(), self.height())) for i in range(self.shift, end): self.writer.write(self.frames[i-self.shift,...]) self.frames = self.frames[(end-self.shift):,...] self.shift = end if finish: self.writer.release() def load(self, fp): cap = cv2.VideoCapture(fp) length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) self.frames = np.zeros((length, height, width, 3), dtype=np.uint8) self.width_ = width self.height_ = height self.shift = 0 i = 0 pm = cutil.ProgressMonitor(lambda : 1.0*i/length, update_interval=None) while cap.isOpened(): ret, frame = cap.read() if not ret: break self.frames[i,...] 
= frame i += 1 pm.print_progress() pm.stop() def crop(self, start=0, end=None, x=0, y=0, w=None, h=None): if end is None: end = self.length() if w is None: w = self.width() if h is None: h = self.height() other = VideoCreator(w, h) other.setFrames(self.frames[start-self.shift:end-self.shift,y:y+h,x:x+w,...], 0) return other def playVideo(self, fps=30): spin = True while spin: for i in range(self.frames.shape[0]): cv2.imshow("Video", self.frames[i,...]) k = cv2.waitKey(1000/fps) if k in [27, 1048603]: spin = False break if spin: print("Restarting from the beginning.") def __expand_frames(self, start, end): if end-self.shift >= self.frames.shape[0]: self.frames = np.concatenate((self.frames, np.zeros((end-self.shift - self.frames.shape[0],self.height_,self.width_,3), dtype=self.frames.dtype)), axis=0) def solidColor(self, start, end, color): self.__expand_frames(start, end) for c in range(self.frames.shape[-1]): self.frames[(start-self.shift):(end-self.shift),:,:,c] = color[c] def __listify(self, x): if type(x) in [list, tuple]: return x else: return [x] def placeText(self, lines, start, end, location='center', font=cv2.FONT_HERSHEY_COMPLEX, scale=2, color=(255,255,255), thickness=2, fade_in=None, fade_out=None, x_shift=0, y_shift=0): self.__expand_frames(start, end) lines = self.__listify(lines) font = self.__listify(font) scale = self.__listify(scale) thickness = self.__listify(thickness) fade_in = self.__listify(fade_in) fade_out = self.__listify(fade_out) x_shift = self.__listify(x_shift) y_shift = self.__listify(y_shift) if type(color[0]) not in [list, tuple]: color = [color] sizes = [] for i,line in enumerate(lines): f = font[min(i, len(font)-1)] s = scale[min(i, len(scale)-1)] t = thickness[min(i, len(thickness)-1)] (w,h),b = cv2.getTextSize(line, f, s, t) w = int(round(w)) h = int(round(h)) b = int(round(b)) sizes.append((w, h, b)) if location in ['northwest', 'southwest', 'west']: x_coeff = 0 start_x = 0 elif location in ['north', 'center', 'south']: x_coeff = 0.5 start_x = self.width_/2 else: x_coeff = 1.0 start_x = self.width_ if location in ['northwest', 'northeast', 'north']: y = 0 elif location in ['west', 'center', 'east']: y = self.height_/2 - sum([x[1]+x[2] for x in sizes])/2 else: y = self.height_ - sum([x[1]+x[2] for x in sizes]) y = int(round(y)) for i,line in enumerate(lines): f = font[min(i, len(font)-1)] s = scale[min(i, len(scale)-1)] t = thickness[min(i, len(thickness)-1)] c = color[min(i, len(color)-1)] fi = fade_in[min(i, len(fade_in)-1)] fi = fi if fi is not None else 0 fo = fade_out[min(i, len(fade_out)-1)] fo = fo if fo is not None else 0 xs = x_shift[min(i, len(x_shift)-1)] ys = y_shift[min(i, len(y_shift)-1)] w,h,b = sizes[i] y += h yy = y + ys x = int(round(start_x - x_coeff*w)) + xs bjs = x bje = x+w bis = yy - h bie = yy + b for j in range(start, start+fi): r = 1.0*(j-start)/fi orig = self.frames[j-self.shift,bis:bie,bjs:bje,...].copy() cv2.putText(self.frames[j-self.shift,...], line, (x,yy), f, s, c, t) self.frames[j-self.shift,bis:bie,bjs:bje,...] = r*self.frames[j-self.shift,bis:bie,bjs:bje,...] + (1-r)*orig for j in range(start+fi, end-fo): cv2.putText(self.frames[j-self.shift,...], line, (x,yy), f, s, c, t) for j in range(end-fo, end): r = 1.0*(j - (end-fo))/fo orig = self.frames[j-self.shift,bis:bie,bjs:bje,...].copy() cv2.putText(self.frames[j-self.shift,...], line, (x,yy), f, s, c, t) self.frames[j-self.shift,bis:bie,bjs:bje,...] = (1-r)*self.frames[j-self.shift,bis:bie,bjs:bje,...] 
+ r*orig y += b def drawArrow(self, start, end, p, q, color, arrow_magnitude=9, thickness=1, fade_in=0, fade_out=0): self.__expand_frames(start, end) for t in range(start, start+fade_in): r = 1.0*(t-start)/fade_in orig = self.frames[t-self.shift,...].copy() draw_arrow(self.frames[t-self.shift,...], p, q, color, arrow_magnitude=arrow_magnitude, thickness=thickness) self.frames[t-self.shift,...] = r*self.frames[t-self.shift,...] + (1-r)*orig for t in range(start+fade_in, end-fade_out): draw_arrow(self.frames[t-self.shift,...], p, q, color, arrow_magnitude=arrow_magnitude, thickness=thickness) for t in range(end-fade_out, end): r = 1.0*(t - (end-fade_out))/fade_out orig = self.frames[t-self.shift,...].copy() draw_arrow(self.frames[t-self.shift,...], p, q, color, arrow_magnitude=arrow_magnitude, thickness=thickness) self.frames[t-self.shift,...] = (1-r)*self.frames[t-self.shift,...] + r*orig def append(self, other, crossfade=0): self.combine(other, 0, other.length(), self.length() - crossfade, self.length()) def append_load(self, fp, crossfade=0): cap = cv2.VideoCapture(fp) length = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_COUNT)) width = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_WIDTH)) height = int(cap.get(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT)) start = self.length() - crossfade end = start + length self.__expand_frames(start, end) i = 0 while cap.isOpened(): ret, frame = cap.read() if not ret: break frame = self.resize(frame, self.width(), self.height()) if i < crossfade: r = 1.0*i/crossfade self.frames[start+i-self.shift,...] = r*frame + (1-r)*self.frames[start+i-self.shift,...] else: self.frames[start+i-self.shift,...] = frame i += 1 def combine(self, other, other_start, other_end, self_start, self_end_trans): ntrans = (self_end_trans - self_start) length = other_end - other_start self_end = self_start + length self.__expand_frames(self_start, self_end) for i in range(ntrans): r = 1.0*i/ntrans self.frames[self_start+i-self.shift,...] = (r*other.frames[other_start+i-other.shift,...] + (1-r)*self.frames[self_start+i-self.shift,...]) for i in range(ntrans,length): self.frames[self_start+i-self.shift,...] = other.frames[other_start+i-other.shift,...] def __resize_params(self, img_width, img_height, width, height): rw = 1.0*width/img_width rh = 1.0*height/img_height x_offset = 0 y_offset = 0 # Black bars on the side. if rh < rw: ratio = rh x_offset = int((width - ratio*img_width)/2) else: ratio = rw y_offset = int((height - ratio*img_height)/2) return ratio, x_offset, y_offset def resize(self, img, width, height): if img.shape[1] == width and img.shape[0] == height: return img ratio, x_offset, y_offset = self.__resize_params(img.shape[1], img.shape[0], width, height) img = cv2.resize(img, (int(ratio*img.shape[1]), int(ratio*img.shape[0]))) ret = np.zeros((height, width, 3), dtype=img.dtype) ret[y_offset:(y_offset+img.shape[0]),x_offset:(x_offset+img.shape[1]),:] = img return ret def loadFrames(self, fps, start): end = len(fps) + start self.__expand_frames(start, end) for i,fp in enumerate(fps): img = cv2.imread(fp) self.frames[i+start-self.shift,...] = self.resize(img, self.width_, self.height_) def setFrames(self, frs, start, heatmap=None): self.__expand_frames(start, start+frs.shape[0]) for t in range(frs.shape[0]): img = frs[t,...] while len(img.shape) < 3: img = np.expand_dims(img, len(img.shape)) # Only a single color channel. 
if img.shape[2] == 1: if heatmap is not None: #img = cv2.applyColorMap(img, heatmap) img = cutil.grayscaleToHeatmap(img, maxVal=255, rgb_max=255) else: img = np.tile(img, (1,1,3)) self.frames[start+t-self.shift,...] = self.resize(img, self.width(), self.height()) def grid(self, vcs, vcs_ranges, start): maxh = max([x.height() for x in vcs.flatten()]) maxw = max([x.width() for x in vcs.flatten()]) length = np.max(vcs_ranges[...,1] - vcs_ranges[...,0]) nrows = vcs.shape[0] ncols = vcs.shape[1] img = np.zeros((nrows*maxh, ncols*maxw, 3), dtype=np.uint8) self.__expand_frames(start, start+length) for t in range(length): for i in range(nrows): for j in range(ncols): if vcs[i,j] is None: continue r1 = vcs_ranges[i,j,0] try: img[(i*maxh):((i+1)*maxh),(j*maxw):((j+1)*maxw),...] = self.resize(vcs[i,j].frames[r1+t,...], maxw, maxh) except: cutil.keyboard("ERROR: video_creator.py:210") self.frames[start+t-self.shift,...] = self.resize(img, self.width(), self.height()) def grid_shift(self, vcs_start, vcs_end, vcs_ranges, start): # First let's setup all the variables. smaxh = max([x.height() for x in vcs_start.flatten()]) smaxw = max([x.width() for x in vcs_start.flatten()]) snrows = vcs_start.shape[0] sncols = vcs_start.shape[1] emaxh = max([x.height() for x in vcs_end.flatten()]) emaxw = max([x.width() for x in vcs_end.flatten()]) enrows = vcs_end.shape[0] encols = vcs_end.shape[1] length = np.max(vcs_ranges[...,1] - vcs_ranges[...,0]) height = self.height() width = self.width() sratio, sx_off, sy_off = self.__resize_params(smaxw*sncols, smaxh*snrows, width, height) eratio, ex_off, ey_off = self.__resize_params(emaxw*encols, emaxh*enrows, width, height) self.__expand_frames(start, start+length) # Next get the parentage. parents = vcs_end.copy() for i in range(enrows): for j in range(encols): parents[i,j] = None for pi in range(snrows): for pj in range(sncols): if vcs_start[pi, pj] == vcs_end[i, j]: parents[i,j] = (pi, pj) img = np.zeros((height, width, 3), dtype=np.uint8) for t in range(length): img[...] = 0 for i in range(enrows): for j in range(encols): pi, pj = parents[i,j] r1 = vcs_ranges[i,j,0] si1 = pi*smaxh*sratio + sy_off sj1 = pj*smaxw*sratio + sx_off si2 = (pi+1)*smaxh*sratio + sy_off sj2 = (pj+1)*smaxw*sratio + sx_off ei1 = i*smaxh*eratio + ey_off ej1 = j*smaxw*eratio + ex_off ei2 = (i+1)*smaxh*eratio + ey_off ej2 = (j+1)*smaxw*eratio + ex_off r = 1.0*t/length i1 = int(round((1-r)*si1 + r*ei1)) i2 = int(round((1-r)*si2 + r*ei2)) j1 = int(round((1-r)*sj1 + r*ej1)) j2 = int(round((1-r)*sj2 + r*ej2)) try: img[i1:i2,j1:j2,...] = self.resize(vcs_end[i,j].frames[t+r1,...], j2-j1, i2-i1) except: cutil.keyboard('err') self.frames[start+t-self.shift,...] = self.resize(img, self.width(), self.height()) def overlay(self, other, start, other_start, other_end, threshold=0): length = other_end - other_start self.__expand_frames(start, start+length) for t in range(length): img = other.frames[other_start+t-other.shift,...] idxs = np.where((img[...,0] > threshold) | (img[...,1] > threshold) | (img[...,2] > threshold)) if idxs[0].shape[0] == 0: continue for c in range(3): ii = idxs + (np.array([c]*idxs[0].shape[0]),) jj = (np.array([start + t-self.shift]*idxs[0].shape[0]),) + ii self.frames[jj] = img[ii] def blend(self, other, start, other_start, other_end, other_alpha=None): length = other_end - other_start self.__expand_frames(start, start+length) for t in range(length): img = other.frames[other_start+t-other.shift,...] under = self.frames[start+t-self.shift,...] 
if other_alpha is not None: bf = other_alpha.frames[t,...].max(axis=-1)/255.0 else: bf = img.max(axis=-1)/255.0 for c in range(3): self.frames[start+t-self.shift,...,c] = img[...,c]*bf + under[...,c]*(1 - bf) def repeatLastFrame(self, n): start = self.length() end = self.length() + n self.__expand_frames(start, end) for t in range(start, end): self.frames[t-self.shift,...] = self.frames[start-1-self.shift,...]
gpl-3.0
5,922,609,657,792,200,000
41.514151
129
0.524576
false
3.1978
false
false
false
diogenesfilho/Estadio
Estádio/ArquibancadaMetalicaTeste.py
1
5668
# -*- coding: utf-8 -*- from math import cos from math import pi from math import sin import timeit #import numpy import ctypes import random from sys import argv from OpenGL.GL import * from OpenGL.GLU import * from OpenGL.GLUT import * global esqdir,cimabaixo global mouseX, mouseY,mouseX_ant, mouseY_ant global distancia global obj esqdir,cimabaixo = 0,0 mouseY,mouseX,mouseX_ant,mouseY_ant = .0,.0,.0,.0 distancia = 20 obj = GLuint() def grade(qtd): glRotate(-90,1,0,0) glPushMatrix() glColor(0,0,0) for i in range(qtd): glutSolidCylinder(0.08,(i+1),10,10) glTranslate(1,0,0) glPopMatrix() glRotate(90,1,0,0) def bancos(qtd): glPushMatrix() glScale(.5,.4,2) for i in range(qtd): glutSolidCube(0.5) glTranslate(0.5,0,0) glPopMatrix() def corrimao(): # CORRIMÃO glPushMatrix() glColor3f(0.3,0.3,0.3) glTranslate(-0.6,3.5,17) glutSolidCylinder(0.02, 3.0, 40, 10) glPopMatrix() glPushMatrix() glColor3f(0.8,0.8,0.8) glTranslate(-0.6,3.4,17) glutSolidCylinder(0.02, 3.0, 40, 10) glPopMatrix() glPushMatrix() glColor3f(0.8,0.8,0.8) glTranslate(-0.6,3.3,17) glutSolidCylinder(0.02, 3.0, 40, 10) glPopMatrix() glPushMatrix() glColor3f(0.3,0.3,0.3) glRotate(90, 1.0, 0.0, 0.0) glTranslate(-0.6,18,-3.5) glutSolidCylinder(0.02, 0.5, 40, 10) glPopMatrix() def desenho(): global obj obj = glGenLists(1) glNewList(obj, GL_COMPILE) # PISO PASSAGEM glPushMatrix() glTranslate(0,1,99.9) glRotate(90,0,1,0) for i in range(1): glScale(1,1,2) bancos(400) glTranslate(0,1,1) glColor3f(3,0,0) # <- Apague o chapéu aqui. glRotate(90,1,0,0) glTranslate(0,3.5,-8) bancos(400) glPopMatrix() glPushMatrix() glTranslate(2,-15,-85) glScale(5,5,5) for i in range(15): corrimao() glTranslate(0,0,1) glPopMatrix() glPushMatrix() glTranslate(0.4,1,100) glRotate(90,0,1,0) for i in range(9): if i % 2 == 0: glColor3f(0.2,0.2,0.2) else: glColor3f(0.8,0.8,0.8) bancos(400) glTranslate(0,1,1) glPopMatrix() for i in range(50): glPushMatrix() grade(10) glRotate(-180,0,1,0) glRotate(-90,0,0,1) glTranslate(-9,-9,0) grade(10) glPopMatrix() glTranslate(0,0,2) glEndList() def executar(): global obj glCallList(obj) def iluminacao_da_cena(): luzAmbiente=[0.2,0.2,0.2,1.0] luzDifusa=[0.7,0.7,0.7,1.0] # ; // "cor" luzEspecular = [1.0, 1.0, 1.0, 1.0] #;// "brilho" posicaoLuz=[25, 50.0, 50.0, 1.0] #Capacidade de brilho do material especularidade=[1.0,1.0,1.0,1.0] especMaterial = 60; # Especifica que a cor de fundo da janela será branca glClearColor(1.0, 1.0, 1.0, 1.0) # Habilita o modelo de colorização de Gouraud glShadeModel(GL_SMOOTH) # Define a refletância do material glMaterialfv(GL_FRONT,GL_SPECULAR, especularidade) # Define a concentração do brilho glMateriali(GL_FRONT,GL_SHININESS,especMaterial) # Ativa o uso da luz ambiente glLightModelfv(GL_LIGHT_MODEL_AMBIENT, luzAmbiente) # Define os parâmetros da luz de número 0 glLightfv(GL_LIGHT0, GL_AMBIENT, luzAmbiente) glLightfv(GL_LIGHT0, GL_DIFFUSE, luzDifusa ) glLightfv(GL_LIGHT0, GL_SPECULAR, luzEspecular ) glLightfv(GL_LIGHT0, GL_POSITION, posicaoLuz ) # Habilita a definição da cor do material a partir da cor corrente glEnable(GL_COLOR_MATERIAL) # Habilita o uso de iluminação glEnable(GL_LIGHTING) # Habilita a luz de número 0 glEnable(GL_LIGHT0) # Habilita o depth-buffering glEnable(GL_DEPTH_TEST) def tela(): glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # Limpar a tela glClearColor(1.0, 1.0, 1.0, 1.0) # Limpa a janela com a cor especificada glMatrixMode(GL_PROJECTION) # Muda a matriz de projeçao glLoadIdentity()# carrega a matriz identidade gluPerspective(distancia,1,0.1,500) # Especifica a projeção 
perspectiva glMatrixMode(GL_MODELVIEW) # Especifica sistema de coordenadas do modelo glLoadIdentity() # Inicializa sistema de coordenadas do modelo gluLookAt(sin(esqdir) * 10, cimabaixo ,cos(esqdir) * 10, mouseX,mouseY,0, 0,1,0) # Especifica posição do observador e do alvo #iluminacao_da_cena() glEnable(GL_DEPTH_TEST) # verifica os pixels que devem ser plotados no desenho 3d executar() glFlush() # Aplica o desenho def teclado(tecla,x,y): global esqdir global cimabaixo if tecla == b'a': esqdir = esqdir - 0.1 elif tecla == b'd': esqdir = esqdir + 0.1 elif tecla == b'w': cimabaixo = cimabaixo + 0.1 elif tecla == b's': cimabaixo = cimabaixo - 0.1 glutPostRedisplay() def mouse(x,y): global mouseX, mouseY, mouseY_ant, mouseX_ant mouseX = (mouseX - mouseX_ant) * 0.005 mouseY = (mouseY_ant - mouseY) * 0.005 mouseY_ant,mouseX_ant = y,x glutPostRedisplay() def scroll(button,state,x,y): global distancia if(button == 3): distancia += 2 elif(button == 4): distancia -= 4 glutPostRedisplay() glutInit(argv) glutInitDisplayMode(GLUT_RGBA | GLUT_DEPTH) glutInitWindowSize(600,600) glutCreateWindow("Arquibancada") distancia = 20 desenho() glutDisplayFunc(tela) glutMotionFunc(mouse) glutMouseFunc(scroll) glutKeyboardFunc (teclado) glutMainLoop() # Inicia o laço de eventos da GLUT
gpl-2.0
-4,241,487,094,422,690,000
23.876652
129
0.630777
false
2.535698
false
false
false
radio-ho0/rpi_temp_humi
dht11_py/xtemp.py
1
3048
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 """ """ import sys import time import datetime import dht11 import RPi.GPIO as GPIO from PySide import QtGui, QtCore # initialize GPIO GPIO.setwarnings(False) GPIO.setmode(GPIO.BCM) GPIO.cleanup() # read data using pin 14 #instance = dht11.DHT11(pin = 14) instance = dht11.DHT11(pin = 18) class TempWidget(QtGui.QWidget): def __init__(self): super(TempWidget, self).__init__() self.initUi() def initUi(self): lb_title = QtGui.QLabel('temperature viewer', self) lb_title.move(180, 15) self.initBtExit() lb_dht11_hum = QtGui.QLabel('Dht11 hum') self.le_dht11_hum =QtGui.QLineEdit() lb_dht11_temp = QtGui.QLabel('dht11 temp') self.le_dht11_temp = QtGui.QLineEdit() self.lb_18b20_temp = QtGui.QLabel('Ds18b20') self.le_18b20_temp = QtGui.QLineEdit() grid = QtGui.QGridLayout() grid.setSpacing(10) grid.addWidget( lb_dht11_hum, 1, 0, 1, 1) grid.addWidget( self.le_dht11_hum, 1, 1, 1, 1) grid.addWidget( lb_dht11_temp, 2, 0, 1, 1) grid.addWidget( self.le_dht11_temp, 2, 1, 1, 1) grid.addWidget( self.lb_18b20_temp, 3, 0, 1, 1) grid.addWidget( self.le_18b20_temp, 3, 1, 1, 1) self.setLayout(grid) self.le_18b20_temp.setText('18') update_timer = QtCore.QTimer(self) update_timer.timeout.connect(self.get_all_temp) update_timer.start(2000) self.show() def initBtExit(self): btn1 = QtGui.QPushButton('aHa!', self) btn1.setToolTip('Just for play!') btn1.resize( btn1.sizeHint()) btn1.move( 10, 10) btnExit = QtGui.QPushButton('&Exit', self) btnExit.setToolTip('88') btnExit.clicked.connect(QtCore.QCoreApplication.instance().quit) btnExit.move( 380, 320 ) def get_all_temp(self): self.get_dht11() self.get_db18b20() def get_db18b20(self): tempfile = open("/sys/bus/w1/devices/28-031571bf56ff/w1_slave") thetext = tempfile.read() tempfile.close tempdata = thetext.split("\n")[1].split(" ")[9] temperature = float(tempdata[2:]) temperature = temperature / 1000 self.le_18b20_temp.setText( str(temperature) ) print("db18b20: " , temperature) def get_dht11(self): result = instance.read() if result.is_valid(): print("Last valid input: " + str(datetime.datetime.now())) print("Temperature: %d C" % result.temperature) print("Humidity: %d %%" % result.humidity) self.le_dht11_hum.setText(str(result.humidity)) self.le_dht11_temp.setText(str(result.temperature)) def main(): app = QtGui.QApplication(sys.argv) m_widget = TempWidget() m_widget.resize( 480, 360 ) m_widget.setWindowTitle('Temperature viewer!') sys.exit( app.exec_()) if __name__ == '__main__' : main()
gpl-2.0
-5,555,138,411,479,741,000
24.830508
74
0.593504
false
3.184953
false
false
false
jjdmol/LOFAR
LCU/StationTest/prbs_dir_test.py
1
7843
""" script for testing PRBS data in dir. This script can be used for testing data from the TBB, it will be used by TBB test scripts. Started by Gijs, 16 dec 07 Modified by Gijs on March 17 2009: -PRBS test bug fixed, when data is all 0 error did't count. -CRC test on files with RRBS errors. When a PRBS error and no CRC, error in RCU-to-RSP communications, when both has errors, error between RSP-to-TBB communication Modified by Menno on Sept 21 2009: -Removed Samples Checked because sometime 10238 or 10239 """ # INIT import array import operator import os import time import commands # Look for files to test def open_dir() : files = os.listdir('./prbs/.') files.sort() #print files return files # Open de file for testing def open_file(files, file_nr) : file_name = './prbs/' + files[file_nr][:] if files[file_nr][-3:] == 'dat': fileinfo = os.stat(file_name) size = int(fileinfo.st_size) f=open(file_name,'rb') max_frames = size/(88 + 1024*2 + 4) frames_to_proces=max_frames else : frames_to_proces=0 f=open(file_name,'rb') return f, frames_to_proces # Read single frame from file def read_frame(f, info_plot, frame_nr,f_log): station_info = array.array('B') station_info.fromfile(f,4) # Bytes 0..3 time_info = array.array('L') time_info.fromfile(f,3) # Bytes 4..15 if (info_plot) : time_string = time.ctime(time_info[1]) # string_info = 'Frame nr %(FR)d Station %(ST)d RSP %(RSP)d RCU %(RCU)d Sample rate %(S)d MHz time of data %(ti_D)s and %(SN)00.6f seconds'%\ # {"FR": frame_nr, "ST": station_info[0] ,"RSP": station_info[1], "RCU": station_info[2], "S": station_info[3], "ti_D": time_string,"SN": float(time_info[2])/float(200000000)} string_info = 'Frame nr %(FR)d RSP %(RSP)d RCU %(RCU)d Sample rate %(S)d MHz'%\ {"FR": frame_nr,"RSP": station_info[1], "RCU": station_info[2], "S": station_info[3]} # print string_info f_log.write(string_info + '\n') div_info = array.array('H') div_info.fromfile(f,36) # Bytes 16..87 # READ DATA SAMPLES data_in = array.array('H') samples = int(div_info[0]) data_in.fromfile(f,samples) data_list = data_in.tolist() data_crc = array.array('l') data_crc.fromfile(f,1) return data_list, time_info[1], time_info[2] # Function for testing PRBS data def PRBS_CHECK(data_list, prev): samples_chk=0 prbs_err=0 for i in range(0,len(data_list)) : if prev == 0x0FFF : prev = data_list[i] & 0x07FF elif data_list[i] == 0xFFFF : prbs_err = prbs_err + 1 elif data_list[i] == data_list[i-1]: cur = data_list[i] samples_chk = samples_chk + 1 prbs_err = prbs_err + 1 prev = data_list[i] & 0x07FF else : cur = data_list[i] & 0x0FFE samples_chk = samples_chk + 1 if cur != 2*prev : prbs_err = prbs_err + 1 # print(str(i) + ' ' + hex(2*prev) + ' ' + hex(cur)) prev = data_list[i] & 0x07FF return samples_chk, prbs_err, prev # Function for testing CRC of header def CRC16_check(buf) : CRC=0 CRC_poly=0x18005 bits=16 data=0 CRCDIV = (CRC_poly & 0x7fffffff) * 32768 # << 15 data = (buf[0] & 0x7fffffff) << 16 len_buf = len(buf) for cnt in range(1,len_buf) : data = data + buf[cnt] for cnt in range(bits) : if data & 0x80000000 : data = data ^ CRCDIV data = data & 0x7fffffff data = data * 2 # << 1 CRC = data >> 16 return CRC # Function for testing CRC of data def CRC32_check(buf) : CRC=0 CRC_poly=0x104C11DB7 # 1 0000 0100 1100 0001 0001 1101 1011 0111 bits=16 data=0 CRCDIV = (CRC_poly & 0x7fffffffffff) * 32768 #<< 15 data = buf[0] data = data & 0x7fffffffffff data = data << 16 data = data + buf[1] data = data & 0x7fffffffffff data = data << 16 len_buf = len(buf) for cnt in range(2,len_buf) : data = data + buf[cnt] for cnt 
in range(bits) : if data & 0x800000000000 : data = data ^ CRCDIV data = data & 0x7fffffffffff data = data * 2 # << 1 CRC = int(data >> 16) return CRC #Function for testing CRC of complete frame (header and data) def crc_frame(f, info_plot, frame_nr,f_log): CRC_ERROR=0 header = array.array('H') data_in = array.array('H') data_crc = array.array('H') # READING HEADER INFORMATION header.fromfile(f,44) # Bytes 0..88 # remove SEQNR from header, this data is added after CRC calculations header[2]=0 header[3]=0 if CRC16_check(header) : str_info = 'CRC ERROR IN HEADER ' # f_log.write(str_info ) CRC_ERROR=1 Station_id = header[0] & 0xFF RSP_id = header[0] >> 8 RCU_id = header[1] &0xFF Sample_rate = header[1] >> 8 Time = float((header[5] * 65536) + header[4]) Sample_nr = (header[7] * 65536) + header[6] Samples = header[8] if (info_plot) : time_string = time.ctime(Time) # str_info = 'Frame nr %(FR)d Station %(ST)d RSP %(RSP)d RCU %(RCU)d Sample rate %(S)d MHz time of data %(ti_D)s and %(SN)00.6f seconds'%\ # {"FR": frame_nr, "ST": Station_id ,"RSP": RSP_id, "RCU": RCU_id, "S": Sample_rate, "ti_D": time_string,"SN": float(Sample_nr)/float(200000000)} # print string_info # f_log.write(str_info + '\n') del(header) # READ DATA SAMPLES data_in.fromfile(f,1024) data_crc.fromfile(f,2) data_list = data_in.tolist() for cnt in range(len(data_in)): data_in[cnt] = (data_in[cnt] & 0x0FFF) data_in.append(data_crc[1]) data_in.append(data_crc[0]) if CRC32_check(data_in): str_info = 'CRC ERROR IN DATA, ' # f_log.write(str_info ) CRC_ERROR=1 return CRC_ERROR # Main loop def main() : files = open_dir() f_log = file('prbs_dir_test.log', 'w') f_log.write('\n \n PRSB test \n \n') for file_cnt in range(len(files)) : prev = 0x0FFF; samples_chk=0 prbs_err=0 o_ta=0 o_tb=0 (f, frames_to_proces) = open_file(files, file_cnt) if frames_to_proces >0 : for frame_cnt in range(frames_to_proces): data_list, ta, tb = read_frame(f, (frame_cnt==0), frame_cnt, f_log) if (((ta==o_ta) and tb==(o_tb+1024)) or (ta == (o_ta+1))) : # if (tb==(o_tb+1)) : prev = prev else: prev=0x0FFF r_samples_chk, r_prbs_err, prev = PRBS_CHECK(data_list, prev) samples_chk = samples_chk + r_samples_chk prbs_err = prbs_err + r_prbs_err o_ta = ta o_tb = tb # plot results # print 'PRBS errors: ' + str(prbs_err) f_log.write('PRBS errors: ' + str(prbs_err) + '\n') f.close if prbs_err > 0: (f, frames_to_proces) = open_file(files, file_cnt) if frames_to_proces >0 : crc_err=0 for frame_cnt in range(frames_to_proces): crc_err = crc_err + crc_frame(f, (frame_cnt==0), frame_cnt, f_log) # print 'PRBS errors: ' + str(prbs_err) f_log.write('Number of frames with CRC errors: ' + str(crc_err) + '\n') f.close f_log.close if __name__ == "__main__": main()
gpl-3.0
6,226,011,526,394,658,000
32.806034
178
0.536529
false
2.945175
true
false
false
uwosh/uwosh.itpeoplesoftdocs
uwosh/itdocs/content/queryinstructions.py
1
2467
"""Definition of the QueryInstructions content type """ from zope.interface import implements from Products.Archetypes import atapi from Products.ATContentTypes.content import folder from Products.ATContentTypes.content import schemata from uwosh.itdocs import itdocsMessageFactory as _ from uwosh.itdocs.interfaces import IQueryInstructions from uwosh.itdocs.config import PROJECTNAME from Products.ATContentTypes.configuration import zconf QueryInstructionsSchema = folder.ATFolderSchema.copy() + atapi.Schema(( # -*- Your Archetypes field definitions here ... -*- atapi.TextField('operation', storage=atapi.AnnotationStorage(), default_output_type = 'text/x-html-safe', widget=atapi.LabelWidget(label=_(u'Operation'), rows = 25, description=_(u'How to run this query'),), default='', ), atapi.TextField('parameters', storage=atapi.AnnotationStorage(), default_output_type = 'text/x-html-safe', widget=atapi.RichWidget(label=_(u'Parameters'), rows = 25, description=_(u'Parameters to enter for this query'),), default='', ), atapi.TextField('additionalinfo', storage=atapi.AnnotationStorage(), default_output_type = 'text/x-html-safe', widget=atapi.RichWidget(label=_(u'Additional Information'), rows = 25, description=_(u''),), default='Information for the system is in the document: _SystemInfo.doc', ), )) # Set storage on fields copied from ATFolderSchema, making sure # they work well with the python bridge properties. QueryInstructionsSchema['title'].widget.label = _(u'Query Name') #QueryInstructionsSchema['description'].widget.label = _(u'Purpose') QueryInstructionsSchema['description'].widget.description=_(u'Query Function') QueryInstructionsSchema['description'].default='' QueryInstructionsSchema['description'].visible = {'view':'visible', 'edit':'visible'} schemata.finalizeATCTSchema( QueryInstructionsSchema, folderish=True, moveDiscussion=False ) class QueryInstructions(folder.ATFolder): """Instructions for running a PeopleSoft query (UW Oshkosh Administrative Computing)""" implements(IQueryInstructions) meta_type = "QueryInstructions" schema = QueryInstructionsSchema atapi.registerType(QueryInstructions, PROJECTNAME)
gpl-2.0
6,203,758,016,776,983,000
33.746479
95
0.68788
false
4.188455
false
false
false
Jc2k/libcloudcore
libcloudcore/importer.py
1
3816
# Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import imp import sys from .loader import Loader from .models import Model from .driver import Driver from .utils import force_str from . import backend, client class Importer(object): def __init__(self, module_prefix, backend=backend.Driver): self.loader = Loader() self.module_prefix = "{}.".format(module_prefix) self.backend = backend def find_module(self, fullname, path): if fullname.startswith(self.module_prefix): service = fullname[len(self.module_prefix):].replace(".", "/") if self.loader.find(service): return self return self return None def load_module(self, fullname): if fullname in sys.modules: return sys.modules[fullname] service = fullname[len(self.module_prefix):].replace(".", "/") if not self.loader.find(service): raise ImportError("No such module {}".format(fullname)) module = sys.modules[fullname] = imp.new_module(fullname) module.__name__ = fullname module.__loader__ = self module.__path__ = [fullname] if self.loader.is_service(service): module.Client = self.get_client(service) module.Client.__module__ = module module.Driver = module.Client.Driver module.Driver.__module__ = module module.__all__ = ['Client'] module.__package__ = fullname.rpartition('.')[0] elif self.loader.is_namespace(service): module.__package__ = fullname return module def get_driver_method(self, operation): def method(self, *args, **kwargs): return self.driver.call(operation, *args, **kwargs) setattr(method, "__doc__", operation.documentation) setattr(method, "__name__", force_str(operation.name)) return method def get_waiter_method(self, waiter): def method(self, *args, **kwargs): return self.driver.wait(waiter, *args, **kwargs) setattr(method, "__doc__", waiter.documentation) setattr(method, "__name__", force_str(waiter.name)) return method def get_driver(self, service): model = Model(self.loader.load_service(service)) if not model.name: model.name = service bases = (Driver, self.backend) + model.request_pipeline attrs = { 'name': service, 'model': model, } return type("Driver", bases, attrs) def get_client(self, service): driver = self.get_driver(service) model = driver.model attrs = { 'name': service, '__doc__': model.documentation, 'Driver': driver, } for operation in model.get_operations(): attrs[operation.name] = self.get_driver_method(operation) for waiter in model.get_waiters(): attrs[waiter.name] = self.get_waiter_method(waiter) return type("Client", (client.Client, ), attrs)
apache-2.0
-4,060,576,626,925,605,400
33.378378
74
0.622904
false
4.297297
false
false
false
ToAruShiroiNeko/revscoring
revscoring/datasources/parent_revision.py
1
1575
import mwparserfromhell as mwp
from deltas.tokenizers import wikitext_split

from . import revision
from .datasource import Datasource

metadata = Datasource("parent_revision.metadata")
"""
Returns a :class:`~revscoring.datasources.types.RevisionMetadata` for the
parent revision.
"""

text = Datasource("parent_revision.text")
"""
Returns the text content of the parent revision.
"""


# ############################### Tokenized ###################################
def process_tokens(revision_text):
    return [t for t in wikitext_split.tokenize(revision_text or '')]

tokens = Datasource("parent_revision.tokens", process_tokens, depends_on=[text])
"""
Returns a list of tokens.
"""


# ############################## Parse tree ###################################
def process_parse_tree(revision_text):
    return mwp.parse(revision_text or "")

parse_tree = Datasource("parent_revision.parse_tree", process_parse_tree,
                        depends_on=[text])
"""
Returns a :class:`mwparserfromhell.wikicode.Wikicode` abstract syntax tree
representing the content of the revision.
"""

content = Datasource("parent_revision.content", revision.process_content,
                     depends_on=[parse_tree])
"""
Returns the raw content (no markup or templates) of the revision.
"""

content_tokens = Datasource("parent_revision.content_tokens",
                            revision.process_content_tokens,
                            depends_on=[content])
"""
Returns tokens from the raw content (no markup or templates) of the current
revision.
"""
mit
6,807,011,217,652,050,000
28.716981
79
0.633651
false
4.048843
false
false
false
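parent_revision.py in the record above wires Datasource objects together through depends_on, so each value (tokens, parse_tree, content) is derived lazily from the values it depends on. A toy version of that dependency-resolution pattern, written from scratch purely for illustration (this is not revscoring's actual solver):

class Datasource(object):
    def __init__(self, name, process=None, depends_on=None):
        self.name = name
        self.process = process
        self.depends_on = depends_on or []

def solve(datasource, cache):
    """Return the datasource's value, computing its dependencies recursively."""
    if datasource not in cache:
        args = [solve(dep, cache) for dep in datasource.depends_on]
        cache[datasource] = datasource.process(*args)
    return cache[datasource]

text = Datasource("parent_revision.text")
tokens = Datasource("parent_revision.tokens",
                    lambda t: (t or "").split(),
                    depends_on=[text])

print(solve(tokens, {text: "some old wikitext"}))   # ['some', 'old', 'wikitext']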
ses4j/THEbot
database_generator.py
1
5097
""" THEbot, a Texas Hold'em poker software library. Copyright (C) 2011 Scott Stafford This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. database_generator.py Execute this script to regenerate the precomputed databases of poker hands and their respective values: pokervals?.shelf for 5, 6, and 7 hands. """ import poker,pickle,sys,shelve,anydbm,time,logging from poker_globals import * global pokerval_cache,pokerval_cachehits,pokerval_cachemisses,weightedcomparehands_cache,weightedcomparehands_cachehits if __name__ == '__main__': logging.basicConfig(level=logging.DEBUG) def _clear_pokerval_cache(): global pokerval_cache,pokerval_cachehits,pokerval_cachemisses,weightedcomparehands_cache,weightedcomparehands_cachehits pokerval_cache={} weightedcomparehands_cache={} pokerval_cachehits=0 pokerval_cachemisses=0 weightedcomparehands_cachehits=0 def calculate_pokerval(_cards): """ Calculate/retrieve a pokerval from a set of 5 or more cards. Also return the 'index' used for db storage. """ global pokerval_cache,pokerval_cachehits,pokerval_cachemisses cards = poker.normalize_cards(_cards) try: index = poker.make_stringindex(cards) try: pokerval = pokerval_cache[index] pokerval_cachehits+=1 return index, pokerval except KeyError: pokerval_cachemisses+=1 pass pokerval = 0 if len(cards) == 5: pokerval = poker.CalculatingHand(cards).getpokerval() elif len(cards) > 5: for fivecards in xuniqueCombinations(cards,5): hand = poker.Hand(fivecards) pokerval = max(pokerval, hand.getpokerval()) else: raise ValueError("Not enough cards!") pokerval_cache[index] = pokerval except KeyError: errstr = "Hand not in database: %s %s, <%s>, %s"%(format_cards(_cards),format_cards(cards),index,reverse_stringindex(index)) raise KeyError(errstr) except: raise return index,pokerval def regenerate_database(): """ Go thru each possible hand and make a new db with the data items. """ deck = [] for val in range(2,15): for suit in range(1,5): deck.append((val,suit)) possiblehands = { 5: (2598960, 160537), 6: (20358520, 1250964), 7: (133784560, 210080), } allCombinations = sum([y[0] for (x,y) in possiblehands.iteritems()]) print """Generating all 5, 6, and 7 card hands. It takes a while (there are %d possible combinations) so find something else to do for a bit. If you kill the process at any time, no problem, you can resume it where it left off just by rerunning this method. Let's begin... """ % allCombinations start_time_all = time.clock() for numcards in range(5, 8): i = 0 _clear_pokerval_cache() start_time = time.clock() db = shelve.open("pokervals"+str(numcards)+".shelf",protocol=2) try: num_computed = db["num_computed"] except KeyError: num_computed = 0 (total, uniqueindices) = possiblehands[numcards] if len(db) != uniqueindices + 1: # +1 cause we store the counter in the database too, for restarting. print "Generating all "+str(total)+" possible "+str(numcards)+" card hands... 
" for cards in xuniqueCombinations(deck, numcards): i=i+1 # enable skipping ahead if we ran halfway and terminated this process. if i<num_computed: continue (idx,pokerval) = calculate_pokerval(cards) db[idx] = pokerval if i%100000 == 0: now = time.clock() print "%d%% of %d-card hands complete. %d processed, %d unique, %.2fm elapsed (%.2fm total)." % (i*100.0/total, numcards, i, len(db), (now - start_time)/60.0, (now - start_time_all)/60.0) s = format_cards(cards) + ' val: ' print "\tLast Hand: ", s + format_pokerval(pokerval) db["num_computed"] = i print len(db) print "Your %d-card database is complete! It has %d complete hands." % (numcards, len(db)) if __name__ == '__main__': regenerate_database()
gpl-3.0
202,160,966,135,602,700
37.330827
208
0.613694
false
3.803731
false
false
false
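database_generator.py in the record above checkpoints its progress inside the shelf under a "num_computed" key so that an interrupted run can be restarted and skip the hands it already stored. The same resume pattern in isolation, with a toy workload instead of poker hands (the file name and workload here are illustrative only):

import shelve

db = shelve.open("squares.shelf", protocol=2)
start = db.get("num_computed", 0)      # 0 on a fresh database
for i in range(start, 1000):
    db[str(i)] = i * i                 # shelve keys must be strings
    if i % 100 == 0:
        db["num_computed"] = i         # checkpoint: a restarted run resumes from here
db["num_computed"] = 1000
db.close()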
jeffFranklin/iam-resttools
resttools/models/irws.py
1
5374
from base64 import b64encode, b64decode from datetime import datetime # IRWS Name class Name(): validid = '' formal_cname = '' formal_fname = '' formal_lname = '' formal_privacy = '' display_cname = '' display_fname = '' display_mname = '' display_lname = '' display_privacy = '' def json_data(self): return {"formal_cname": self.formal_cname, "formal_fname": self.formal_fname, "formal_lname": self.formal_lname, "formal_privacy": self.formal_privacy, "display_cname": self.display_cname, "display_fname": self.display_fname, "display_mname": self.display_mname, "display_lname": self.display_lname, "display_privacy": self.display_privacy, } def __eq__(self, other): return self.uwregid == other.uwregid # IRWS Profile (only recover part for now) class Profile(): validid = '' recover_email = None recover_email_date = None recover_sms = None recover_sms_date = None recover_block_code = None def json_data(self): prof = {} if self.recover_email is not None: prof['recover_email'] = self.recover_email if self.recover_email_date is not None: prof['recover_email_date'] = self.recover_email_date if self.recover_sms is not None: prof['recover_sms'] = self.recover_sms if self.recover_sms_date is not None: prof['recover_sms_date'] = self.recover_sms_date if self.recover_block_code is not None: prof['recover_block_code'] = self.recover_block_code return {'profile': [prof]} def __eq__(self, other): return self.uwregid == other.uwregid # IRWS Person class Person(): regid = '' lname = '' fname = '' identifiers = {} # def __init__(self, *args, **kwargs): # self.identifiers = {} # IRWS UWhr Person class UWhrPerson(): validid = '' regid = '' studentid = '' birthdate = '' fname = '' lname = '' category_code = '' category_name = '' contact_email = '' workday_home_email = '' org_supervisor = '' wp_name = '' wp_department = '' wp_email = [] wp_phone = '' wp_title = '' wp_address = '' wp_publish = False college = '' department = '' home_department = '' mailstop = '' unit = '' emp_ecs_code = '' emp_status_code = '' budget = '' faccode = '' source_code = '' source_name = '' status_code = '' status_name = '' pac = '' in_feed = '' created = '' updated = '' def __eq__(self, other): if other is None: return False return self.regid == other.regid # IRWS Sdb Person class SdbPerson(): validid = '' regid = '' studentid = '' birthdate = '' fname = '' lname = '' category_code = '' category_name = '' college = '' department = '' source_code = '' source_name = '' # status: 1=active, 3=former status_code = '' status_name = '' pac = '' wp_publish = 'Y' in_feed = '' created = '' updated = '' def __eq__(self, other): if other is None: return False return self.regid == other.regid # IRWS Supplemental Person class SupplementalPerson(): validid = '' regid = '' lname = '' category_code = '' category_name = '' comment_code = '' comment_name = '' sponsor_id = '' college = '' source_code = '' source_name = '' status_code = '' status_name = '' in_feed = '' created = '' updated = '' def __eq__(self, other): if other is None: return False return self.regid == other.regid # IRWS GenericPerson class GenericPerson(): validid = '' regid = '' lname = '' fname = '' contact_email = '' category_code = '' source_code = '' # IRWS UWNetId class UWNetId(): uwnetid = '' accid = '' validid = '' uid = '' luid = '' disenfran = '' netid_code = '' netid_name = '' status_code = '' status_name = '' logname = '' created = '' updated = '' def json_data(self): return {"", } def __eq__(self, other): if other is None: return False return self.uwnetid == other.uwnetid # 
IRWS Regid class Regid(): regid = '' entity_code = '' entity_name = '' status_code = '' status_name = '' created = '' updated = '' def __eq__(self, other): if other is None: return False return self.regid == other.regid # IRWS Subscription class Subscription(): uwnetid = '' subscription_code = '' subscription_name = '' notify_code = '' status_code = '' status_name = '' logname = '' created = '' updated = '' def json_data(self): return {"", } def __eq__(self, other): return self.uwnetid == other.uwnetid # IRWS PAC class Pac(): pac = '' expiration = '' def json_data(self): return {"", } # IRWS QnA class QnA(): uwnetid = '' ordinal = '' question = '' answer = ''
apache-2.0
7,709,327,650,782,617,000
18.613139
64
0.51284
false
3.544855
false
false
false
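Note that the model classes in the record above declare mutable defaults at class level (Person.identifiers = {}, UWhrPerson.wp_email = []); such objects are shared by every instance unless an __init__ reassigns them, which the commented-out Person.__init__ hints at. A short self-contained demonstration of that behaviour:

class Person(object):
    identifiers = {}              # class-level dict, shared by all instances

a, b = Person(), Person()
a.identifiers["uwnetid"] = "jdoe"
print(b.identifiers)              # {'uwnetid': 'jdoe'} -- the very same dict object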
yushroom/FishEngine
script/gen_enum_to_string.py
1
2907
from mako.template import Template t_str = ''' // enum count template<> constexpr int EnumCount<${T}>() { return ${length}; } // string array static const char* ${T}Strings[] = { ${CStrings} }; // cstring array template<> inline constexpr const char** EnumToCStringArray<${T}>() { return ${T}Strings; } // index to enum template<> inline ${T} ToEnum<${T}>(const int index) { switch (index) { ${IndexToEnumCases} default: abort(); break; } } // enum to index template<> inline int EnumToIndex<${T}>(${T} e) { switch (e) { ${EnumToIndexCases} default: abort(); break; } } // string to enum template<> inline ${T} ToEnum<${T}>(const std::string& s) { ${StringToEnumCases} abort(); } ''' t = Template(t_str) cpp_enum_code = ''' enum class TextureImporterType { Default, // This is the most common setting used for all the textures in general. NormalMap, // Select this to turn the color channels into a format suitable for real - time normal mapping. GUI, // Use this if your texture is going to be used on any HUD / GUI Controls. Sprite, // Select this if you will be using your texture for Sprite graphics. Cursor, // Use this if your texture is going to be used as a cursor. Cookie, // This sets up your texture with the basic parameters used for the Cookies of your lights. Lightmap, // This sets up your texture with the parameters used by the lightmap. SingleChannel, //Use this for texture containing a single channel. }; ''' lines = cpp_enum_code.strip().split('\n'); line1 = lines[0].strip() if line1.endswith('{'): line1 = line1[:-1] enum_name = line1.strip().split()[-1] print(enum_name) enum_elements = [] for line in lines[1:-1]: #print line if line.startswith('{'): continue if "=" in line: var = line.split('=')[0] else: var = line.split(',')[0] enum_elements.append(var.strip()) print(enum_elements) print('') #enum_name = "ShadowCastingMode" #enum_elements = ['Off', 'On', 'TwoSided', 'ShdowsOnly'] index_to_enum_case = "case {0}: return {1}::{2}; break;" enum_to_index_case = "case {1}::{2}: return {0}; break;" string_to_enum_case = 'if (s == "{1}") return {0}::{1};' index_to_enum_cases = '' enum_to_index_cases = '' string_to_enum_cases = '' for i in range(len(enum_elements)): index_to_enum_cases += index_to_enum_case.format(i, enum_name, enum_elements[i]) + '\n\t' enum_to_index_cases += enum_to_index_case.format(i, enum_name, enum_elements[i]) + '\n\t' string_to_enum_cases += string_to_enum_case.format(enum_name, enum_elements[i]) + '\n\t' CStrings = ',\n\t'.join(['"{}"'.format(e) for e in enum_elements]) print t.render(T = enum_name, length = len(enum_elements), CStrings= CStrings, \ IndexToEnumCases = index_to_enum_cases, EnumToIndexCases = enum_to_index_cases, \ StringToEnumCases = string_to_enum_cases)
mit
687,920,250,908,670,300
26.424528
116
0.642931
false
2.978484
false
false
false
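gen_enum_to_string.py in the record above fills a mako template with one case statement per enum member. A toy render in the same style, assuming only that the mako package is installed (the enum name and member are taken from the script's own sample input):

from mako.template import Template

case = Template("case ${i}: return ${T}::${e}; break;")
print(case.render(i=0, T="TextureImporterType", e="Default"))
# case 0: return TextureImporterType::Default; break;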
McGill-DMaS/Kam1n0-Plugin-IDA-Pro
ida-plugin/Kam1n0/Plugin.py
1
1702
# ******************************************************************************* # * Copyright 2017 McGill University All rights reserved. # * # * Licensed under the Apache License, Version 2.0 (the "License"); # * you may not use this file except in compliance with the License. # * You may obtain a copy of the License at # * # * http://www.apache.org/licenses/LICENSE-2.0 # * # * Unless required by applicable law or agreed to in writing, software # * distributed under the License is distributed on an "AS IS" BASIS, # * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # * See the License for the specific language governing permissions and # * limitations under the License. # *******************************************************************************/ import idaapi import Manager from idaapi import plugin_t class kam1n0_t(plugin_t): flags = idaapi.PLUGIN_UNL comment = "Kam1n0." help = "Kam1n0." wanted_name = "Kam1n0" wanted_hotkey = "" def init(self): global kam1n0_manager # Check if already initialized if not 'kam1n0_manager' in globals(): print("Kam1n0: initializing Kam1n0 IDA-pro plugin ...") kam1n0_manager = Manager.Kam1n0PluginManager() if kam1n0_manager.register_all_actions(): print "Failed to initialize Kam1n0." # kam1n0_manager.removeAllAction() del kam1n0_manager return idaapi.PLUGIN_SKIP else: print("Kam1n0: Completed initialization.") return idaapi.PLUGIN_KEEP def run(self, arg): pass def term(self): pass
apache-2.0
9,062,801,436,616,464,000
31.75
83
0.576968
false
3.833333
false
false
false
ghchinoy/tensorflow
tensorflow/python/framework/ops.py
1
245803
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Classes and functions used to construct graphs.""" # pylint: disable=g-bad-name from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import copy import re import sys import threading import numpy as np import six from six.moves import xrange # pylint: disable=redefined-builtin from tensorflow.core.framework import attr_value_pb2 from tensorflow.core.framework import function_pb2 from tensorflow.core.framework import graph_pb2 from tensorflow.core.framework import node_def_pb2 from tensorflow.core.framework import op_def_pb2 from tensorflow.core.framework import versions_pb2 from tensorflow.core.protobuf import config_pb2 from tensorflow.python import pywrap_tensorflow as c_api from tensorflow.python import tf2 from tensorflow.python.eager import context from tensorflow.python.eager import core from tensorflow.python.eager import monitoring from tensorflow.python.eager import tape from tensorflow.python.framework import c_api_util from tensorflow.python.framework import composite_tensor from tensorflow.python.framework import device as pydev from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors from tensorflow.python.framework import op_def_registry from tensorflow.python.framework import registry from tensorflow.python.framework import tensor_shape from tensorflow.python.framework import traceable_stack from tensorflow.python.framework import versions from tensorflow.python.ops import control_flow_util from tensorflow.python.platform import app from tensorflow.python.platform import tf_logging as logging from tensorflow.python.util import compat from tensorflow.python.util import decorator_utils from tensorflow.python.util import deprecation from tensorflow.python.util import function_utils from tensorflow.python.util import lock_util from tensorflow.python.util import memory from tensorflow.python.util import tf_contextlib from tensorflow.python.util import tf_stack from tensorflow.python.util.deprecation import deprecated_args from tensorflow.python.util.lazy_loader import LazyLoader from tensorflow.python.util.tf_export import tf_export # This is to avoid a circular dependency: ops -> tensor_spec -> ops tensor_spec = LazyLoader( "tensor_spec", globals(), "tensorflow.python.framework.tensor_spec") # Temporary global switches determining if we should enable the work-in-progress # calls to the C API. These will be removed once all functionality is supported. 
_USE_C_API = True _USE_C_SHAPES = True _api_usage_gauge = monitoring.BoolGauge( "/tensorflow/api/ops_eager_execution", "Whether ops.enable_eager_execution() is called.") def tensor_id(tensor): """Returns a unique identifier for this Tensor.""" return tensor._id # pylint: disable=protected-access class _UserDeviceSpec(object): """Store user-specified device and provide computation of merged device.""" def __init__(self, device_name_or_function): self._device_name_or_function = device_name_or_function self.display_name = str(self._device_name_or_function) self.function = device_name_or_function self.raw_string = None if isinstance(device_name_or_function, pydev.MergeDevice): self.is_null_merge = device_name_or_function.is_null_merge elif callable(device_name_or_function): self.is_null_merge = False dev_func = self._device_name_or_function func_name = function_utils.get_func_name(dev_func) func_code = function_utils.get_func_code(dev_func) if func_code: fname = func_code.co_filename lineno = func_code.co_firstlineno else: fname = "unknown" lineno = -1 self.display_name = "%s<%s, %d>" % (func_name, fname, lineno) elif device_name_or_function is None: # NOTE(taylorrobie): This MUST be False. None signals a break in the # device stack, so `is_null_merge` must be False for such a case to # allow callers to safely skip over null merges without missing a None. self.is_null_merge = False else: self.raw_string = device_name_or_function self.function = pydev.merge_device(device_name_or_function) self.is_null_merge = self.function.is_null_merge # We perform this check in __init__ because it is of non-trivial cost, # and self.string_merge is typically called many times. self.fast_string_merge = isinstance(self.function, pydev.MergeDevice) def string_merge(self, node_def): if self.fast_string_merge: return self.function.shortcut_string_merge(node_def) return compat.as_str(_device_string(self.function(node_def))) class NullContextmanager(object): def __init__(self, *args, **kwargs): pass def __enter__(self): pass def __exit__(self, type_arg, value_arg, traceback_arg): return False # False values do not suppress exceptions def _override_helper(clazz_object, operator, func): """Overrides (string) operator on Tensors to call func. Args: clazz_object: the class to override for; either Tensor or SparseTensor. operator: the string name of the operator to override. func: the function that replaces the overridden operator. Raises: ValueError: If operator has already been overwritten, or if operator is not allowed to be overwritten. """ existing = getattr(clazz_object, operator, None) if existing is not None: # Check to see if this is a default method-wrapper or slot wrapper which # will be true for the comparison operators. if not isinstance(existing, type(object.__lt__)): raise ValueError("operator %s cannot be overwritten again on class %s." % (operator, clazz_object)) if operator not in Tensor.OVERLOADABLE_OPERATORS: raise ValueError("Overriding %s is disallowed" % operator) setattr(clazz_object, operator, func) def _as_graph_element(obj): """Convert `obj` to a graph element if possible, otherwise return `None`. Args: obj: Object to convert. Returns: The result of `obj._as_graph_element()` if that method is available; otherwise `None`. """ conv_fn = getattr(obj, "_as_graph_element", None) if conv_fn and callable(conv_fn): return conv_fn() return None _TENSOR_LIKE_TYPES = tuple() def is_dense_tensor_like(t): """EXPERIMENTAL: Returns true if `t` implements the tensor interface. 
See `register_dense_tensor_like_type()` for the current definition of a "tensor-like type". Args: t: An object. Returns: True iff `t` is an instance of one of the registered "tensor-like" types. """ return isinstance(t, _TENSOR_LIKE_TYPES) def register_dense_tensor_like_type(tensor_type): """EXPERIMENTAL: Registers `tensor_type` as implementing the tensor interface. A "tensor-like type" can represent a single dense tensor, and implements the `name` and `dtype` properties. Args: tensor_type: A type implementing the tensor interface. Raises: TypeError: If `tensor_type` does not implement the tensor interface. """ try: if not isinstance(tensor_type.name, property): raise TypeError("Type %s does not define a `name` property" % tensor_type.__name__) except AttributeError: raise TypeError("Type %s does not define a `name` property" % tensor_type.__name__) try: if not isinstance(tensor_type.dtype, property): raise TypeError("Type %s does not define a `dtype` property" % tensor_type.__name__) except AttributeError: raise TypeError("Type %s does not define a `dtype` property" % tensor_type.__name__) # We expect this list to be small, so choose quadratic complexity # for registration, so that we have a tuple that can be used for # more efficient `isinstance` checks later. global _TENSOR_LIKE_TYPES _TENSOR_LIKE_TYPES = tuple(list(_TENSOR_LIKE_TYPES) + [tensor_type]) def uid(): """A unique (within this program execution) integer.""" return c_api.TFE_Py_UID() def numpy_text(tensor, is_repr=False): """Human readable representation of a tensor's numpy value.""" if tensor.dtype.is_numpy_compatible: text = repr(tensor.numpy()) if is_repr else str(tensor.numpy()) else: text = "<unprintable>" if "\n" in text: text = "\n" + text return text # NOTE(ebrevdo): Do not subclass this. If you do, I will break you on purpose. class _TensorLike(object): """Internal cls for grouping Tensor, SparseTensor, ..., for is_instance.""" pass @tf_export("Tensor") class Tensor(_TensorLike): """Represents one of the outputs of an `Operation`. A `Tensor` is a symbolic handle to one of the outputs of an `Operation`. It does not hold the values of that operation's output, but instead provides a means of computing those values in a TensorFlow `tf.compat.v1.Session`. This class has two primary purposes: 1. A `Tensor` can be passed as an input to another `Operation`. This builds a dataflow connection between operations, which enables TensorFlow to execute an entire `Graph` that represents a large, multi-step computation. 2. After the graph has been launched in a session, the value of the `Tensor` can be computed by passing it to `tf.Session.run`. `t.eval()` is a shortcut for calling `tf.compat.v1.get_default_session().run(t)`. In the following example, `c`, `d`, and `e` are symbolic `Tensor` objects, whereas `result` is a numpy array that stores a concrete value: ```python # Build a dataflow graph. c = tf.constant([[1.0, 2.0], [3.0, 4.0]]) d = tf.constant([[1.0, 1.0], [0.0, 1.0]]) e = tf.matmul(c, d) # Construct a `Session` to execute the graph. sess = tf.compat.v1.Session() # Execute the graph and store the value that `e` represents in `result`. result = sess.run(e) ``` """ # List of Python operators that we allow to override. OVERLOADABLE_OPERATORS = { # Binary. 
"__add__", "__radd__", "__sub__", "__rsub__", "__mul__", "__rmul__", "__div__", "__rdiv__", "__truediv__", "__rtruediv__", "__floordiv__", "__rfloordiv__", "__mod__", "__rmod__", "__lt__", "__le__", "__gt__", "__ge__", "__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__", "__getitem__", "__pow__", "__rpow__", # Unary. "__invert__", "__neg__", "__abs__", "__matmul__", "__rmatmul__" } def __init__(self, op, value_index, dtype): """Creates a new `Tensor`. Args: op: An `Operation`. `Operation` that computes this tensor. value_index: An `int`. Index of the operation's endpoint that produces this tensor. dtype: A `DType`. Type of elements stored in this tensor. Raises: TypeError: If the op is not an `Operation`. """ if not isinstance(op, Operation): raise TypeError("op needs to be an Operation: %s" % op) self._op = op self._value_index = value_index self._dtype = dtypes.as_dtype(dtype) # This will be set by self._as_tf_output(). self._tf_output = None # This will be set by self.shape(). self._shape_val = None # List of operations that use this Tensor as input. We maintain this list # to easily navigate a computation graph. self._consumers = [] self._id = uid() self._name = None @property def op(self): """The `Operation` that produces this tensor as an output.""" return self._op @property def dtype(self): """The `DType` of elements in this tensor.""" return self._dtype @property def graph(self): """The `Graph` that contains this tensor.""" return self._op.graph @property def name(self): """The string name of this tensor.""" if self._name is None: if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) self._name = "%s:%d" % (self._op.name, self._value_index) return self._name @property def device(self): """The name of the device on which this tensor will be produced, or None.""" return self._op.device @property def shape(self): """Returns the `TensorShape` that represents the shape of this tensor. The shape is computed using shape inference functions that are registered in the Op for each `Operation`. See `tf.TensorShape` for more details of what a shape represents. The inferred shape of a tensor is used to provide shape information without having to launch the graph in a session. This can be used for debugging, and providing early error messages. For example: ```python c = tf.constant([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]]) print(c.shape) ==> TensorShape([Dimension(2), Dimension(3)]) d = tf.constant([[1.0, 0.0], [0.0, 1.0], [1.0, 0.0], [0.0, 1.0]]) print(d.shape) ==> TensorShape([Dimension(4), Dimension(2)]) # Raises a ValueError, because `c` and `d` do not have compatible # inner dimensions. e = tf.matmul(c, d) f = tf.matmul(c, d, transpose_a=True, transpose_b=True) print(f.shape) ==> TensorShape([Dimension(3), Dimension(4)]) ``` In some cases, the inferred shape may have unknown dimensions. If the caller has additional information about the values of these dimensions, `Tensor.set_shape()` can be used to augment the inferred shape. Returns: A `TensorShape` representing the shape of this tensor. 
""" if self._shape_val is None: self._shape_val = self._c_api_shape() return self._shape_val def _get_input_ops_without_shapes(self, target_op): """Returns ops needing shape inference to compute target_op's shape.""" result = [] stack = [self._op] visited = set() while stack: op = stack.pop() if op in visited: continue result.append(op) stack.extend(t.op for t in op.inputs if t._shape_val is None) visited.add(op) return result def _c_api_shape(self): """Returns the TensorShape of this tensor according to the C API.""" c_graph = self._op._graph._c_graph # pylint: disable=protected-access shape_vector, unknown_shape = c_api.TF_GraphGetTensorShapeHelper( c_graph, self._as_tf_output()) if unknown_shape: return tensor_shape.unknown_shape() else: shape_vector = [None if d == -1 else d for d in shape_vector] return tensor_shape.TensorShape(shape_vector) @property def _shape(self): logging.warning("Tensor._shape is private, use Tensor.shape " "instead. Tensor._shape will eventually be removed.") return self.shape @_shape.setter def _shape(self, value): raise ValueError( "Tensor._shape cannot be assigned, use Tensor.set_shape instead.") def __iter__(self): if not context.executing_eagerly(): raise TypeError( "Tensor objects are only iterable when eager execution is " "enabled. To iterate over this tensor use tf.map_fn.") shape = self._shape_tuple() if shape is None: raise TypeError("Cannot iterate over a tensor with unknown shape.") if not shape: raise TypeError("Cannot iterate over a scalar tensor.") if shape[0] is None: raise TypeError( "Cannot iterate over a tensor with unknown first dimension.") for i in xrange(shape[0]): yield self[i] def _shape_as_list(self): if self.shape.ndims is not None: return [dim.value for dim in self.shape.dims] else: return None def _shape_tuple(self): shape = self._shape_as_list() if shape is None: return None return tuple(shape) def _rank(self): """Integer rank of this Tensor, if known, else None. Returns: Integer rank or None """ return self.shape.ndims def get_shape(self): """Alias of Tensor.shape.""" return self.shape def set_shape(self, shape): """Updates the shape of this tensor. This method can be called multiple times, and will merge the given `shape` with the current shape of this tensor. It can be used to provide additional information about the shape of this tensor that cannot be inferred from the graph alone. For example, this can be used to provide additional information about the shapes of images: ```python _, image_data = tf.compat.v1.TFRecordReader(...).read(...) image = tf.image.decode_png(image_data, channels=3) # The height and width dimensions of `image` are data dependent, and # cannot be computed without executing the op. print(image.shape) ==> TensorShape([Dimension(None), Dimension(None), Dimension(3)]) # We know that each image in this dataset is 28 x 28 pixels. image.set_shape([28, 28, 3]) print(image.shape) ==> TensorShape([Dimension(28), Dimension(28), Dimension(3)]) ``` NOTE: This shape is not enforced at runtime. Setting incorrect shapes can result in inconsistencies between the statically-known graph and the runtime value of tensors. For runtime validation of the shape, use `tf.ensure_shape` instead. Args: shape: A `TensorShape` representing the shape of this tensor, a `TensorShapeProto`, a list, a tuple, or None. Raises: ValueError: If `shape` is not compatible with the current shape of this tensor. """ # Reset cached shape. self._shape_val = None # We want set_shape to be reflected in the C API graph for when we run it. 
if not isinstance(shape, tensor_shape.TensorShape): shape = tensor_shape.TensorShape(shape) dim_list = [] if shape.dims is None: unknown_shape = True else: unknown_shape = False for dim in shape.dims: if dim.value is None: dim_list.append(-1) else: dim_list.append(dim.value) try: c_api.TF_GraphSetTensorShape_wrapper( self._op._graph._c_graph, # pylint: disable=protected-access self._as_tf_output(), dim_list, unknown_shape) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) @property def value_index(self): """The index of this tensor in the outputs of its `Operation`.""" return self._value_index def consumers(self): """Returns a list of `Operation`s that consume this tensor. Returns: A list of `Operation`s. """ consumer_names = c_api.TF_OperationOutputConsumers_wrapper( self._as_tf_output()) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe(name) for name in consumer_names ] # pylint: enable=protected-access def _as_node_def_input(self): """Return a value to use for the NodeDef "input" attribute. The returned string can be used in a NodeDef "input" attribute to indicate that the NodeDef uses this Tensor as input. Raises: ValueError: if this Tensor's Operation does not have a name. Returns: a string. """ if not self._op.name: raise ValueError("Operation was not named: %s" % self._op) if self._value_index == 0: return self._op.name else: return "%s:%d" % (self._op.name, self._value_index) def _as_tf_output(self): # pylint: disable=protected-access # NOTE: Beyond preventing unnecessary (re-)allocation, the cached object # also guarantees that a dictionary of tf_output objects will retain a # deterministic (yet unsorted) order which prevents memory blowup in the # cache of executor(s) stored for every session. if self._tf_output is None: self._tf_output = c_api_util.tf_output(self.op._c_op, self.value_index) return self._tf_output # pylint: enable=protected-access def __str__(self): return "Tensor(\"%s\"%s%s%s)" % ( self.name, (", shape=%s" % self.get_shape()) if self.get_shape().ndims is not None else "", (", dtype=%s" % self._dtype.name) if self._dtype else "", (", device=%s" % self.device) if self.device else "") def __repr__(self): return "<tf.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.get_shape(), self._dtype.name) def __hash__(self): # Necessary to support Python's collection membership operators return id(self) def __eq__(self, other): # Necessary to support Python's collection membership operators # NOTE(taylorrobie): equivalent to: id(self) == id(other) return self is other def __copy__(self): # TODO(b/77597810): get rid of Tensor copies. cls = self.__class__ result = cls.__new__(cls) result.__dict__.update(self.__dict__) return result # NOTE(mrry): This enables the Tensor's overloaded "right" binary # operators to run when the left operand is an ndarray, because it # accords the Tensor class higher priority than an ndarray, or a # numpy matrix. # TODO(mrry): Convert this to using numpy's __numpy_ufunc__ # mechanism, which allows more control over how Tensors interact # with ndarrays. __array_priority__ = 100 @staticmethod def _override_operator(operator, func): _override_helper(Tensor, operator, func) def __bool__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This overload raises a `TypeError` when the user inadvertently treats a `Tensor` as a boolean (e.g. in an `if` statement). For example: ```python if tf.constant(True): # Will raise. # ... 
if tf.constant(5) < tf.constant(7): # Will raise. # ... ``` This disallows ambiguities between testing the Python value vs testing the dynamic condition of the `Tensor`. Raises: `TypeError`. """ raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. " "Use `if t is not None:` instead of `if t:` to test if a " "tensor is defined, and use TensorFlow ops such as " "tf.cond to execute subgraphs conditioned on the value of " "a tensor.") def __nonzero__(self): """Dummy method to prevent a tensor from being used as a Python `bool`. This is the Python 2.x counterpart to `__bool__()` above. Raises: `TypeError`. """ raise TypeError("Using a `tf.Tensor` as a Python `bool` is not allowed. " "Use `if t is not None:` instead of `if t:` to test if a " "tensor is defined, and use TensorFlow ops such as " "tf.cond to execute subgraphs conditioned on the value of " "a tensor.") def eval(self, feed_dict=None, session=None): """Evaluates this tensor in a `Session`. Calling this method will execute all preceding operations that produce the inputs needed for the operation that produces this tensor. *N.B.* Before invoking `Tensor.eval()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See `tf.Session.run` for a description of the valid feed values. session: (Optional.) The `Session` to be used to evaluate this tensor. If none, the default session will be used. Returns: A numpy array corresponding to the value of this tensor. """ return _eval_using_default_session(self, feed_dict, self.graph, session) # TODO(agarwal): consider getting rid of this. class _EagerTensorBase(Tensor): """Base class for EagerTensor.""" @property def dtype(self): # Note: using the intern table directly here as this is # performance-sensitive in some models. return dtypes._INTERN_TABLE[self._datatype_enum()] # pylint: disable=protected-access def numpy(self): """Returns a numpy array or a scalar with the same contents as the Tensor. TODO(ashankar,agarwal): Perhaps this should NOT reference the underlying buffer but instead always explicitly copy? Note that currently it may or may not copy based on whether the numpy data is properly aligned or not. Returns: A numpy array or a scalar. Numpy array may share memory with the Tensor object. Any changes to one may be reflected in the other. A scalar value is returned when self has rank 0. Raises: ValueError: if the type of this Tensor is not representable in numpy. """ if self.dtype == dtypes.resource: raise ValueError("Resource handles are not convertible to numpy.") maybe_arr = self._cpu_nograd()._numpy() # pylint: disable=protected-access return maybe_arr.copy() if isinstance(maybe_arr, np.ndarray) else maybe_arr # __int__, __float__ and __index__ may copy the tensor to CPU and # only work for scalars; values are cast as per numpy. # TODO(slebedev): avoid redundant copy in all of the following methods. def __int__(self): return int(self.numpy()) def __long__(self): return long(self.numpy()) def __float__(self): return float(self.numpy()) def __index__(self): maybe_arr = self.numpy() if isinstance(maybe_arr, np.ndarray): return maybe_arr.__index__() return int(maybe_arr) # Must be a NumPy scalar. def __array__(self, dtype=None): # This is only called if the buffer interface conversion failed. # Remove once numpy/numpy#13507 is merged and released or py_function # creates EagerTensors with a non-nullptr context. 
return np.asarray(self.numpy(), dtype=dtype) def __format__(self, format_spec): return self.numpy().__format__(format_spec) def __reduce__(self): return (convert_to_tensor, (self.numpy(),)) def _numpy(self): raise NotImplementedError() @property def backing_device(self): """Returns the name of the device holding this tensor's memory. `.backing_device` is usually the same as `.device`, which returns the device on which the kernel of the operation that produced this tensor ran. However, some operations can produce tensors on a different device (e.g., an operation that executes on the GPU but produces output tensors in host memory). """ raise NotImplementedError() def __copy__(self): # Eager Tensors are immutable so it's safe to return themselves as a copy. return self def __deepcopy__(self, memo): # Eager Tensors are immutable so it's safe to return themselves as a copy. del memo return self def _datatype_enum(self): raise NotImplementedError() def _shape_tuple(self): """The shape of this Tensor, as a tuple. This is more performant than tuple(shape().as_list()) as it avoids two list and one object creation. Marked private for now as from an API perspective, it would be better to have a single performant way of getting a shape rather than exposing shape() and shape_tuple() (and heaven forbid, shape_list() etc. as well!). Punting on that for now, but ideally one would work things out and remove the need for this method. Returns: tuple with the shape. """ raise NotImplementedError() def _rank(self): """Integer rank of this Tensor. Unlike regular Tensors, the rank is always known for EagerTensors. This is more performant than len(self._shape_tuple()) Returns: Integer rank """ raise NotImplementedError() def _num_elements(self): """Number of elements of this Tensor. Unlike regular Tensors, the number of elements is always known for EagerTensors. This is more performant than tensor.shape.num_elements Returns: Long - num elements in the tensor """ raise NotImplementedError() def _copy_to_device(self, context, device): # pylint: disable=redefined-outer-name raise NotImplementedError() def __str__(self): return "tf.Tensor(%s, shape=%s, dtype=%s)" % (numpy_text(self), self.shape, self.dtype.name) def __repr__(self): return "<tf.Tensor: id=%s, shape=%s, dtype=%s, numpy=%s>" % ( self._id, self.shape, self.dtype.name, numpy_text(self, is_repr=True)) @staticmethod def _override_operator(name, func): setattr(_EagerTensorBase, name, func) def _copy_nograd(self, ctx=None, device_name=None): """Copies tensor to dest device, but doesn't record the operation.""" # pylint: disable=protected-access # Creates a new tensor on the dest device. if ctx is None: ctx = context.context() if device_name is None: device_name = ctx.device_name # pylint: disable=protected-access try: ctx.ensure_initialized() new_tensor = self._copy_to_device(context=ctx._handle, device=device_name) except core._NotOkStatusException as e: six.raise_from(core._status_to_exception(e.code, e.message), None) return new_tensor def _copy(self, ctx=None, device_name=None): """Copies tensor to dest device.""" new_tensor = self._copy_nograd(ctx, device_name) # Record the copy on tape and define backprop copy as well. 
if context.executing_eagerly(): self_device = self.device def grad_fun(dresult): return [ dresult._copy(device_name=self_device) if hasattr(dresult, "_copy") else dresult ] tape.record_operation("_copy", [new_tensor], [self], grad_fun) return new_tensor # pylint: enable=protected-access @property def shape(self): if self._tensor_shape is None: # pylint: disable=access-member-before-definition # `_tensor_shape` is declared and defined in the definition of # `EagerTensor`, in C. self._tensor_shape = tensor_shape.TensorShape(self._shape_tuple()) return self._tensor_shape def get_shape(self): """Alias of Tensor.shape.""" return self.shape def _shape_as_list(self): """The shape of the tensor as a list.""" return list(self._shape_tuple()) @property def ndim(self): """Returns the number of Tensor dimensions.""" return self.shape.ndims def __len__(self): """Returns the length of the first dimension in the Tensor.""" if not self.shape.ndims: raise TypeError("Scalar tensor has no `len()`") return self._shape_tuple()[0] def _cpu_nograd(self): """A copy of this Tensor with contents backed by host memory. The copy cannot be differentiated through. Returns: A CPU-memory backed Tensor object with the same contents as this Tensor. """ return self._copy_nograd(context.context(), "CPU:0") def cpu(self): """A copy of this Tensor with contents backed by host memory.""" return self._copy(context.context(), "CPU:0") def gpu(self, gpu_index=0): """A copy of this Tensor with contents backed by memory on the GPU. Arguments: gpu_index: Identifies which GPU to place the contents on the returned Tensor in. Returns: A GPU-memory backed Tensor object initialized with the same contents as this Tensor. """ return self._copy(context.context(), "GPU:" + str(gpu_index)) def __bool__(self): return bool(self.numpy()) def __nonzero__(self): return self.__bool__() def set_shape(self, shape): if not self.shape.is_compatible_with(shape): raise ValueError( "Tensor's shape %s is not compatible with supplied shape %s" % (self.shape, shape)) # Methods not supported / implemented for Eager Tensors. @property def op(self): raise AttributeError( "Tensor.op is meaningless when eager execution is enabled.") @property def graph(self): raise AttributeError( "Tensor.graph is meaningless when eager execution is enabled.") @property def name(self): raise AttributeError( "Tensor.name is meaningless when eager execution is enabled.") @property def value_index(self): raise AttributeError( "Tensor.value_index is meaningless when eager execution is enabled.") def consumers(self): raise NotImplementedError( "Tensor.consumers is meaningless when eager execution is enabled.") def _add_consumer(self, consumer): raise NotImplementedError( "_add_consumer not supported when eager execution is enabled.") def _as_node_def_input(self): raise NotImplementedError( "_as_node_def_input not supported when eager execution is enabled.") def _as_tf_output(self): raise NotImplementedError( "_as_tf_output not supported when eager execution is enabled.") def eval(self, feed_dict=None, session=None): raise NotImplementedError( "eval is not supported when eager execution is enabled, " "is .numpy() what you're looking for?") # This call creates an EagerTensor class, as a subclass of _EagerTensorBase, and # registers it with the current module. 
EagerTensor = c_api.TFE_Py_InitEagerTensor(_EagerTensorBase) def _TensorTensorConversionFunction(t, dtype=None, name=None, as_ref=False): _ = name, as_ref if dtype and not dtype.is_compatible_with(t.dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtype.name, t.dtype.name, str(t))) return t _tensor_conversion_func_registry = { 0: [(Tensor, _TensorTensorConversionFunction)] } _tensor_conversion_func_cache = {} _tensor_conversion_func_lock = threading.Lock() register_dense_tensor_like_type(Tensor) @tf_export(v1=["convert_to_tensor"]) def convert_to_tensor(value, dtype=None, name=None, preferred_dtype=None, dtype_hint=None): """Converts the given `value` to a `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: ```python import numpy as np def my_func(arg): arg = tf.convert_to_tensor(arg, dtype=tf.float32) return tf.matmul(arg, arg) + arg # The following calls are equivalent. value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]])) value_2 = my_func([[1.0, 2.0], [3.0, 4.0]]) value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)) ``` This function can be useful when composing a new operation in Python (such as `my_func` in the example above). All standard Python op constructors apply this function to each of their Tensor-valued inputs, which allows those ops to accept numpy arrays, Python lists, and scalars in addition to `Tensor` objects. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. name: Optional name to use if a new `Tensor` is created. preferred_dtype: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. dtype_hint: same meaning as preferred_dtype, and overrides it. Returns: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode. """ preferred_dtype = deprecation.deprecated_argument_lookup( "dtype_hint", dtype_hint, "preferred_dtype", preferred_dtype) return convert_to_tensor_v2(value, dtype, preferred_dtype, name) @tf_export("convert_to_tensor", v1=[]) def convert_to_tensor_v2(value, dtype=None, dtype_hint=None, name=None): """Converts the given `value` to a `Tensor`. This function converts Python objects of various types to `Tensor` objects. It accepts `Tensor` objects, numpy arrays, Python lists, and Python scalars. For example: ```python import numpy as np def my_func(arg): arg = tf.convert_to_tensor(arg, dtype=tf.float32) return tf.matmul(arg, arg) + arg # The following calls are equivalent. 
value_1 = my_func(tf.constant([[1.0, 2.0], [3.0, 4.0]])) value_2 = my_func([[1.0, 2.0], [3.0, 4.0]]) value_3 = my_func(np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)) ``` This function can be useful when composing a new operation in Python (such as `my_func` in the example above). All standard Python op constructors apply this function to each of their Tensor-valued inputs, which allows those ops to accept numpy arrays, Python lists, and scalars in addition to `Tensor` objects. Note: This function diverges from default Numpy behavior for `float` and `string` types when `None` is present in a Python list or scalar. Rather than silently converting `None` values, an error will be thrown. Args: value: An object whose type has a registered `Tensor` conversion function. dtype: Optional element type for the returned tensor. If missing, the type is inferred from the type of `value`. dtype_hint: Optional element type for the returned tensor, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so dtype_hint can be used as a soft preference. If the conversion to `dtype_hint` is not possible, this argument has no effect. name: Optional name to use if a new `Tensor` is created. Returns: A `Tensor` based on `value`. Raises: TypeError: If no conversion function is registered for `value` to `dtype`. RuntimeError: If a registered conversion function returns an invalid value. ValueError: If the `value` is a tensor not of given `dtype` in graph mode. """ return internal_convert_to_tensor( value=value, dtype=dtype, name=name, preferred_dtype=dtype_hint, as_ref=False) def _error_prefix(name): return "" if name is None else "%s: " % name def internal_convert_to_tensor(value, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None, accept_symbolic_tensors=True, accept_composite_tensors=False): """Implementation of the public convert_to_tensor.""" if ctx is None: ctx = context.context() if isinstance(value, EagerTensor): if ctx.executing_eagerly(): if dtype is not None: dtype = dtypes.as_dtype(dtype) value = _TensorTensorConversionFunction(value, dtype=dtype) return value else: graph = get_default_graph() if not graph.building_function: raise RuntimeError("Attempting to capture an EagerTensor without " "building a function.") return graph.capture(value, name=name) elif ((not accept_symbolic_tensors) and isinstance(value, Tensor) and ctx.executing_eagerly()): # Found a symbolic tensor in an eager context. # This happens when we use the Keras functional API (i.e. calling layers # on the output of `keras.Input()`, which is symbolic) while eager # execution is enabled. if _is_keras_symbolic_tensor(value): # If the graph of the tensor isn't the Keras graph, we should still # fail, for the time being. TODO(fchollet): consider allowing # all symbolic tensors to raise this exception in this case. 
raise core._SymbolicException( # pylint: disable=protected-access "Using the symbolic output of a Keras layer during eager execution.") if dtype is not None: dtype = dtypes.as_dtype(dtype) unwrapped_type = type(value) conversion_func_list = _tensor_conversion_func_cache.get(unwrapped_type, None) if conversion_func_list is None: with _tensor_conversion_func_lock: conversion_func_list = [] for _, funcs_at_priority in sorted( _tensor_conversion_func_registry.items()): for base_type, conversion_func in funcs_at_priority: if isinstance(value, base_type): conversion_func_list.append((base_type, conversion_func)) _tensor_conversion_func_cache[unwrapped_type] = conversion_func_list for base_type, conversion_func in conversion_func_list: # If dtype is None but preferred_dtype is not None, we try to # cast to preferred_dtype first. ret = None if dtype is None and preferred_dtype is not None: try: ret = conversion_func( value, dtype=preferred_dtype, name=name, as_ref=as_ref) except (TypeError, ValueError, errors.UnimplementedError, errors.InvalidArgumentError): # Could not coerce the conversion to use the preferred dtype. ret = None if ret is not None and ret is not NotImplemented: if (ret.dtype.base_dtype != dtypes.as_dtype(preferred_dtype).base_dtype): raise TypeError("convert_to_tensor did not convert to " "the preferred dtype: %s vs %s " % (ret.dtype.base_dtype, dtypes.as_dtype(preferred_dtype).base_dtype)) if ret is None: ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref) if ret is NotImplemented: continue is_acceptable_type = ( isinstance(ret, Tensor) or (accept_composite_tensors and isinstance(ret, composite_tensor.CompositeTensor))) if not is_acceptable_type: raise RuntimeError( "%sConversion function %r for type %s returned non-Tensor: %r" % (_error_prefix(name), conversion_func, base_type, ret)) if dtype and not dtype.is_compatible_with(ret.dtype): raise RuntimeError( "%sConversion function %r for type %s returned incompatible " "dtype: requested = %s, actual = %s" % (_error_prefix(name), conversion_func, base_type, dtype.name, ret.dtype.name)) return ret raise TypeError("%sCannot convert %r with type %s to Tensor: " "no conversion function registered." % (_error_prefix(name), value, unwrapped_type)) def internal_convert_n_to_tensor(values, dtype=None, name=None, as_ref=False, preferred_dtype=None, ctx=None): """Converts `values` to a list of `Tensor` objects. Args: values: A list of objects that can be consumed by `tf.convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` objects. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. ctx: The value of context.context(). Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. 
""" if not isinstance(values, collections.Sequence): raise TypeError("values must be a sequence.") ret = [] if ctx is None: ctx = context.context() for i, value in enumerate(values): n = None if name is None else "%s_%d" % (name, i) ret.append( internal_convert_to_tensor( value, dtype=dtype, name=n, as_ref=as_ref, preferred_dtype=preferred_dtype, ctx=ctx)) return ret def convert_n_to_tensor(values, dtype=None, name=None, preferred_dtype=None): """Converts `values` to a list of `Tensor` objects. Args: values: A list of objects that can be consumed by `tf.convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` objects. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. preferred_dtype: Optional element type for the returned tensors, used when dtype is None. In some cases, a caller may not have a dtype in mind when converting to a tensor, so preferred_dtype can be used as a soft preference. If the conversion to `preferred_dtype` is not possible, this argument has no effect. Returns: A list of `Tensor` and/or `IndexedSlices` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_n_to_tensor( values=values, dtype=dtype, name=name, preferred_dtype=preferred_dtype, as_ref=False) @tf_export(v1=["convert_to_tensor_or_indexed_slices"]) def convert_to_tensor_or_indexed_slices(value, dtype=None, name=None): """Converts the given object to a `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`. """ return internal_convert_to_tensor_or_indexed_slices( value=value, dtype=dtype, name=name, as_ref=False) def internal_convert_to_tensor_or_indexed_slices(value, dtype=None, name=None, as_ref=False): """Converts the given object to a `Tensor` or an `IndexedSlices`. If `value` is an `IndexedSlices` or `SparseTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: An `IndexedSlices`, `SparseTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name to use if a new `Tensor` is created. as_ref: True if the caller wants the results as ref tensors. Returns: A `Tensor`, `IndexedSlices`, or `SparseTensor` based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`. 
""" if isinstance(value, EagerTensor) and not context.executing_eagerly(): return internal_convert_to_tensor( value, dtype=dtype, name=name, as_ref=as_ref) elif isinstance(value, _TensorLike): if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value.dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtypes.as_dtype(dtype).name, value.dtype.name, str(value))) return value else: return internal_convert_to_tensor( value, dtype=dtype, name=name, as_ref=as_ref) def internal_convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None, as_ref=False): """Converts `values` to a list of `Tensor` or `IndexedSlices` objects. Any `IndexedSlices` or `SparseTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `IndexedSlices`. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. Returns: A list of `Tensor`, `IndexedSlices`, `SparseTensor` and/or `None` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ if not isinstance(values, collections.Sequence): raise TypeError("values must be a sequence.") ret = [] for i, value in enumerate(values): if value is None: ret.append(value) else: n = None if name is None else "%s_%d" % (name, i) ret.append( internal_convert_to_tensor_or_indexed_slices( value, dtype=dtype, name=n, as_ref=as_ref)) return ret def convert_n_to_tensor_or_indexed_slices(values, dtype=None, name=None): """Converts `values` to a list of `Output` or `IndexedSlices` objects. Any `IndexedSlices` or `SparseTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `IndexedSlices`, `SparseTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` `IndexedSlices`. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. Returns: A list of `Tensor`, `IndexedSlices`, and/or `SparseTensor` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_n_to_tensor_or_indexed_slices( values=values, dtype=dtype, name=name, as_ref=False) def convert_to_tensor_or_composite(value, dtype=None, name=None): """Converts the given object to a `Tensor` or `CompositeTensor`. If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: A `CompositeTensor` or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `CompositeTensor`. name: (Optional.) A name to use if a new `Tensor` is created. Returns: A `Tensor` or `CompositeTensor`, based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`. 
""" return internal_convert_to_tensor_or_composite( value=value, dtype=dtype, name=name, as_ref=False) def internal_convert_to_tensor_or_composite(value, dtype=None, name=None, as_ref=False): """Converts the given object to a `Tensor` or `CompositeTensor`. If `value` is a `CompositeTensor` it is returned unmodified. Otherwise, it is converted to a `Tensor` using `convert_to_tensor()`. Args: value: A `CompositeTensor`, or an object that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor` or `CompositeTensor`. name: (Optional.) A name to use if a new `Tensor` is created. as_ref: True if the caller wants the results as ref tensors. Returns: A `Tensor` or `CompositeTensor`, based on `value`. Raises: ValueError: If `dtype` does not match the element type of `value`. """ if isinstance(value, composite_tensor.CompositeTensor): value_dtype = getattr(value, "dtype", None) if dtype and not dtypes.as_dtype(dtype).is_compatible_with(value_dtype): raise ValueError( "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" % (dtypes.as_dtype(dtype).name, value.dtype.name, str(value))) return value else: return internal_convert_to_tensor( value, dtype=dtype, name=name, as_ref=as_ref, accept_composite_tensors=True) def internal_convert_n_to_tensor_or_composite(values, dtype=None, name=None, as_ref=False): """Converts `values` to a list of `Tensor` or `CompositeTensor` objects. Any `CompositeTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `CompositeTensor`, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor`s or `CompositeTensor`s. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. as_ref: True if the caller wants the results as ref tensors. Returns: A list of `Tensor`, `CompositeTensor`, and/or `None` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ if not isinstance(values, collections.Sequence): raise TypeError("values must be a sequence.") ret = [] for i, value in enumerate(values): if value is None: ret.append(value) else: n = None if name is None else "%s_%d" % (name, i) ret.append( internal_convert_to_tensor_or_composite( value, dtype=dtype, name=n, as_ref=as_ref)) return ret def convert_n_to_tensor_or_composite(values, dtype=None, name=None): """Converts `values` to a list of `Output` or `CompositeTensor` objects. Any `CompositeTensor` objects in `values` are returned unmodified. Args: values: A list of `None`, `CompositeTensor``, or objects that can be consumed by `convert_to_tensor()`. dtype: (Optional.) The required `DType` of the returned `Tensor`s or `CompositeTensor`s. name: (Optional.) A name prefix to used when a new `Tensor` is created, in which case element `i` will be given the name `name + '_' + i`. Returns: A list of `Tensor` and/or `CompositeTensor` objects. Raises: TypeError: If no conversion function is registered for an element in `values`. RuntimeError: If a registered conversion function returns an invalid value. """ return internal_convert_n_to_tensor_or_composite( values=values, dtype=dtype, name=name, as_ref=False) # TODO(josh11b): Add ctx argument to conversion_func() signature. 
@tf_export("register_tensor_conversion_function") def register_tensor_conversion_function(base_type, conversion_func, priority=100): """Registers a function for converting objects of `base_type` to `Tensor`. The conversion function must have the following signature: ```python def conversion_func(value, dtype=None, name=None, as_ref=False): # ... ``` It must return a `Tensor` with the given `dtype` if specified. If the conversion function creates a new `Tensor`, it should use the given `name` if specified. All exceptions will be propagated to the caller. The conversion function may return `NotImplemented` for some inputs. In this case, the conversion process will continue to try subsequent conversion functions. If `as_ref` is true, the function must return a `Tensor` reference, such as a `Variable`. NOTE: The conversion functions will execute in order of priority, followed by order of registration. To ensure that a conversion function `F` runs before another conversion function `G`, ensure that `F` is registered with a smaller priority than `G`. Args: base_type: The base type or tuple of base types for all objects that `conversion_func` accepts. conversion_func: A function that converts instances of `base_type` to `Tensor`. priority: Optional integer that indicates the priority for applying this conversion function. Conversion functions with smaller priority values run earlier than conversion functions with larger priority values. Defaults to 100. Raises: TypeError: If the arguments do not have the appropriate type. """ global _tensor_conversion_func_cache with _tensor_conversion_func_lock: if not (isinstance(base_type, type) or (isinstance(base_type, tuple) and all(isinstance(x, type) for x in base_type))): raise TypeError("base_type must be a type or a tuple of types.") if not callable(conversion_func): raise TypeError("conversion_func must be callable.") # context._context is checked so that we don't inadvertently create it. # This is because enable_eager_execution will fail when called from the main # function if the context._context is already created, and the # register_tensor_conversion_function calls happen when the module is # imported. if context._context is not None and context.executing_eagerly( ) and isinstance(base_type, six.integer_types + ( float, np.ndarray, )): # TODO(nareshmodi): consider setting a context variable which disables the # fastpath instead. raise TypeError( "Cannot register conversions for numpy arrays, python number types " "when executing eagerly.") try: funcs_at_priority = _tensor_conversion_func_registry[priority] except KeyError: funcs_at_priority = [] _tensor_conversion_func_registry[priority] = funcs_at_priority funcs_at_priority.append((base_type, conversion_func)) _tensor_conversion_func_cache = {} @tf_export("IndexedSlices") class IndexedSlices(_TensorLike, composite_tensor.CompositeTensor): """A sparse representation of a set of tensor slices at given indices. This class is a simple wrapper for a pair of `Tensor` objects: * `values`: A `Tensor` of any dtype with shape `[D0, D1, ..., Dn]`. * `indices`: A 1-D integer `Tensor` with shape `[D0]`. An `IndexedSlices` is typically used to represent a subset of a larger tensor `dense` of shape `[LARGE0, D1, .. , DN]` where `LARGE0 >> D0`. The values in `indices` are the indices in the first dimension of the slices that have been extracted from the larger tensor. The dense tensor `dense` represented by an `IndexedSlices` `slices` has ```python dense[slices.indices[i], :, :, :, ...] 
= slices.values[i, :, :, :, ...] ``` The `IndexedSlices` class is used principally in the definition of gradients for operations that have sparse gradients (e.g. `tf.gather`). Contrast this representation with `tf.SparseTensor`, which uses multi-dimensional indices and scalar values. """ def __init__(self, values, indices, dense_shape=None): """Creates an `IndexedSlices`.""" if not isinstance(values, tensor_spec.TensorSpec): _get_graph_from_inputs([values, indices, dense_shape]) self._values = values self._indices = indices self._dense_shape = dense_shape @property def values(self): """A `Tensor` containing the values of the slices.""" return self._values @property def indices(self): """A 1-D `Tensor` containing the indices of the slices.""" return self._indices @property def dense_shape(self): """A 1-D `Tensor` containing the shape of the corresponding dense tensor.""" return self._dense_shape @property def name(self): """The name of this `IndexedSlices`.""" return self.values.name @property def device(self): """The name of the device on which `values` will be produced, or `None`.""" return self.values.device @property def op(self): """The `Operation` that produces `values` as an output.""" return self.values.op @property def dtype(self): """The `DType` of elements in this tensor.""" return self.values.dtype @property def graph(self): """The `Graph` that contains the values, indices, and shape tensors.""" return self._values.graph def __str__(self): return "IndexedSlices(indices=%s, values=%s%s)" % ( self._indices, self._values, (", dense_shape=%s" % self._dense_shape) if self._dense_shape is not None else "") def __neg__(self): return IndexedSlices(-self.values, self.indices, self.dense_shape) def _to_components(self): if self._dense_shape is None: return (self._values, self._indices) else: return (self._values, self._indices, self._dense_shape) @classmethod def _from_components(cls, components, metadata): return cls(*components) def _shape_invariant_to_components(self, shape=None): if shape is None: shape = self._values.shape if self._dense_shape is None: return (shape, shape[:1]) # values, indices else: # values, indices, dense_shape return (shape, shape[:1], tensor_shape.TensorShape([shape.ndims])) @property def _is_graph_tensor(self): return hasattr(self._values, "graph") def consumers(self): return self._consumers() IndexedSlicesValue = collections.namedtuple( "IndexedSlicesValue", ["values", "indices", "dense_shape"]) def _device_string(dev_spec): if pydev.is_device_spec(dev_spec): return dev_spec.to_string() else: return dev_spec def _NodeDef(op_type, name, device=None, attrs=None): # pylint: disable=redefined-outer-name """Create a NodeDef proto. Args: op_type: Value for the "op" attribute of the NodeDef proto. name: Value for the "name" attribute of the NodeDef proto. device: string, device, or function from NodeDef to string. Value for the "device" attribute of the NodeDef proto. attrs: Optional dictionary where the key is the attribute name (a string) and the value is the respective "attr" attribute of the NodeDef proto (an AttrValue). Returns: A node_def_pb2.NodeDef protocol buffer. 
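  For example (an illustrative sketch; attrs and device functions are omitted):

  ```python
  node_def = _NodeDef("Const", "my_const", device="/device:CPU:0")
  # node_def.op == "Const", node_def.name == "my_const",
  # node_def.device == "/device:CPU:0"
  ```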
""" node_def = node_def_pb2.NodeDef() node_def.op = compat.as_bytes(op_type) node_def.name = compat.as_bytes(name) if attrs is not None: for k, v in six.iteritems(attrs): node_def.attr[k].CopyFrom(v) if device is not None: if callable(device): node_def.device = device(node_def) else: node_def.device = _device_string(device) return node_def # Copied from core/framework/node_def_util.cc # TODO(mrry,josh11b): Consolidate this validation in C++ code. _VALID_OP_NAME_REGEX = re.compile("^[A-Za-z0-9.][A-Za-z0-9_.\\-/]*$") _VALID_SCOPE_NAME_REGEX = re.compile("^[A-Za-z0-9_.\\-/]*$") def _create_c_op(graph, node_def, inputs, control_inputs): """Creates a TF_Operation. Args: graph: a `Graph`. node_def: `node_def_pb2.NodeDef` for the operation to create. inputs: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs, e.g. "int64 * N", "list(int64)"). The length of the list should be equal to the number of inputs specified by this operation's op def. control_inputs: A list of `Operation`s to set as control dependencies. Returns: A wrapped TF_Operation*. """ # pylint: disable=protected-access op_desc = c_api.TF_NewOperation(graph._c_graph, compat.as_str(node_def.op), compat.as_str(node_def.name)) if node_def.device: c_api.TF_SetDevice(op_desc, compat.as_str(node_def.device)) # Add inputs for op_input in inputs: if isinstance(op_input, (list, tuple)): c_api.TF_AddInputList(op_desc, [t._as_tf_output() for t in op_input]) else: c_api.TF_AddInput(op_desc, op_input._as_tf_output()) # Add control inputs for control_input in control_inputs: c_api.TF_AddControlInput(op_desc, control_input._c_op) # pylint: enable=protected-access # Add attrs for name, attr_value in node_def.attr.items(): serialized = attr_value.SerializeToString() # TODO(skyewm): this creates and deletes a new TF_Status for every attr. # It might be worth creating a convenient way to re-use the same status. c_api.TF_SetAttrValueProto(op_desc, compat.as_str(name), serialized) try: c_op = c_api.TF_FinishOperation(op_desc) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) return c_op @tf_export("Operation") class Operation(object): """Represents a graph node that performs computation on tensors. An `Operation` is a node in a TensorFlow `Graph` that takes zero or more `Tensor` objects as input, and produces zero or more `Tensor` objects as output. Objects of type `Operation` are created by calling a Python op constructor (such as `tf.matmul`) or `tf.Graph.create_op`. For example `c = tf.matmul(a, b)` creates an `Operation` of type "MatMul" that takes tensors `a` and `b` as input, and produces `c` as output. After the graph has been launched in a session, an `Operation` can be executed by passing it to `tf.Session.run`. `op.run()` is a shortcut for calling `tf.compat.v1.get_default_session().run(op)`. """ def __init__(self, node_def, g, inputs=None, output_types=None, control_inputs=None, input_types=None, original_op=None, op_def=None): r"""Creates an `Operation`. NOTE: This constructor validates the name of the `Operation` (passed as `node_def.name`). Valid `Operation` names match the following regular expression: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* Args: node_def: `node_def_pb2.NodeDef`. `NodeDef` for the `Operation`. Used for attributes of `node_def_pb2.NodeDef`, typically `name`, `op`, and `device`. The `input` attribute is irrelevant here as it will be computed when generating the model. g: `Graph`. The parent graph. 
inputs: list of `Tensor` objects. The inputs to this `Operation`. output_types: list of `DType` objects. List of the types of the `Tensors` computed by this operation. The length of this list indicates the number of output endpoints of the `Operation`. control_inputs: list of operations or tensors from which to have a control dependency. input_types: List of `DType` objects representing the types of the tensors accepted by the `Operation`. By default uses `[x.dtype.base_dtype for x in inputs]`. Operations that expect reference-typed inputs must specify these explicitly. original_op: Optional. Used to associate the new `Operation` with an existing `Operation` (for example, a replica with the op that was replicated). op_def: Optional. The `op_def_pb2.OpDef` proto that describes the op type that this `Operation` represents. Raises: TypeError: if control inputs are not Operations or Tensors, or if `node_def` is not a `NodeDef`, or if `g` is not a `Graph`, or if `inputs` are not tensors, or if `inputs` and `input_types` are incompatible. ValueError: if the `node_def` name is not valid. """ # For internal use only: `node_def` can be set to a TF_Operation to create # an Operation for that op. This is useful for creating Operations for ops # indirectly created by C API methods, e.g. the ops created by # TF_ImportGraphDef. When `node_def` is a TF_Operation, all optional fields # should be None. if isinstance(node_def, node_def_pb2.NodeDef): if node_def.ByteSize() >= (1 << 31) or node_def.ByteSize() < 0: raise ValueError( "Cannot create a tensor proto whose content is larger than 2GB.") if not _VALID_OP_NAME_REGEX.match(node_def.name): raise ValueError("'%s' is not a valid node name" % node_def.name) c_op = None elif type(node_def).__name__ == "SwigPyObject": assert inputs is None assert output_types is None assert control_inputs is None assert input_types is None assert original_op is None assert op_def is None c_op = node_def else: raise TypeError("node_def needs to be a NodeDef: %s" % node_def) if not isinstance(g, Graph): raise TypeError("g needs to be a Graph: %s" % g) self._graph = g if inputs is None: inputs = [] elif not isinstance(inputs, list): raise TypeError("inputs needs to be a list of Tensors: %s" % inputs) for a in inputs: if not isinstance(a, Tensor): raise TypeError("input needs to be a Tensor: %s" % a) if input_types is None: input_types = [i.dtype.base_dtype for i in inputs] else: if not all( x.is_compatible_with(i.dtype) for i, x in zip(inputs, input_types)): raise TypeError("In op '%s', input types (%s) are not compatible " "with expected types (%s)" % (node_def.name, [i.dtype for i in inputs], input_types)) # Build the list of control inputs. control_input_ops = [] if control_inputs: for c in control_inputs: control_op = None if isinstance(c, Operation): control_op = c elif isinstance(c, (Tensor, IndexedSlices)): control_op = c.op else: raise TypeError("Control input must be an Operation, " "a Tensor, or IndexedSlices: %s" % c) control_input_ops.append(control_op) # This will be set by self.inputs. self._inputs_val = None # pylint: disable=protected-access self._id_value = self._graph._next_id() self._original_op = original_op self._traceback = tf_stack.extract_stack() # List of _UserDevSpecs holding code location of device context manager # invocations and the users original argument to them. self._device_code_locations = None # Dict mapping op name to file and line information for op colocation # context managers. 
self._colocation_code_locations = None self._control_flow_context = self.graph._get_control_flow_context() # pylint: enable=protected-access # Initialize self._c_op. if c_op: self._c_op = c_op else: if op_def is None: op_def = self._graph._get_op_def(node_def.op) # TODO(skyewm): op_def_library.apply_op() flattens the incoming inputs. # Refactor so we don't have to do this here. grouped_inputs = self._reconstruct_sequence_inputs( op_def, inputs, node_def.attr) self._c_op = _create_c_op(self._graph, node_def, grouped_inputs, control_input_ops) # Initialize self._outputs. num_outputs = c_api.TF_OperationNumOutputs(self._c_op) output_types = [ c_api.TF_OperationOutputType(c_api_util.tf_output(self._c_op, i)) for i in range(num_outputs) ] self._outputs = [ Tensor(self, i, output_type) for i, output_type in enumerate(output_types) ] self._graph._add_op(self) # pylint: disable=protected-access if not c_op: self._control_flow_post_processing() def _control_flow_post_processing(self): """Add this op to its control flow context. This may add new ops and change this op's inputs. self.inputs must be available before calling this method. """ for input_tensor in self.inputs: control_flow_util.CheckInputFromValidContext(self, input_tensor.op) if self._control_flow_context is not None: self._control_flow_context.AddOp(self) def _reconstruct_sequence_inputs(self, op_def, inputs, attrs): """Regroups a flat list of input tensors into scalar and sequence inputs. Args: op_def: The `op_def_pb2.OpDef` (for knowing the input types) inputs: a list of input `Tensor`s to the op. attrs: mapping from attr name to `attr_value_pb2.AttrValue` (these define how long each sequence is) Returns: A list of `Tensor`s (corresponding to scalar inputs) and lists of `Tensor`s (corresponding to sequence inputs). """ grouped_inputs = [] i = 0 for input_arg in op_def.input_arg: if input_arg.number_attr: input_len = attrs[input_arg.number_attr].i is_sequence = True elif input_arg.type_list_attr: input_len = len(attrs[input_arg.type_list_attr].list.type) is_sequence = True else: input_len = 1 is_sequence = False if is_sequence: grouped_inputs.append(inputs[i:i + input_len]) else: grouped_inputs.append(inputs[i]) i += input_len assert i == len(inputs) return grouped_inputs def colocation_groups(self): """Returns the list of colocation groups of the op.""" default_colocation_group = [compat.as_bytes("loc:@%s" % self.name)] try: class_attr = self.get_attr("_class") except ValueError: # This op has no explicit colocation group, so it is itself its # own root of a colocation group. return default_colocation_group attr_groups = [ class_name for class_name in class_attr if class_name.startswith(b"loc:@") ] # If there are no colocation groups in the explicit _class field, # return the default colocation group. return attr_groups if attr_groups else default_colocation_group def values(self): """DEPRECATED: Use outputs.""" return tuple(self.outputs) def _get_control_flow_context(self): """Returns the control flow context of this op. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context of this op. Args: ctx: a context object. 
""" self._control_flow_context = ctx @property def name(self): """The full name of this operation.""" return c_api.TF_OperationName(self._c_op) @property def _id(self): """The unique integer id of this operation.""" return self._id_value @property def device(self): """The name of the device to which this op has been assigned, if any. Returns: The string name of the device to which this op has been assigned, or an empty string if it has not been assigned to a device. """ return c_api.TF_OperationDevice(self._c_op) @property def _device_assignments(self): """Code locations for device context managers active at op creation. This property will return a list of traceable_stack.TraceableObject instances where .obj is a string representing the assigned device (or information about the function that would be applied to this op to compute the desired device) and the filename and lineno members record the location of the relevant device context manager. For example, suppose file_a contained these lines: file_a.py: 15: with tf.device('/gpu:0'): 16: node_b = tf.constant(4, name='NODE_B') Then a TraceableObject t_obj representing the device context manager would have these member values: t_obj.obj -> '/gpu:0' t_obj.filename = 'file_a.py' t_obj.lineno = 15 and node_b.op._device_assignments would return the list [t_obj]. Returns: [str: traceable_stack.TraceableObject, ...] as per this method's description, above. """ return self._device_code_locations or [] @property def _colocation_dict(self): """Code locations for colocation context managers active at op creation. This property will return a dictionary for which the keys are nodes with which this Operation is colocated, and for which the values are traceable_stack.TraceableObject instances. The TraceableObject instances record the location of the relevant colocation context manager but have the "obj" field set to None to prevent leaking private data. For example, suppose file_a contained these lines: file_a.py: 14: node_a = tf.constant(3, name='NODE_A') 15: with tf.compat.v1.colocate_with(node_a): 16: node_b = tf.constant(4, name='NODE_B') Then a TraceableObject t_obj representing the colocation context manager would have these member values: t_obj.obj -> None t_obj.filename = 'file_a.py' t_obj.lineno = 15 and node_b.op._colocation_dict would return the dictionary { 'NODE_A': t_obj } Returns: {str: traceable_stack.TraceableObject} as per this method's description, above. """ locations_dict = self._colocation_code_locations or {} return locations_dict.copy() @property def _output_types(self): """List this operation's output types. Returns: List of the types of the Tensors computed by this operation. Each element in the list is an integer whose value is one of the TF_DataType enums defined in c_api.h The length of this list indicates the number of output endpoints of the operation. """ num_outputs = c_api.TF_OperationNumOutputs(self._c_op) output_types = [ c_api.TF_OperationOutputType(self._tf_output(i)) for i in xrange(num_outputs) ] # In all the tests we have output_types that are passed into # Operation.__init__ are a list of ints (which is illegal according # to the docstring), but input_types are instances of DType. # This extra assert is to catch if we ever use DType for output_types. 
if output_types: assert isinstance(output_types[0], int) return output_types def _tf_output(self, output_idx): """Create and return a new TF_Output for output_idx'th output of this op.""" tf_output = c_api.TF_Output() tf_output.oper = self._c_op tf_output.index = output_idx return tf_output def _tf_input(self, input_idx): """Create and return a new TF_Input for input_idx'th input of this op.""" tf_input = c_api.TF_Input() tf_input.oper = self._c_op tf_input.index = input_idx return tf_input def _set_device(self, device): # pylint: disable=redefined-outer-name """Set the device of this operation. Args: device: string or device.. The device to set. """ self._set_device_from_string(compat.as_str(_device_string(device))) def _set_device_from_string(self, device_str): """Fast path to set device if the type is known to be a string. This function is called frequently enough during graph construction that there are non-trivial performance gains if the caller can guarantee that the specified device is already a string. Args: device_str: A string specifying where to place this op. """ c_api.SetRequestedDevice( self._graph._c_graph, # pylint: disable=protected-access self._c_op, # pylint: disable=protected-access device_str) def _update_input(self, index, tensor): """Update the input to this operation at the given index. NOTE: This is for TF internal use only. Please don't use it. Args: index: the index of the input to update. tensor: the Tensor to be used as the input at the given index. Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Reset cached inputs. self._inputs_val = None c_api.UpdateEdge( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._tf_input(index)) def _add_while_inputs(self, tensors): """See AddWhileInputHack in python_api.h. NOTE: This is for TF internal use only. Please don't use it. Args: tensors: list of Tensors Raises: TypeError: if tensor is not a Tensor, or if input tensor type is not convertible to dtype. ValueError: if the Tensor is from a different graph. """ for tensor in tensors: if not isinstance(tensor, Tensor): raise TypeError("tensor must be a Tensor: %s" % tensor) _assert_same_graph(self, tensor) # Reset cached inputs. self._inputs_val = None c_api.AddWhileInputHack( self._graph._c_graph, # pylint: disable=protected-access tensor._as_tf_output(), # pylint: disable=protected-access self._c_op) def _add_control_inputs(self, ops): """Add a list of new control inputs to this operation. Args: ops: the list of Operations to add as control input. Raises: TypeError: if ops is not a list of Operations. ValueError: if any op in ops is from a different graph. """ for op in ops: if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access def _add_control_input(self, op): """Add a new control input to this operation. Args: op: the Operation to add as control input. Raises: TypeError: if op is not an Operation. ValueError: if op is from a different graph. 
""" if not isinstance(op, Operation): raise TypeError("op must be an Operation: %s" % op) c_api.AddControlInput(self._graph._c_graph, self._c_op, op._c_op) # pylint: disable=protected-access def _remove_all_control_inputs(self): """Removes any control inputs to this operation.""" c_api.RemoveAllControlInputs(self._graph._c_graph, self._c_op) # pylint: disable=protected-access def _add_outputs(self, types, shapes): """Adds new Tensors to self.outputs. Note: this is generally unsafe to use. This is used in certain situations in conjunction with _set_type_list_attr. Arguments: types: list of DTypes shapes: list of TensorShapes """ assert len(types) == len(shapes) orig_num_outputs = len(self.outputs) for i in range(len(types)): t = Tensor(self, orig_num_outputs + i, types[i]) self._outputs.append(t) t.set_shape(shapes[i]) def __str__(self): return str(self.node_def) def __repr__(self): return "<tf.Operation '%s' type=%s>" % (self.name, self.type) @property def outputs(self): """The list of `Tensor` objects representing the outputs of this op.""" return self._outputs # pylint: disable=protected-access class _InputList(object): """Immutable input list wrapper.""" def __init__(self, inputs): self._inputs = inputs def __iter__(self): return iter(self._inputs) def __len__(self): return len(self._inputs) def __bool__(self): return bool(self._inputs) # Python 3 wants __bool__, Python 2.7 wants __nonzero__ __nonzero__ = __bool__ def __getitem__(self, i): return self._inputs[i] # pylint: enable=protected-access @property def inputs(self): """The list of `Tensor` objects representing the data inputs of this op.""" if self._inputs_val is None: tf_outputs = c_api.GetOperationInputs(self._c_op) # pylint: disable=protected-access retval = [ self.graph._get_tensor_by_tf_output(tf_output) for tf_output in tf_outputs ] # pylint: enable=protected-access self._inputs_val = Operation._InputList(retval) return self._inputs_val @property def _inputs(self): logging.warning("Operation._inputs is private, use Operation.inputs " "instead. Operation._inputs will eventually be removed.") return self.inputs @_inputs.setter def _inputs(self, value): raise ValueError("Cannot assign _inputs") @property def _input_types(self): num_inputs = c_api.TF_OperationNumInputs(self._c_op) input_types = [ dtypes.as_dtype(c_api.TF_OperationInputType(self._tf_input(i))) for i in xrange(num_inputs) ] return input_types @_input_types.setter def _input_types(self, value): raise ValueError("Cannot assign _input_types") @property def control_inputs(self): """The `Operation` objects on which this op has a control dependency. Before this op is executed, TensorFlow will ensure that the operations in `self.control_inputs` have finished executing. This mechanism can be used to run ops sequentially for performance reasons, or to ensure that the side effects of an op are observed in the correct order. Returns: A list of `Operation` objects. """ control_c_ops = c_api.TF_OperationGetControlInputs_wrapper(self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access @property def _control_outputs(self): """The `Operation` objects which have a control dependency on this op. Before any of the ops in self._control_outputs can execute tensorflow will ensure self has finished executing. Returns: A list of `Operation` objects. 
""" control_c_ops = c_api.TF_OperationGetControlOutputs_wrapper(self._c_op) # pylint: disable=protected-access return [ self.graph._get_operation_by_name_unsafe(c_api.TF_OperationName(c_op)) for c_op in control_c_ops ] # pylint: enable=protected-access @property def _control_inputs(self): logging.warning("Operation._control_inputs is private, use " "Operation.control_inputs instead. " "Operation._control_inputs will eventually be removed.") return self.control_inputs @_control_inputs.setter def _control_inputs(self, value): logging.warning("Operation._control_inputs is private, use " "Operation.control_inputs instead. " "Operation._control_inputs will eventually be removed.") # Copy value because it may be self._control_inputs_val (in particular if # this is called from self._control_inputs += ...), and we don't want to # clear value below. value = copy.copy(value) self._remove_all_control_inputs() self._add_control_inputs(value) @property def type(self): """The type of the op (e.g. `"MatMul"`).""" return c_api.TF_OperationOpType(self._c_op) @property def graph(self): """The `Graph` that contains this operation.""" return self._graph @property def node_def(self): # pylint: disable=line-too-long """Returns the `NodeDef` representation of this operation. Returns: A [`NodeDef`](https://www.tensorflow.org/code/tensorflow/core/framework/node_def.proto) protocol buffer. """ # pylint: enable=line-too-long with c_api_util.tf_buffer() as buf: c_api.TF_OperationToNodeDef(self._c_op, buf) data = c_api.TF_GetBuffer(buf) node_def = node_def_pb2.NodeDef() node_def.ParseFromString(compat.as_bytes(data)) return node_def @property def _node_def(self): logging.warning("Operation._node_def is private, use Operation.node_def " "instead. Operation._node_def will eventually be removed.") return self.node_def @property def op_def(self): # pylint: disable=line-too-long """Returns the `OpDef` proto that represents the type of this op. Returns: An [`OpDef`](https://www.tensorflow.org/code/tensorflow/core/framework/op_def.proto) protocol buffer. """ # pylint: enable=line-too-long return self._graph._get_op_def(self.type) @property def _op_def(self): logging.warning("Operation._op_def is private, use Operation.op_def " "instead. Operation._op_def will eventually be removed.") return self.op_def @property def traceback(self): """Returns the call stack from when this operation was constructed.""" return tf_stack.convert_stack(self._traceback) @property def traceback_with_start_lines(self): """Same as traceback but includes start line of function definition. Returns: A list of 5-tuples (filename, lineno, name, code, func_start_lineno). 
""" return tf_stack.convert_stack( self._traceback, include_func_start_lineno=True) def _set_attr(self, attr_name, attr_value): """Private method used to set an attribute in the node_def.""" buf = c_api.TF_NewBufferFromString( compat.as_bytes(attr_value.SerializeToString())) try: # pylint: disable=protected-access c_api.SetAttr(self._graph._c_graph, self._c_op, attr_name, buf) # pylint: enable=protected-access finally: c_api.TF_DeleteBuffer(buf) def _set_func_attr(self, attr_name, func_name): """Private method used to set a function attribute in the node_def.""" func = attr_value_pb2.NameAttrList(name=func_name) self._set_attr(attr_name, attr_value_pb2.AttrValue(func=func)) def _set_func_list_attr(self, attr_name, func_names): """Private method used to set a list(function) attribute in the node_def.""" funcs = [attr_value_pb2.NameAttrList(name=func_name) for func_name in func_names] funcs_list = attr_value_pb2.AttrValue.ListValue(func=funcs) self._set_attr(attr_name, attr_value_pb2.AttrValue(list=funcs_list)) def _set_type_list_attr(self, attr_name, types): """Private method used to set a list(type) attribute in the node_def.""" if not types: return if isinstance(types[0], dtypes.DType): types = [dt.as_datatype_enum for dt in types] types_list = attr_value_pb2.AttrValue.ListValue(type=types) self._set_attr(attr_name, attr_value_pb2.AttrValue(list=types_list)) def _set_shape_list_attr(self, attr_name, shapes): """Private method used to set a list(shape) attribute in the node_def.""" shapes = [s.as_proto() for s in shapes] shapes_list = attr_value_pb2.AttrValue.ListValue(shape=shapes) self._set_attr(attr_name, attr_value_pb2.AttrValue(list=shapes_list)) def _clear_attr(self, attr_name): """Private method used to clear an attribute in the node_def.""" # pylint: disable=protected-access c_api.ClearAttr(self._graph._c_graph, self._c_op, attr_name) # pylint: enable=protected-access def get_attr(self, name): """Returns the value of the attr of this op with the given `name`. Args: name: The name of the attr to fetch. Returns: The value of the attr, as a Python object. Raises: ValueError: If this op does not have an attr with the given `name`. """ fields = ("s", "i", "f", "b", "type", "shape", "tensor", "func") try: with c_api_util.tf_buffer() as buf: c_api.TF_OperationGetAttrValueProto(self._c_op, name, buf) data = c_api.TF_GetBuffer(buf) except errors.InvalidArgumentError as e: # Convert to ValueError for backwards compatibility. raise ValueError(str(e)) x = attr_value_pb2.AttrValue() x.ParseFromString(data) oneof_value = x.WhichOneof("value") if oneof_value is None: return [] if oneof_value == "list": for f in fields: if getattr(x.list, f): if f == "type": return [dtypes.as_dtype(t) for t in x.list.type] else: return list(getattr(x.list, f)) return [] if oneof_value == "type": return dtypes.as_dtype(x.type) assert oneof_value in fields, "Unsupported field type in " + str(x) return getattr(x, oneof_value) def run(self, feed_dict=None, session=None): """Runs this operation in a `Session`. Calling this method will execute all preceding operations that produce the inputs needed for this operation. *N.B.* Before invoking `Operation.run()`, its graph must have been launched in a session, and either a default session must be available, or `session` must be specified explicitly. Args: feed_dict: A dictionary that maps `Tensor` objects to feed values. See `tf.Session.run` for a description of the valid feed values. session: (Optional.) The `Session` to be used to run to this operation. 
If none, the default session will be used. """ _run_using_default_session(self, feed_dict, self.graph, session) _gradient_registry = registry.Registry("gradient") @tf_export("RegisterGradient") class RegisterGradient(object): """A decorator for registering the gradient function for an op type. This decorator is only used when defining a new op type. For an op with `m` inputs and `n` outputs, the gradient function is a function that takes the original `Operation` and `n` `Tensor` objects (representing the gradients with respect to each output of the op), and returns `m` `Tensor` objects (representing the partial gradients with respect to each input of the op). For example, assuming that operations of type `"Sub"` take two inputs `x` and `y`, and return a single output `x - y`, the following gradient function would be registered: ```python @tf.RegisterGradient("Sub") def _sub_grad(unused_op, grad): return grad, tf.negative(grad) ``` The decorator argument `op_type` is the string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. """ def __init__(self, op_type): """Creates a new decorator with `op_type` as the Operation type. Args: op_type: The string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. """ if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string") self._op_type = op_type def __call__(self, f): """Registers the function `f` as gradient function for `op_type`.""" _gradient_registry.register(f, self._op_type) return f @deprecation.deprecated_endpoints("NotDifferentiable", "NoGradient") @tf_export("no_gradient", v1=["no_gradient", "NotDifferentiable", "NoGradient"]) def no_gradient(op_type): """Specifies that ops of type `op_type` is not differentiable. This function should *not* be used for operations that have a well-defined gradient that is not yet implemented. This function is only used when defining a new op type. It may be used for ops such as `tf.size()` that are not differentiable. For example: ```python tf.no_gradient("Size") ``` The gradient computed for 'op_type' will then propagate zeros. For ops that have a well-defined gradient but are not yet implemented, no declaration should be made, and an error *must* be thrown if an attempt to request its gradient is made. Args: op_type: The string type of an operation. This corresponds to the `OpDef.name` field for the proto that defines the operation. Raises: TypeError: If `op_type` is not a string. """ if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string") _gradient_registry.register(None, op_type) # Aliases for the old names, will be eventually removed. NoGradient = no_gradient NotDifferentiable = no_gradient def get_gradient_function(op): """Returns the function that computes gradients for "op".""" if not op.inputs: return None try: op_type = op.get_attr("_gradient_op_type") except ValueError: op_type = op.type return _gradient_registry.lookup(op_type) _shape_registry = registry.Registry("shape functions") _default_shape_function_registry = registry.Registry("default shape functions") # These are set to common_shapes.call_cpp_shape_fn by op generated code # (generated by python_op_gen.cc). # It is set outside ops.py to avoid a circular dependency. 
_call_cpp_shape_fn = None _call_cpp_shape_fn_and_require_op = None def _set_call_cpp_shape_fn(call_cpp_shape_fn): """Sets default shape fns from passed common_shapes.call_cpp_shape_fn.""" global _call_cpp_shape_fn, _call_cpp_shape_fn_and_require_op if _call_cpp_shape_fn: return # already registered def call_without_requiring(op): return call_cpp_shape_fn(op, require_shape_fn=False) _call_cpp_shape_fn = call_without_requiring def call_with_requiring(op): return call_cpp_shape_fn(op, require_shape_fn=True) _call_cpp_shape_fn_and_require_op = call_with_requiring class RegisterShape(object): """No longer used. Was: A decorator for registering a shape function. Shape functions must now be registered via the SetShapeFn on the original Op specification in C++. """ def __init__(self, op_type): """Saves the `op_type` as the `Operation` type.""" if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string") self._op_type = op_type def __call__(self, f): """Registers "f" as the shape function for "op_type".""" if f is None: assert _call_cpp_shape_fn # None is a special "weak" value that provides a default shape function, # and can be overridden by a non-None registration. try: _default_shape_function_registry.register(_call_cpp_shape_fn, self._op_type) except KeyError: # Ignore duplicate registrations of the weak value. This can # occur if the op library input to wrapper generation # inadvertently links in one or more of the standard op # libraries. pass else: _shape_registry.register(f, self._op_type) return f def set_shape_and_handle_data_for_outputs(_): """No op. TODO(b/74620627): Remove this.""" pass class OpStats(object): """A holder for statistics about an operator. This class holds information about the resource requirements for an op, including the size of its weight parameters on-disk and how many FLOPS it requires to execute forward inference. If you define a new operation, you can create a function that will return a set of information about its usage of the CPU and disk space when serialized. The function itself takes a Graph object that's been set up so you can call methods like get_tensor_by_name to help calculate the results, and a NodeDef argument. """ def __init__(self, statistic_type, value=None): """Sets up the initial placeholders for the statistics.""" self.statistic_type = statistic_type self.value = value @property def statistic_type(self): return self._statistic_type @statistic_type.setter def statistic_type(self, statistic_type): self._statistic_type = statistic_type @property def value(self): return self._value @value.setter def value(self, value): self._value = value def __iadd__(self, other): if other.statistic_type != self.statistic_type: raise ValueError("Can't add an OpStat of type %s to one of %s." % (self.statistic_type, other.statistic_type)) if self.value is None: self.value = other.value elif other.value is not None: self._value += other.value return self _stats_registry = registry.Registry("statistical functions") class RegisterStatistics(object): """A decorator for registering the statistics function for an op type. This decorator can be defined for an op type so that it gives a report on the resources used by an instance of an operator, in the form of an OpStats object. Well-known types of statistics include these so far: - flops: When running a graph, the bulk of the computation happens doing numerical calculations like matrix multiplications. This type allows a node to return how many floating-point operations it takes to complete. 
The total number of FLOPs for a graph is a good guide to its expected latency. You can add your own statistics just by picking a new type string, registering functions for the ops you care about, and then calling get_stats_for_node_def. If a statistic for an op is registered multiple times, a KeyError will be raised. Since the statistics is counted on a per-op basis. It is not suitable for model parameters (capacity), which is expected to be counted only once, even if it is shared by multiple ops. (e.g. RNN) For example, you can define a new metric called doohickey for a Foo operation by placing this in your code: ```python @ops.RegisterStatistics("Foo", "doohickey") def _calc_foo_bojangles(unused_graph, unused_node_def): return ops.OpStats("doohickey", 20) ``` Then in client code you can retrieve the value by making this call: ```python doohickey = ops.get_stats_for_node_def(graph, node_def, "doohickey") ``` If the NodeDef is for an op with a registered doohickey function, you'll get back the calculated amount in doohickey.value, or None if it's not defined. """ def __init__(self, op_type, statistic_type): """Saves the `op_type` as the `Operation` type.""" if not isinstance(op_type, six.string_types): raise TypeError("op_type must be a string.") if "," in op_type: raise TypeError("op_type must not contain a comma.") self._op_type = op_type if not isinstance(statistic_type, six.string_types): raise TypeError("statistic_type must be a string.") if "," in statistic_type: raise TypeError("statistic_type must not contain a comma.") self._statistic_type = statistic_type def __call__(self, f): """Registers "f" as the statistics function for "op_type".""" _stats_registry.register(f, self._op_type + "," + self._statistic_type) return f def get_stats_for_node_def(graph, node, statistic_type): """Looks up the node's statistics function in the registry and calls it. This function takes a Graph object and a NodeDef from a GraphDef, and if there's an associated statistics method, calls it and returns a result. If no function has been registered for the particular node type, it returns an empty statistics object. Args: graph: A Graph object that's been set up with the node's graph. node: A NodeDef describing the operator. statistic_type: A string identifying the statistic we're interested in. Returns: An OpStats object containing information about resource usage. """ try: stats_func = _stats_registry.lookup(node.op + "," + statistic_type) result = stats_func(graph, node) except LookupError: result = OpStats(statistic_type) return result def name_from_scope_name(name): """Returns the name of an op given the name of its scope. Args: name: the name of the scope. Returns: the name of the op (equal to scope name minus any trailing slash). """ return name[:-1] if (name and name[-1] == "/") else name _MUTATION_LOCK_GROUP = 0 _SESSION_RUN_LOCK_GROUP = 1 @tf_export("Graph") class Graph(object): """A TensorFlow computation, represented as a dataflow graph. A `Graph` contains a set of `tf.Operation` objects, which represent units of computation; and `tf.Tensor` objects, which represent the units of data that flow between operations. A default `Graph` is always registered, and accessible by calling `tf.compat.v1.get_default_graph`. 
To add an operation to the default graph, simply call one of the functions that defines a new `Operation`: ```python c = tf.constant(4.0) assert c.graph is tf.compat.v1.get_default_graph() ``` Another typical usage involves the `tf.Graph.as_default` context manager, which overrides the current default graph for the lifetime of the context: ```python g = tf.Graph() with g.as_default(): # Define operations and tensors in `g`. c = tf.constant(30.0) assert c.graph is g ``` Important note: This class *is not* thread-safe for graph construction. All operations should be created from a single thread, or external synchronization must be provided. Unless otherwise specified, all methods are not thread-safe. A `Graph` instance supports an arbitrary number of "collections" that are identified by name. For convenience when building a large graph, collections can store groups of related objects: for example, the `tf.Variable` uses a collection (named `tf.GraphKeys.GLOBAL_VARIABLES`) for all variables that are created during the construction of a graph. The caller may define additional collections by specifying a new name. """ def __init__(self): """Creates a new, empty Graph.""" # Protects core state that can be returned via public accessors. # Thread-safety is provided on a best-effort basis to support buggy # programs, and is not guaranteed by the public `tf.Graph` API. # # NOTE(mrry): This does not protect the various stacks. A warning will # be reported if these are used from multiple threads self._lock = threading.RLock() # The group lock synchronizes Session.run calls with methods that create # and mutate ops (e.g. Graph.create_op()). This synchronization is # necessary because it's illegal to modify an operation after it's been run. # The group lock allows any number of threads to mutate ops at the same time # but if any modification is going on, all Session.run calls have to wait. # Similarly, if one or more Session.run calls are going on, all mutate ops # have to wait until all Session.run calls have finished. self._group_lock = lock_util.GroupLock(num_groups=2) self._nodes_by_id = {} # GUARDED_BY(self._lock) self._next_id_counter = 0 # GUARDED_BY(self._lock) self._nodes_by_name = {} # GUARDED_BY(self._lock) self._version = 0 # GUARDED_BY(self._lock) # Maps a name used in the graph to the next id to use for that name. self._names_in_use = {} self._stack_state_is_thread_local = False self._thread_local = threading.local() # Functions that will be applied to choose a device if none is specified. # In TF2.x or after switch_to_thread_local(), # self._thread_local._device_function_stack is used instead. self._graph_device_function_stack = traceable_stack.TraceableStack() # Default original_op applied to new ops. self._default_original_op = None # Current control flow context. It could be either CondContext or # WhileContext defined in ops/control_flow_ops.py self._control_flow_context = None # A new node will depend of the union of all of the nodes in the stack. # In TF2.x or after switch_to_thread_local(), # self._thread_local._control_dependencies_stack is used instead. self._graph_control_dependencies_stack = [] # Arbitrary collections of objects. self._collections = {} # The graph-level random seed self._seed = None # A dictionary of attributes that should be applied to all ops. self._attr_scope_map = {} # A map from op type to the kernel label that should be used. self._op_to_kernel_label_map = {} # A map from op type to an alternative op type that should be used when # computing gradients. 
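    # (For example `{"Square": "CustomSquare"}`, as installed by the
    # `Graph.gradient_override_map()` context manager; shown for illustration.)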
self._gradient_override_map = {} # True if the graph is considered "finalized". In that case no # new operations can be added. self._finalized = False # Functions defined in the graph self._functions = collections.OrderedDict() # Default GraphDef versions self._graph_def_versions = versions_pb2.VersionDef( producer=versions.GRAPH_DEF_VERSION, min_consumer=versions.GRAPH_DEF_VERSION_MIN_CONSUMER) self._building_function = False # Stack of colocate_with ops. In TF2.x or after switch_to_thread_local(), # self._thread_local._colocation_stack is used instead. self._graph_colocation_stack = traceable_stack.TraceableStack() # Set of tensors that are dangerous to feed! self._unfeedable_tensors = set() # Set of operations that are dangerous to fetch! self._unfetchable_ops = set() # A map of tensor handle placeholder to tensor dtype. self._handle_feeders = {} # A map from tensor handle to its read op. self._handle_readers = {} # A map from tensor handle to its move op. self._handle_movers = {} # A map from tensor handle to its delete op. self._handle_deleters = {} # Allow optimizers and other objects to pseudo-uniquely key graphs (this key # will be shared when defining function graphs, for example, so optimizers # being called inside function definitions behave as if they were seeing the # actual outside graph). self._graph_key = "grap-key-%d/" % (uid(),) # A string with the last reduction method passed to # losses.compute_weighted_loss(), or None. This is required only for # backward compatibility with Estimator and optimizer V1 use cases. self._last_loss_reduction = None # Flag that is used to indicate whether loss has been scaled by optimizer. # If this flag has been set, then estimator uses it to scale losss back # before reporting. This is required only for backward compatibility with # Estimator and optimizer V1 use cases. self._is_loss_scaled_by_optimizer = False self._container = "" self._registered_ops = op_def_registry.get_registered_ops() # Set to True if this graph is being built in an # AutomaticControlDependencies context. self._add_control_dependencies = False # TODO(skyewm): fold as much of the above as possible into the C # implementation self._scoped_c_graph = c_api_util.ScopedTFGraph() # The C API requires all ops to have shape functions. Disable this # requirement (many custom ops do not have shape functions, and we don't # want to break these existing cases). c_api.SetRequireShapeInferenceFns(self._c_graph, False) if tf2.enabled(): self.switch_to_thread_local() # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. @tf_contextlib.contextmanager def _variable_creator_scope(self, creator, priority=100): """Scope which defines a variable creation function. Args: creator: A callable taking `next_creator` and `kwargs`. See the `tf.variable_creator_scope` docstring. priority: Creators with a higher `priority` are called first. Within the same priority, creators are called inner-to-outer. Yields: `_variable_creator_scope` is a context manager with a side effect, but doesn't return a value. Raises: RuntimeError: If variable creator scopes are not properly nested. """ # This step keeps a reference to the existing stack, and it also initializes # self._thread_local._variable_creator_stack if it doesn't exist yet. 
old = self._variable_creator_stack new = list(old) new.append((priority, creator)) # Sorting is stable, so we'll put higher-priority creators later in the list # but otherwise maintain registration order. new.sort(key=lambda item: item[0]) self._thread_local._variable_creator_stack = new # pylint: disable=protected-access try: yield finally: if self._thread_local._variable_creator_stack is not new: # pylint: disable=protected-access raise RuntimeError( "Exiting variable_creator_scope without proper nesting.") self._thread_local._variable_creator_stack = old # pylint: disable=protected-access # Note: this method is private because the API of tf.Graph() is public and # frozen, and this functionality is still not ready for public visibility. @property def _variable_creator_stack(self): if not hasattr(self._thread_local, "_variable_creator_stack"): self._thread_local._variable_creator_stack = [] # pylint: disable=protected-access # This previously returned a copy of the stack instead of the stack itself, # to guard against accidental mutation. Consider, however, code that wants # to save and restore the variable creator stack: # def f(): # original_stack = graph._variable_creator_stack # graph._variable_creator_stack = new_stack # ... # Some code # graph._variable_creator_stack = original_stack # # And lets say you have some code that calls this function with some # variable_creator: # def g(): # with variable_scope.variable_creator_scope(creator): # f() # When exiting the variable creator scope, it would see a different stack # object than it expected leading to a "Exiting variable_creator_scope # without proper nesting" error. return self._thread_local._variable_creator_stack # pylint: disable=protected-access @_variable_creator_stack.setter def _variable_creator_stack(self, variable_creator_stack): self._thread_local._variable_creator_stack = variable_creator_stack # pylint: disable=protected-access def _check_not_finalized(self): """Check if the graph is finalized. Raises: RuntimeError: If the graph finalized. """ if self._finalized: raise RuntimeError("Graph is finalized and cannot be modified.") def _add_op(self, op): """Adds 'op' to the graph. Args: op: the Operator or Tensor to add. Raises: TypeError: if op is not an Operation or Tensor. ValueError: if the op.name or op._id are already used. """ self._check_not_finalized() if not isinstance(op, (Tensor, Operation)): raise TypeError("op must be a Tensor or Operation: %s" % op) with self._lock: # pylint: disable=protected-access if op._id in self._nodes_by_id: raise ValueError("cannot add an op with id %d as it already " "exists in the graph" % op._id) if op.name in self._nodes_by_name: raise ValueError("cannot add op with name %s as that name " "is already used" % op.name) self._nodes_by_id[op._id] = op self._nodes_by_name[op.name] = op self._version = max(self._version, op._id) # pylint: enable=protected-access @property def _c_graph(self): if self._scoped_c_graph: return self._scoped_c_graph.graph return None @property def version(self): """Returns a version number that increases as ops are added to the graph. Note that this is unrelated to the `tf.Graph.graph_def_versions`. Returns: An integer version that increases as ops are added to the graph. """ if self._finalized: return self._version with self._lock: return self._version @property def graph_def_versions(self): # pylint: disable=line-too-long """The GraphDef version information of this graph. 
For details on the meaning of each version, see [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto). Returns: A `VersionDef`. """ # pylint: enable=line-too-long with c_api_util.tf_buffer() as buf: c_api.TF_GraphVersions(self._c_graph, buf) data = c_api.TF_GetBuffer(buf) version_def = versions_pb2.VersionDef() version_def.ParseFromString(compat.as_bytes(data)) return version_def @property def seed(self): """The graph-level random seed of this graph.""" return self._seed @seed.setter def seed(self, seed): self._seed = seed @property def finalized(self): """True if this graph has been finalized.""" return self._finalized def finalize(self): """Finalizes this graph, making it read-only. After calling `g.finalize()`, no new operations can be added to `g`. This method is used to ensure that no operations are added to a graph when it is shared between multiple threads, for example when using a `tf.compat.v1.train.QueueRunner`. """ self._finalized = True def _unsafe_unfinalize(self): """Opposite of `finalize`. Internal interface. NOTE: Unfinalizing a graph could have negative impact on performance, especially in a multi-threaded environment. Unfinalizing a graph when it is in use by a Session may lead to undefined behavior. Ensure that all sessions using a graph are closed before calling this method. """ self._finalized = False def _get_control_flow_context(self): """Returns the current control flow context. Returns: A context object. """ return self._control_flow_context def _set_control_flow_context(self, ctx): """Sets the current control flow context. Args: ctx: a context object. """ self._control_flow_context = ctx def _copy_functions_to_graph_def(self, graph_def, starting_bytesize): """If this graph contains functions, copy them to `graph_def`.""" bytesize = starting_bytesize for f in self._functions.values(): bytesize += f.definition.ByteSize() if bytesize >= (1 << 31) or bytesize < 0: raise ValueError("GraphDef cannot be larger than 2GB.") graph_def.library.function.extend([f.definition]) if f.grad_func_name: grad_def = function_pb2.GradientDef() grad_def.function_name = f.name grad_def.gradient_func = f.grad_func_name graph_def.library.gradient.extend([grad_def]) def _as_graph_def(self, from_version=None, add_shapes=False): # pylint: disable=line-too-long """Returns a serialized `GraphDef` representation of this graph. The serialized `GraphDef` can be imported into another `Graph` (using `tf.import_graph_def`) or used with the [C++ Session API](../../../../api_docs/cc/index.md). This method is thread-safe. Args: from_version: Optional. If this is set, returns a `GraphDef` containing only the nodes that were added to this graph since its `version` property had the given value. add_shapes: If true, adds an "_output_shapes" list attr to each node with the inferred shapes of each of its outputs. Returns: A tuple containing a [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer, and the version of the graph to which that `GraphDef` corresponds. Raises: ValueError: If the `graph_def` would be too large. """ # pylint: enable=line-too-long with self._lock: with c_api_util.tf_buffer() as buf: c_api.TF_GraphToGraphDef(self._c_graph, buf) data = c_api.TF_GetBuffer(buf) graph = graph_pb2.GraphDef() graph.ParseFromString(compat.as_bytes(data)) # Strip the experimental library field iff it's empty. 
if not graph.library.function: graph.ClearField("library") if add_shapes: for node in graph.node: op = self._nodes_by_name[node.name] if op.outputs: node.attr["_output_shapes"].list.shape.extend( [output.get_shape().as_proto() for output in op.outputs]) for function_def in graph.library.function: defined_function = self._functions[function_def.signature.name] try: func_graph = defined_function.graph except AttributeError: # _DefinedFunction doesn't have a graph, _EagerDefinedFunction # does. Both rely on ops.py, so we can't really isinstance check # them. continue input_shapes = function_def.attr["_input_shapes"] try: func_graph_inputs = func_graph.inputs except AttributeError: continue for input_tensor in func_graph_inputs: if input_tensor.dtype == dtypes.resource: # TODO(allenl): Save and restore handle data, then save the # resource placeholder's shape. Right now some shape functions get # confused if we set the shape of the resource placeholder (to a # scalar of course) and there isn't any handle data. input_shapes.list.shape.add().CopyFrom( tensor_shape.TensorShape(None).as_proto()) else: input_shapes.list.shape.add().CopyFrom( input_tensor.get_shape().as_proto()) for node in function_def.node_def: try: op = func_graph.get_operation_by_name(node.name) except KeyError: continue node.attr["_output_shapes"].list.shape.extend( [output.get_shape().as_proto() for output in op.outputs]) return graph, self._version def as_graph_def(self, from_version=None, add_shapes=False): # pylint: disable=line-too-long """Returns a serialized `GraphDef` representation of this graph. The serialized `GraphDef` can be imported into another `Graph` (using `tf.import_graph_def`) or used with the [C++ Session API](../../api_docs/cc/index.md). This method is thread-safe. Args: from_version: Optional. If this is set, returns a `GraphDef` containing only the nodes that were added to this graph since its `version` property had the given value. add_shapes: If true, adds an "_output_shapes" list attr to each node with the inferred shapes of each of its outputs. Returns: A [`GraphDef`](https://www.tensorflow.org/code/tensorflow/core/framework/graph.proto) protocol buffer. Raises: ValueError: If the `graph_def` would be too large. """ # pylint: enable=line-too-long result, _ = self._as_graph_def(from_version, add_shapes) return result def _is_function(self, name): """Tests whether 'name' is registered in this graph's function library. Args: name: string op name. Returns: bool indicating whether or not 'name' is registered in function library. """ return compat.as_str(name) in self._functions def _get_function(self, name): """Returns the function definition for 'name'. Args: name: string function name. Returns: The function def proto. """ return self._functions.get(compat.as_str(name), None) def _add_function(self, function): """Adds a function to the graph. After the function has been added, you can call to the function by passing the function name in place of an op name to `Graph.create_op()`. Args: function: A `_DefinedFunction` object. Raises: ValueError: if another function is defined with the same name. """ name = function.name # Sanity checks on gradient definition. 
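    # A function may define its gradient either as another graph function
    # (via `grad_func_name`) or as a Python callable (`python_grad_func`),
    # but not both at once; the check below enforces this.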
if (function.grad_func_name is not None) and (function.python_grad_func is not None): raise ValueError("Gradient defined twice for function %s" % name) # Add function to graph # pylint: disable=protected-access gradient = ( function._grad_func._c_func.func if function._grad_func else None) c_api.TF_GraphCopyFunction(self._c_graph, function._c_func.func, gradient) # pylint: enable=protected-access self._functions[compat.as_str(name)] = function # Need a new-enough consumer to support the functions we add to the graph. if self._graph_def_versions.min_consumer < 12: self._graph_def_versions.min_consumer = 12 @property def building_function(self): """Returns True iff this graph represents a function.""" return self._building_function # Helper functions to create operations. @deprecated_args(None, "Shapes are always computed; don't use the compute_shapes " "as it has no effect.", "compute_shapes") def create_op( self, op_type, inputs, dtypes=None, # pylint: disable=redefined-outer-name input_types=None, name=None, attrs=None, op_def=None, compute_shapes=True, compute_device=True): """Creates an `Operation` in this graph. This is a low-level interface for creating an `Operation`. Most programs will not call this method directly, and instead use the Python op constructors, such as `tf.constant()`, which add ops to the default graph. Args: op_type: The `Operation` type to create. This corresponds to the `OpDef.name` field for the proto that defines the operation. inputs: A list of `Tensor` objects that will be inputs to the `Operation`. dtypes: (Optional) A list of `DType` objects that will be the types of the tensors that the operation produces. input_types: (Optional.) A list of `DType`s that will be the types of the tensors that the operation consumes. By default, uses the base `DType` of each input in `inputs`. Operations that expect reference-typed inputs must specify `input_types` explicitly. name: (Optional.) A string name for the operation. If not specified, a name is generated based on `op_type`. attrs: (Optional.) A dictionary where the key is the attribute name (a string) and the value is the respective `attr` attribute of the `NodeDef` proto that will represent the operation (an `AttrValue` proto). op_def: (Optional.) The `OpDef` proto that describes the `op_type` that the operation will have. compute_shapes: (Optional.) Deprecated. Has no effect (shapes are always computed). compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Raises: TypeError: if any of the inputs is not a `Tensor`. ValueError: if colocation conflicts with existing device assignment. Returns: An `Operation` object. """ del compute_shapes self._check_not_finalized() for idx, a in enumerate(inputs): if not isinstance(a, Tensor): raise TypeError("Input #%d is not a tensor: %s" % (idx, a)) if name is None: name = op_type # If a names ends with a '/' it is a "name scope" and we use it as-is, # after removing the trailing '/'. if name and name[-1] == "/": name = name_from_scope_name(name) else: name = self.unique_name(name) node_def = _NodeDef(op_type, name, device=None, attrs=attrs) input_ops = set([t.op for t in inputs]) control_inputs = self._control_dependencies_for_inputs(input_ops) # _create_op_helper mutates the new Operation. `_mutation_lock` ensures a # Session.run call cannot occur between creating and mutating the op. 
with self._mutation_lock(): ret = Operation( node_def, self, inputs=inputs, output_types=dtypes, control_inputs=control_inputs, input_types=input_types, original_op=self._default_original_op, op_def=op_def) self._create_op_helper(ret, compute_device=compute_device) return ret def _create_op_from_tf_operation(self, c_op, compute_device=True): """Creates an `Operation` in this graph from the supplied TF_Operation. This method is like create_op() except the new Operation is constructed using `c_op`. The returned Operation will have `c_op` as its _c_op field. This is used to create Operation objects around TF_Operations created indirectly by the C API (e.g. by TF_ImportGraphDef, TF_FinishWhile). This function does not call Operation._control_flow_post_processing or Graph._control_dependencies_for_inputs (since the inputs may not be available yet). The caller is responsible for calling these methods. Args: c_op: a wrapped TF_Operation compute_device: (Optional.) If True, device functions will be executed to compute the device property of the Operation. Returns: An `Operation` object. """ self._check_not_finalized() ret = Operation(c_op, self) # If a name_scope was created with ret.name but no nodes were created in it, # the name will still appear in _names_in_use even though the name hasn't # been used. This is ok, just leave _names_in_use as-is in this case. # TODO(skyewm): make the C API guarantee no name conflicts. name_key = ret.name.lower() if name_key not in self._names_in_use: self._names_in_use[name_key] = 1 self._create_op_helper(ret, compute_device=compute_device) return ret def _create_op_helper(self, op, compute_device=True): """Common logic for creating an op in this graph.""" # Apply any additional attributes requested. Do not overwrite any existing # attributes. for key, value in self._attr_scope_map.items(): try: op.get_attr(key) except ValueError: if callable(value): value = value(op.node_def) if not isinstance(value, (type(None), attr_value_pb2.AttrValue)): raise TypeError( "Callable for scope map key '%s' must return either None or " "an AttrValue protocol buffer; but it returned: %s" % (key, value)) if value: op._set_attr(key, value) # pylint: disable=protected-access # Apply a kernel label if one has been specified for this op type. try: kernel_label = self._op_to_kernel_label_map[op.type] op._set_attr("_kernel", # pylint: disable=protected-access attr_value_pb2.AttrValue(s=compat.as_bytes(kernel_label))) except KeyError: pass # Apply the overriding op type for gradients if one has been specified for # this op type. try: mapped_op_type = self._gradient_override_map[op.type] op._set_attr("_gradient_op_type", # pylint: disable=protected-access attr_value_pb2.AttrValue(s=compat.as_bytes(mapped_op_type))) except KeyError: pass self._record_op_seen_by_control_dependencies(op) if compute_device: self._apply_device_functions(op) # Snapshot the colocation stack metadata before we might generate error # messages using it. Note that this snapshot depends on the actual stack # and is independent of the op's _class attribute. 
# pylint: disable=protected-access op._colocation_code_locations = self._snapshot_colocation_stack_metadata() # pylint: enable=protected-access if self._colocation_stack: all_colocation_groups = [] for colocation_op in self._colocation_stack.peek_objs(): all_colocation_groups.extend(colocation_op.colocation_groups()) if colocation_op.device: # pylint: disable=protected-access op._set_device(colocation_op.device) # pylint: enable=protected-access all_colocation_groups = sorted(set(all_colocation_groups)) # pylint: disable=protected-access op._set_attr( "_class", attr_value_pb2.AttrValue( list=attr_value_pb2.AttrValue.ListValue(s=all_colocation_groups))) # pylint: enable=protected-access # Sets "container" attribute if # (1) self._container is not None # (2) "is_stateful" is set in OpDef # (3) "container" attribute is in OpDef # (4) "container" attribute is None if self._container and op.op_def.is_stateful: try: container_attr = op.get_attr("container") except ValueError: # "container" attribute is not in OpDef pass else: if not container_attr: op._set_attr("container", attr_value_pb2.AttrValue( # pylint: disable=protected-access s=compat.as_bytes(self._container))) def _add_new_tf_operations(self, compute_devices=True): """Creates `Operations` in this graph for any new TF_Operations. This is useful for when TF_Operations are indirectly created by the C API outside of the Operation constructor (e.g. by TF_ImportGraphDef, TF_FinishWhile). This ensures there are corresponding Operations for all TF_Operations in the underlying TF_Graph. Args: compute_devices: (Optional.) If True, device functions will be executed to compute the device properties of each new Operation. Returns: A list of the new `Operation` objects. """ # Create all Operation objects before accessing their inputs since an op may # be created before its inputs. new_ops = [ self._create_op_from_tf_operation(c_op, compute_device=compute_devices) for c_op in c_api_util.new_tf_operations(self) ] # pylint: disable=protected-access for op in new_ops: new_control_inputs = self._control_dependencies_for_inputs(op.inputs) op._add_control_inputs(new_control_inputs) op._control_flow_post_processing() # pylint: enable=protected-access return new_ops def as_graph_element(self, obj, allow_tensor=True, allow_operation=True): """Returns the object referred to by `obj`, as an `Operation` or `Tensor`. This function validates that `obj` represents an element of this graph, and gives an informative error message if it is not. This function is the canonical way to get/validate an object of one of the allowed types from an external argument reference in the Session API. This method may be called concurrently from multiple threads. Args: obj: A `Tensor`, an `Operation`, or the name of a tensor or operation. Can also be any object with an `_as_graph_element()` method that returns a value of one of these types. allow_tensor: If true, `obj` may refer to a `Tensor`. allow_operation: If true, `obj` may refer to an `Operation`. Returns: The `Tensor` or `Operation` in the Graph corresponding to `obj`. Raises: TypeError: If `obj` is not a type we support attempting to convert to types. ValueError: If `obj` is of an appropriate type but invalid. For example, an invalid string. KeyError: If `obj` is not an object in the graph. 
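    For example, a minimal sketch (TF1-style graph construction is assumed):

    ```python
    g = tf.Graph()
    with g.as_default():
      c = tf.constant(1.0, name="c")
    assert g.as_graph_element("c:0") is c    # tensor name -> Tensor
    assert g.as_graph_element("c") is c.op   # operation name -> Operation
    assert g.as_graph_element(c) is c        # Tensors pass through unchanged
    ```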
""" if self._finalized: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) with self._lock: return self._as_graph_element_locked(obj, allow_tensor, allow_operation) def _as_graph_element_locked(self, obj, allow_tensor, allow_operation): """See `Graph.as_graph_element()` for details.""" # The vast majority of this function is figuring # out what an API user might be doing wrong, so # that we can give helpful error messages. # # Ideally, it would be nice to split it up, but we # need context to generate nice error messages. if allow_tensor and allow_operation: types_str = "Tensor or Operation" elif allow_tensor: types_str = "Tensor" elif allow_operation: types_str = "Operation" else: raise ValueError("allow_tensor and allow_operation can't both be False.") temp_obj = _as_graph_element(obj) if temp_obj is not None: obj = temp_obj # If obj appears to be a name... if isinstance(obj, compat.bytes_or_text_types): name = compat.as_str(obj) if ":" in name and allow_tensor: # Looks like a Tensor name and can be a Tensor. try: op_name, out_n = name.split(":") out_n = int(out_n) except: raise ValueError("The name %s looks a like a Tensor name, but is " "not a valid one. Tensor names must be of the " "form \"<op_name>:<output_index>\"." % repr(name)) if op_name in self._nodes_by_name: op = self._nodes_by_name[op_name] else: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, does not exist in the " "graph." % (repr(name), repr(op_name))) try: return op.outputs[out_n] except: raise KeyError("The name %s refers to a Tensor which does not " "exist. The operation, %s, exists but only has " "%s outputs." % (repr(name), repr(op_name), len(op.outputs))) elif ":" in name and not allow_tensor: # Looks like a Tensor name but can't be a Tensor. raise ValueError("Name %s appears to refer to a Tensor, not a %s." % (repr(name), types_str)) elif ":" not in name and allow_operation: # Looks like an Operation name and can be an Operation. if name not in self._nodes_by_name: raise KeyError("The name %s refers to an Operation not in the " "graph." % repr(name)) return self._nodes_by_name[name] elif ":" not in name and not allow_operation: # Looks like an Operation name but can't be an Operation. if name in self._nodes_by_name: # Yep, it's an Operation name err_msg = ("The name %s refers to an Operation, not a %s." % (repr(name), types_str)) else: err_msg = ("The name %s looks like an (invalid) Operation name, " "not a %s." % (repr(name), types_str)) err_msg += (" Tensor names must be of the form " "\"<op_name>:<output_index>\".") raise ValueError(err_msg) elif isinstance(obj, Tensor) and allow_tensor: # Actually obj is just the object it's referring to. if obj.graph is not self: raise ValueError("Tensor %s is not an element of this graph." % obj) return obj elif isinstance(obj, Operation) and allow_operation: # Actually obj is just the object it's referring to. if obj.graph is not self: raise ValueError("Operation %s is not an element of this graph." % obj) return obj else: # We give up! raise TypeError("Can not convert a %s into a %s." % (type(obj).__name__, types_str)) def get_operations(self): """Return the list of operations in the graph. You can modify the operations in place, but modifications to the list such as inserts/delete have no effect on the list of operations known to the graph. This method may be called concurrently from multiple threads. Returns: A list of Operations. 
""" if self._finalized: return list(self._nodes_by_id.values()) with self._lock: return list(self._nodes_by_id.values()) def get_operation_by_name(self, name): """Returns the `Operation` with the given `name`. This method may be called concurrently from multiple threads. Args: name: The name of the `Operation` to return. Returns: The `Operation` with the given `name`. Raises: TypeError: If `name` is not a string. KeyError: If `name` does not correspond to an operation in this graph. """ if not isinstance(name, six.string_types): raise TypeError("Operation names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=False, allow_operation=True) def _get_operation_by_name_unsafe(self, name): """Returns the `Operation` with the given `name`. This is a internal unsafe version of get_operation_by_name. It skips many checks and does not have user friedly error messages but runs considerably faster. This method may be called concurrently from multiple threads. Args: name: The name of the `Operation` to return. Returns: The `Operation` with the given `name`. Raises: KeyError: If `name` does not correspond to an operation in this graph. """ if self._finalized: return self._nodes_by_name[name] with self._lock: return self._nodes_by_name[name] def _get_operation_by_tf_operation(self, tf_oper): op_name = c_api.TF_OperationName(tf_oper) return self._get_operation_by_name_unsafe(op_name) def get_tensor_by_name(self, name): """Returns the `Tensor` with the given `name`. This method may be called concurrently from multiple threads. Args: name: The name of the `Tensor` to return. Returns: The `Tensor` with the given `name`. Raises: TypeError: If `name` is not a string. KeyError: If `name` does not correspond to a tensor in this graph. """ # Names should be strings. if not isinstance(name, six.string_types): raise TypeError("Tensor names are strings (or similar), not %s." % type(name).__name__) return self.as_graph_element(name, allow_tensor=True, allow_operation=False) def _get_tensor_by_tf_output(self, tf_output): """Returns the `Tensor` representing `tf_output`. Note that there is only one such `Tensor`, i.e. multiple calls to this function with the same TF_Output value will always return the same `Tensor` object. Args: tf_output: A wrapped `TF_Output` (the C API equivalent of `Tensor`). Returns: The `Tensor` that represents `tf_output`. """ op = self._get_operation_by_tf_operation(tf_output.oper) return op.outputs[tf_output.index] def _next_id(self): """Id for next Operation instance. Also increments the internal id.""" self._check_not_finalized() with self._lock: self._next_id_counter += 1 return self._next_id_counter @property def _last_id(self): return self._next_id_counter def _get_op_def(self, type): # pylint: disable=redefined-builtin """Returns the `OpDef` proto for `type`. `type` is a string.""" with c_api_util.tf_buffer() as buf: # pylint: disable=protected-access c_api.TF_GraphGetOpDef(self._c_graph, compat.as_bytes(type), buf) # pylint: enable=protected-access data = c_api.TF_GetBuffer(buf) op_def = op_def_pb2.OpDef() op_def.ParseFromString(compat.as_bytes(data)) return op_def def as_default(self): """Returns a context manager that makes this `Graph` the default graph. This method should be used if you want to create multiple graphs in the same process. For convenience, a global default graph is provided, and all ops will be added to this graph if you do not create a new graph explicitly. 
Use this method with the `with` keyword to specify that ops created within the scope of a block should be added to this graph. In this case, once the scope of the `with` is exited, the previous default graph is set again as default. There is a stack, so it's ok to have multiple nested levels of `as_default` calls. The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. The following code examples are equivalent: ```python # 1. Using Graph.as_default(): g = tf.Graph() with g.as_default(): c = tf.constant(5.0) assert c.graph is g # 2. Constructing and making default: with tf.Graph().as_default() as g: c = tf.constant(5.0) assert c.graph is g ``` If eager execution is enabled ops created under this context manager will be added to the graph instead of executed eagerly. Returns: A context manager for using this graph as the default graph. """ return _default_graph_stack.get_controller(self) @property def collections(self): """Returns the names of the collections known to this graph.""" return list(self._collections) def add_to_collection(self, name, value): """Stores `value` in the collection with the given `name`. Note that collections are not sets, so it is possible to add a value to a collection several times. Args: name: The key for the collection. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collection. """ # pylint: disable=g-doc-exception self._check_not_finalized() with self._lock: if name not in self._collections: self._collections[name] = [value] else: self._collections[name].append(value) def add_to_collections(self, names, value): """Stores `value` in the collections given by `names`. Note that collections are not sets, so it is possible to add a value to a collection several times. This function makes sure that duplicates in `names` are ignored, but it will not check for pre-existing membership of `value` in any of the collections in `names`. `names` can be any iterable, but if `names` is a string, it is treated as a single collection name. Args: names: The keys for the collections to add to. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections. """ # Make sure names are unique, but treat strings as a single collection name names = (names,) if isinstance(names, six.string_types) else set(names) for name in names: self.add_to_collection(name, value) def get_collection_ref(self, name): """Returns a list of values in the collection with the given `name`. If the collection exists, this returns the list itself, which can be modified in place to change the collection. If the collection does not exist, it is created as an empty list and the list is returned. This is different from `get_collection()` which always returns a copy of the collection list if it exists and never creates an empty collection. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. 
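    For example, a minimal sketch of the in-place semantics:

    ```python
    g = tf.Graph()
    ref = g.get_collection_ref("my_collection")
    ref.append("value")   # mutates the collection itself
    assert g.get_collection("my_collection") == ["value"]
    ```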
""" # pylint: disable=g-doc-exception with self._lock: coll_list = self._collections.get(name, None) if coll_list is None: coll_list = [] self._collections[name] = coll_list return coll_list def get_collection(self, name, scope=None): """Returns a list of values in the collection with the given `name`. This is different from `get_collection_ref()` which always returns the actual collection list if it exists in that it returns a new list each time it is called. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) A string. If supplied, the resulting list is filtered to include only items whose `name` attribute matches `scope` using `re.match`. Items without a `name` attribute are never returned if a scope is supplied. The choice of `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. """ # pylint: disable=g-doc-exception with self._lock: collection = self._collections.get(name, None) if collection is None: return [] if scope is None: return list(collection) else: c = [] regex = re.compile(scope) for item in collection: if hasattr(item, "name") and regex.match(item.name): c.append(item) return c def get_all_collection_keys(self): """Returns a list of collections used in this graph.""" with self._lock: return [x for x in self._collections if isinstance(x, six.string_types)] def clear_collection(self, name): """Clears all values in a collection. Args: name: The key for the collection. The `GraphKeys` class contains many standard names for collections. """ self._check_not_finalized() with self._lock: if name in self._collections: del self._collections[name] @tf_contextlib.contextmanager def _original_op(self, op): """Python 'with' handler to help annotate ops with their originator. An op may have an 'original_op' property that indicates the op on which it was based. For example a replica op is based on the op that was replicated and a gradient op is based on the op that was differentiated. All ops created in the scope of this 'with' handler will have the given 'op' as their original op. Args: op: The Operation that all ops created in this scope will have as their original op. Yields: Nothing. """ old_original_op = self._default_original_op self._default_original_op = op try: yield finally: self._default_original_op = old_original_op @property def _name_stack(self): # This may be called from a thread where name_stack doesn't yet exist. if not hasattr(self._thread_local, "_name_stack"): self._thread_local._name_stack = "" return self._thread_local._name_stack @_name_stack.setter def _name_stack(self, name_stack): self._thread_local._name_stack = name_stack # pylint: disable=g-doc-return-or-yield,line-too-long @tf_contextlib.contextmanager def name_scope(self, name): """Returns a context manager that creates hierarchical names for operations. A graph maintains a stack of name scopes. A `with name_scope(...):` statement pushes a new name onto the stack for the lifetime of the context. The `name` argument will be interpreted as follows: * A string (not ending with '/') will create a new name scope, in which `name` is appended to the prefix of all operations created in the context. If `name` has been used before, it will be made unique by calling `self.unique_name(name)`. 
* A scope previously captured from a `with g.name_scope(...) as scope:` statement will be treated as an "absolute" name scope, which makes it possible to re-enter existing scopes. * A value of `None` or the empty string will reset the current name scope to the top-level (empty) name scope. For example: ```python with tf.Graph().as_default() as g: c = tf.constant(5.0, name="c") assert c.op.name == "c" c_1 = tf.constant(6.0, name="c") assert c_1.op.name == "c_1" # Creates a scope called "nested" with g.name_scope("nested") as scope: nested_c = tf.constant(10.0, name="c") assert nested_c.op.name == "nested/c" # Creates a nested scope called "inner". with g.name_scope("inner"): nested_inner_c = tf.constant(20.0, name="c") assert nested_inner_c.op.name == "nested/inner/c" # Create a nested scope called "inner_1". with g.name_scope("inner"): nested_inner_1_c = tf.constant(30.0, name="c") assert nested_inner_1_c.op.name == "nested/inner_1/c" # Treats `scope` as an absolute name scope, and # switches to the "nested/" scope. with g.name_scope(scope): nested_d = tf.constant(40.0, name="d") assert nested_d.op.name == "nested/d" with g.name_scope(""): e = tf.constant(50.0, name="e") assert e.op.name == "e" ``` The name of the scope itself can be captured by `with g.name_scope(...) as scope:`, which stores the name of the scope in the variable `scope`. This value can be used to name an operation that represents the overall result of executing the ops in a scope. For example: ```python inputs = tf.constant(...) with g.name_scope('my_layer') as scope: weights = tf.Variable(..., name="weights") biases = tf.Variable(..., name="biases") affine = tf.matmul(inputs, weights) + biases output = tf.nn.relu(affine, name=scope) ``` NOTE: This constructor validates the given `name`. Valid scope names match one of the following regular expressions: [A-Za-z0-9.][A-Za-z0-9_.\\-/]* (for scopes at the root) [A-Za-z0-9_.\\-/]* (for other scopes) Args: name: A name for the scope. Returns: A context manager that installs `name` as a new name scope. Raises: ValueError: If `name` is not a valid scope name, according to the rules above. """ if name: if isinstance(name, compat.bytes_or_text_types): name = compat.as_str(name) if self._name_stack: # Scopes created in a nested scope may have initial characters # that are illegal as the initial character of an op name # (viz. '-', '\', '/', and '_'). if not _VALID_SCOPE_NAME_REGEX.match(name): raise ValueError("'%s' is not a valid scope name" % name) else: # Scopes created in the root must match the more restrictive # op name regex, which constrains the initial character. if not _VALID_OP_NAME_REGEX.match(name): raise ValueError("'%s' is not a valid scope name" % name) old_stack = self._name_stack if not name: # Both for name=None and name="" we re-set to empty scope. new_stack = None elif name[-1] == "/": new_stack = name_from_scope_name(name) else: new_stack = self.unique_name(name) self._name_stack = new_stack try: yield "" if new_stack is None else new_stack + "/" finally: self._name_stack = old_stack # pylint: enable=g-doc-return-or-yield,line-too-long def unique_name(self, name, mark_as_used=True): """Return a unique operation name for `name`. Note: You rarely need to call `unique_name()` directly. Most of the time you just need to create `with g.name_scope()` blocks to generate structured names. `unique_name` is used to generate structured names, separated by `"/"`, to help identify operations when debugging a graph. 
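    For example, an illustrative sketch of the numbering behavior:

    ```python
    g = tf.Graph()
    assert g.unique_name("foo") == "foo"
    assert g.unique_name("foo") == "foo_1"
    assert g.unique_name("foo", mark_as_used=False) == "foo_2"
    assert g.unique_name("foo") == "foo_2"  # the previous call did not reserve it
    ```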
Operation names are displayed in error messages reported by the TensorFlow runtime, and in various visualization tools such as TensorBoard. If `mark_as_used` is set to `True`, which is the default, a new unique name is created and marked as in use. If it's set to `False`, the unique name is returned without actually being marked as used. This is useful when the caller simply wants to know what the name to be created will be. Args: name: The name for an operation. mark_as_used: Whether to mark this name as being used. Returns: A string to be passed to `create_op()` that will be used to name the operation being created. """ if self._name_stack: name = self._name_stack + "/" + name # For the sake of checking for names in use, we treat names as case # insensitive (e.g. foo = Foo). name_key = name.lower() i = self._names_in_use.get(name_key, 0) # Increment the number for "name_key". if mark_as_used: self._names_in_use[name_key] = i + 1 if i > 0: base_name_key = name_key # Make sure the composed name key is not already used. while name_key in self._names_in_use: name_key = "%s_%d" % (base_name_key, i) i += 1 # Mark the composed name_key as used in case someone wants # to call unique_name("name_1"). if mark_as_used: self._names_in_use[name_key] = 1 # Return the new name with the original capitalization of the given name. name = "%s_%d" % (name, i - 1) return name def get_name_scope(self): """Returns the current name scope. For example: ```python with tf.name_scope('scope1'): with tf.name_scope('scope2'): print(tf.compat.v1.get_default_graph().get_name_scope()) ``` would print the string `scope1/scope2`. Returns: A string representing the current name scope. """ return self._name_stack @tf_contextlib.contextmanager def _colocate_with_for_gradient(self, op, gradient_uid, ignore_existing=False): with self.colocate_with(op, ignore_existing): if gradient_uid is not None and self._control_flow_context is not None: self._control_flow_context.EnterGradientColocation(op, gradient_uid) try: yield finally: self._control_flow_context.ExitGradientColocation(op, gradient_uid) else: yield @tf_contextlib.contextmanager def colocate_with(self, op, ignore_existing=False): """Returns a context manager that specifies an op to colocate with. Note: this function is not for public use, only for internal libraries. For example: ```python a = tf.Variable([1.0]) with g.colocate_with(a): b = tf.constant(1.0) c = tf.add(a, b) ``` `b` and `c` will always be colocated with `a`, no matter where `a` is eventually placed. **NOTE** Using a colocation scope resets any existing device constraints. If `op` is `None` then `ignore_existing` must be `True` and the new scope resets all colocation and device constraints. Args: op: The op to colocate all created ops with, or `None`. ignore_existing: If true, only applies colocation of this op within the context, rather than applying all colocation properties on the stack. If `op` is `None`, this value must be `True`. Raises: ValueError: if op is None but ignore_existing is False. Yields: A context manager that specifies the op with which to colocate newly created ops. """ if op is None and not ignore_existing: raise ValueError("Trying to reset colocation (op is None) but " "ignore_existing is not True") op = _op_to_colocate_with(op) # By default, colocate_with resets the device function stack, # since colocate_with is typically used in specific internal # library functions where colocation is intended to be "stronger" # than device functions. 
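    # For example (illustrative): inside `with g.device("/cpu:0"):`, entering
    # `g.colocate_with(a)` makes new ops follow `a`'s placement rather than
    # the surrounding /cpu:0 scope, until the colocation scope exits.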
# # In the future, a caller may specify that device_functions win # over colocation, in which case we can add support. device_fn_tmp = self._device_function_stack self._device_function_stack = traceable_stack.TraceableStack() if ignore_existing: current_stack = self._colocation_stack self._colocation_stack = traceable_stack.TraceableStack() if op is not None: # offset refers to the stack frame used for storing code location. # We use 4, the sum of 1 to use our caller's stack frame and 3 # to jump over layers of context managers above us. self._colocation_stack.push_obj(op, offset=4) try: yield finally: # Restore device function stack self._device_function_stack = device_fn_tmp if op is not None: self._colocation_stack.pop_obj() # Reset the colocation stack if requested. if ignore_existing: self._colocation_stack = current_stack def _add_device_to_stack(self, device_name_or_function, offset=0): """Add device to stack manually, separate from a context manager.""" total_offset = 1 + offset spec = _UserDeviceSpec(device_name_or_function) self._device_function_stack.push_obj(spec, offset=total_offset) return spec @tf_contextlib.contextmanager def device(self, device_name_or_function): # pylint: disable=line-too-long """Returns a context manager that specifies the default device to use. The `device_name_or_function` argument may either be a device name string, a device function, or None: * If it is a device name string, all operations constructed in this context will be assigned to the device with that name, unless overridden by a nested `device()` context. * If it is a function, it will be treated as a function from Operation objects to device name strings, and invoked each time a new Operation is created. The Operation will be assigned to the device with the returned name. * If it is None, all `device()` invocations from the enclosing context will be ignored. For information about the valid syntax of device name strings, see the documentation in [`DeviceNameUtils`](https://www.tensorflow.org/code/tensorflow/core/util/device_name_utils.h). For example: ```python with g.device('/device:GPU:0'): # All operations constructed in this context will be placed # on GPU 0. with g.device(None): # All operations constructed in this context will have no # assigned device. # Defines a function from `Operation` to device string. def matmul_on_gpu(n): if n.type == "MatMul": return "/device:GPU:0" else: return "/cpu:0" with g.device(matmul_on_gpu): # All operations of type "MatMul" constructed in this context # will be placed on GPU 0; all other operations will be placed # on CPU 0. ``` **N.B.** The device scope may be overridden by op wrappers or other library code. For example, a variable assignment op `v.assign()` must be colocated with the `tf.Variable` `v`, and incompatible device scopes will be ignored. Args: device_name_or_function: The device name or function to use in the context. Yields: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If device scopes are not properly nested. 
""" self._add_device_to_stack(device_name_or_function, offset=2) old_top_of_stack = self._device_function_stack.peek_top_obj() try: yield finally: new_top_of_stack = self._device_function_stack.peek_top_obj() if old_top_of_stack is not new_top_of_stack: raise RuntimeError("Exiting device scope without proper scope nesting.") self._device_function_stack.pop_obj() def _apply_device_functions(self, op): """Applies the current device function stack to the given operation.""" # Apply any device functions in LIFO order, so that the most recently # pushed function has the first chance to apply a device to the op. # We apply here because the result can depend on the Operation's # signature, which is computed in the Operation constructor. # pylint: disable=protected-access prior_device_string = None for device_spec in self._device_function_stack.peek_objs(): if device_spec.is_null_merge: continue if device_spec.function is None: break device_string = device_spec.string_merge(op) # Take advantage of the fact that None is a singleton and Python interns # strings, since identity checks are faster than equality checks. if device_string is not prior_device_string: op._set_device_from_string(device_string) prior_device_string = device_string op._device_code_locations = self._snapshot_device_function_stack_metadata() # pylint: enable=protected-access # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def container(self, container_name): """Returns a context manager that specifies the resource container to use. Stateful operations, such as variables and queues, can maintain their states on devices so that they can be shared by multiple processes. A resource container is a string name under which these stateful operations are tracked. These resources can be released or cleared with `tf.Session.reset()`. For example: ```python with g.container('experiment0'): # All stateful Operations constructed in this context will be placed # in resource container "experiment0". v1 = tf.Variable([1.0]) v2 = tf.Variable([2.0]) with g.container("experiment1"): # All stateful Operations constructed in this context will be # placed in resource container "experiment1". v3 = tf.Variable([3.0]) q1 = tf.queue.FIFOQueue(10, tf.float32) # All stateful Operations constructed in this context will be # be created in the "experiment0". v4 = tf.Variable([4.0]) q1 = tf.queue.FIFOQueue(20, tf.float32) with g.container(""): # All stateful Operations constructed in this context will be # be placed in the default resource container. v5 = tf.Variable([5.0]) q3 = tf.queue.FIFOQueue(30, tf.float32) # Resets container "experiment0", after which the state of v1, v2, v4, q1 # will become undefined (such as uninitialized). tf.Session.reset(target, ["experiment0"]) ``` Args: container_name: container name string. Returns: A context manager for defining resource containers for stateful ops, yields the container name. """ original_container = self._container self._container = container_name try: yield self._container finally: self._container = original_container # pylint: enable=g-doc-return-or-yield class _ControlDependenciesController(object): """Context manager for `control_dependencies()`.""" def __init__(self, graph, control_inputs): """Create a new `_ControlDependenciesController`. A `_ControlDependenciesController` is the context manager for `with tf.control_dependencies()` blocks. These normally nest, as described in the documentation for `control_dependencies()`. 
The `control_inputs` argument list control dependencies that must be added to the current set of control dependencies. Because of uniquification the set can be empty even if the caller passed a list of ops. The special value `None` indicates that we want to start a new empty set of control dependencies instead of extending the current set. In that case we also clear the current control flow context, which is an additional mechanism to add control dependencies. Args: graph: The graph that this controller is managing. control_inputs: List of ops to use as control inputs in addition to the current control dependencies. None to indicate that the dependencies should be cleared. """ self._graph = graph if control_inputs is None: self._control_inputs_val = [] self._new_stack = True else: self._control_inputs_val = control_inputs self._new_stack = False self._seen_nodes = set() self._old_stack = None self._old_control_flow_context = None # pylint: disable=protected-access def __enter__(self): if self._new_stack: # Clear the control_dependencies graph. self._old_stack = self._graph._control_dependencies_stack self._graph._control_dependencies_stack = [] # Clear the control_flow_context too. self._old_control_flow_context = self._graph._get_control_flow_context() self._graph._set_control_flow_context(None) self._graph._push_control_dependencies_controller(self) def __exit__(self, unused_type, unused_value, unused_traceback): self._graph._pop_control_dependencies_controller(self) if self._new_stack: self._graph._control_dependencies_stack = self._old_stack self._graph._set_control_flow_context(self._old_control_flow_context) # pylint: enable=protected-access @property def control_inputs(self): return self._control_inputs_val def add_op(self, op): self._seen_nodes.add(op) def op_in_group(self, op): return op in self._seen_nodes def _push_control_dependencies_controller(self, controller): self._control_dependencies_stack.append(controller) def _pop_control_dependencies_controller(self, controller): assert self._control_dependencies_stack[-1] is controller self._control_dependencies_stack.pop() def _current_control_dependencies(self): ret = set() for controller in self._control_dependencies_stack: for op in controller.control_inputs: ret.add(op) return ret def _control_dependencies_for_inputs(self, input_ops): """For an op that takes `input_ops` as inputs, compute control inputs. The returned control dependencies should yield an execution that is equivalent to adding all control inputs in self._control_dependencies_stack to a newly created op. However, this function attempts to prune the returned control dependencies by observing that nodes created within the same `with control_dependencies(...):` block may have data dependencies that make the explicit approach redundant. Args: input_ops: The data input ops for an op to be created. Returns: A list of control inputs for the op to be created. """ ret = [] for controller in self._control_dependencies_stack: # If any of the input_ops already depends on the inputs from controller, # we say that the new op is dominated (by that input), and we therefore # do not need to add control dependencies for this controller's inputs. dominated = False for op in input_ops: if controller.op_in_group(op): dominated = True break if not dominated: # Don't add a control input if we already have a data dependency on i. # NOTE(mrry): We do not currently track transitive data dependencies, # so we may add redundant control inputs. 
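        # For example (illustrative): under `g.control_dependencies([x])`, an
        # op whose data inputs already include `x` is ordered after `x` by the
        # data edge alone, so `x` is filtered out of the control inputs below.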
ret.extend([c for c in controller.control_inputs if c not in input_ops]) return ret def _record_op_seen_by_control_dependencies(self, op): """Record that the given op depends on all registered control dependencies. Args: op: An Operation. """ for controller in self._control_dependencies_stack: controller.add_op(op) def control_dependencies(self, control_inputs): """Returns a context manager that specifies control dependencies. Use with the `with` keyword to specify that all operations constructed within the context should have control dependencies on `control_inputs`. For example: ```python with g.control_dependencies([a, b, c]): # `d` and `e` will only run after `a`, `b`, and `c` have executed. d = ... e = ... ``` Multiple calls to `control_dependencies()` can be nested, and in that case a new `Operation` will have control dependencies on the union of `control_inputs` from all active contexts. ```python with g.control_dependencies([a, b]): # Ops constructed here run after `a` and `b`. with g.control_dependencies([c, d]): # Ops constructed here run after `a`, `b`, `c`, and `d`. ``` You can pass None to clear the control dependencies: ```python with g.control_dependencies([a, b]): # Ops constructed here run after `a` and `b`. with g.control_dependencies(None): # Ops constructed here run normally, not waiting for either `a` or `b`. with g.control_dependencies([c, d]): # Ops constructed here run after `c` and `d`, also not waiting # for either `a` or `b`. ``` *N.B.* The control dependencies context applies *only* to ops that are constructed within the context. Merely using an op or tensor in the context does not add a control dependency. The following example illustrates this point: ```python # WRONG def my_func(pred, tensor): t = tf.matmul(tensor, tensor) with tf.control_dependencies([pred]): # The matmul op is created outside the context, so no control # dependency will be added. return t # RIGHT def my_func(pred, tensor): with tf.control_dependencies([pred]): # The matmul op is created in the context, so a control dependency # will be added. return tf.matmul(tensor, tensor) ``` Also note that though execution of ops created under this scope will trigger execution of the dependencies, the ops created under this scope might still be pruned from a normal tensorflow graph. For example, in the following snippet of code the dependencies are never executed: ```python loss = model.loss() with tf.control_dependencies(dependencies): loss = loss + tf.constant(1) # note: dependencies ignored in the # backward pass return tf.gradients(loss, model.variables) ``` This is because evaluating the gradient graph does not require evaluating the constant(1) op created in the forward pass. Args: control_inputs: A list of `Operation` or `Tensor` objects which must be executed or computed before running the operations defined in the context. Can also be `None` to clear the control dependencies. Returns: A context manager that specifies control dependencies for all operations constructed within the context. Raises: TypeError: If `control_inputs` is not a list of `Operation` or `Tensor` objects. """ if control_inputs is None: return self._ControlDependenciesController(self, None) # First convert the inputs to ops, and deduplicate them. # NOTE(mrry): Other than deduplication, we do not currently track direct # or indirect dependencies between control_inputs, which may result in # redundant control inputs. 
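    # For example (illustrative): `control_dependencies([t, t.op])` resolves
    # both entries to the same Operation below and records it only once.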
control_ops = [] current = self._current_control_dependencies() for c in control_inputs: # The hasattr(handle) is designed to match ResourceVariables. This is so # control dependencies on a variable or on an unread variable don't # trigger reads. if (isinstance(c, IndexedSlices) or (hasattr(c, "_handle") and hasattr(c, "op"))): c = c.op c = self.as_graph_element(c) if isinstance(c, Tensor): c = c.op elif not isinstance(c, Operation): raise TypeError("Control input must be Operation or Tensor: %s" % c) if c not in current: control_ops.append(c) current.add(c) return self._ControlDependenciesController(self, control_ops) # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def _attr_scope(self, attr_map): """EXPERIMENTAL: A context manager for setting attributes on operators. This context manager can be used to add additional attributes to operators within the scope of the context. For example: with ops.Graph().as_default() as g: f_1 = Foo() # No extra attributes with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=False)}): f_2 = Foo() # Additional attribute _a=False with g._attr_scope({"_a": tf.attr_value_pb2.AttrValue(b=True)}): f_3 = Foo() # Additional attribute _a=False with g._attr_scope({"_a": None}): f_4 = Foo() # No additional attributes. Args: attr_map: A dictionary mapping attr name strings to AttrValue protocol buffers or None. Returns: A context manager that sets the kernel label to be used for one or more ops created in that context. Raises: TypeError: If attr_map is not a dictionary mapping strings to AttrValue protobufs. """ if not isinstance(attr_map, dict): raise TypeError("attr_map must be a dictionary mapping " "strings to AttrValue protocol buffers") # The saved_attrs dictionary stores any currently-set labels that # will be overridden by this context manager. saved_attrs = {} # Install the given attribute for name, attr in attr_map.items(): if not (isinstance(name, six.string_types) and (isinstance(attr, (type(None), attr_value_pb2.AttrValue)) or callable(attr))): raise TypeError("attr_map must be a dictionary mapping " "strings to AttrValue protocol buffers or " "callables that emit AttrValue protocol buffers") try: saved_attrs[name] = self._attr_scope_map[name] except KeyError: pass if attr is None: del self._attr_scope_map[name] else: self._attr_scope_map[name] = attr try: yield # The code within the context runs here. finally: # Remove the attributes set for this context, and restore any saved # attributes. for name, attr in attr_map.items(): try: self._attr_scope_map[name] = saved_attrs[name] except KeyError: del self._attr_scope_map[name] # pylint: enable=g-doc-return-or-yield # pylint: disable=g-doc-return-or-yield @tf_contextlib.contextmanager def _kernel_label_map(self, op_to_kernel_label_map): """EXPERIMENTAL: A context manager for setting kernel labels. This context manager can be used to select particular implementations of kernels within the scope of the context. For example: with ops.Graph().as_default() as g: f_1 = Foo() # Uses the default registered kernel for the Foo op. with g.kernel_label_map({"Foo": "v_2"}): f_2 = Foo() # Uses the registered kernel with label "v_2" # for the Foo op. with g.kernel_label_map({"Foo": "v_3"}): f_3 = Foo() # Uses the registered kernel with label "v_3" # for the Foo op. with g.kernel_label_map({"Foo": ""}): f_4 = Foo() # Uses the default registered kernel # for the Foo op. Args: op_to_kernel_label_map: A dictionary mapping op type strings to kernel label strings. 
    Returns:
      A context manager that sets the kernel label to be used for one or more
      ops created in that context.

    Raises:
      TypeError: If op_to_kernel_label_map is not a dictionary mapping strings
        to strings.
    """
    if not isinstance(op_to_kernel_label_map, dict):
      raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_labels dictionary stores any currently-set labels that
    # will be overridden by this context manager.
    saved_labels = {}
    # Install the given label
    for op_type, label in op_to_kernel_label_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(label, six.string_types)):
        raise TypeError("op_to_kernel_label_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_labels[op_type] = self._op_to_kernel_label_map[op_type]
      except KeyError:
        pass
      self._op_to_kernel_label_map[op_type] = label
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the labels set for this context, and restore any saved labels.
      for op_type, label in op_to_kernel_label_map.items():
        try:
          self._op_to_kernel_label_map[op_type] = saved_labels[op_type]
        except KeyError:
          del self._op_to_kernel_label_map[op_type]

  # pylint: enable=g-doc-return-or-yield

  # pylint: disable=g-doc-return-or-yield
  @tf_contextlib.contextmanager
  def gradient_override_map(self, op_type_map):
    """EXPERIMENTAL: A context manager for overriding gradient functions.

    This context manager can be used to override the gradient function that
    will be used for ops within the scope of the context.

    For example:

    ```python
    @tf.RegisterGradient("CustomSquare")
    def _custom_square_grad(op, grad):
      # ...

    with tf.Graph().as_default() as g:
      c = tf.constant(5.0)
      s_1 = tf.square(c)  # Uses the default gradient for tf.square.
      with g.gradient_override_map({"Square": "CustomSquare"}):
        s_2 = tf.square(c)  # Uses _custom_square_grad to compute the
                            # gradient of s_2.
    ```

    Args:
      op_type_map: A dictionary mapping op type strings to alternative op type
        strings.

    Returns:
      A context manager that sets the alternative op type to be used for one
      or more ops created in that context.

    Raises:
      TypeError: If `op_type_map` is not a dictionary mapping strings to
        strings.
    """
    if not isinstance(op_type_map, dict):
      raise TypeError("op_type_map must be a dictionary mapping "
                      "strings to strings")
    # The saved_mappings dictionary stores any currently-set mappings that
    # will be overridden by this context manager.
    saved_mappings = {}
    # Install the given override mapping.
    for op_type, mapped_op_type in op_type_map.items():
      if not (isinstance(op_type, six.string_types) and
              isinstance(mapped_op_type, six.string_types)):
        raise TypeError("op_type_map must be a dictionary mapping "
                        "strings to strings")
      try:
        saved_mappings[op_type] = self._gradient_override_map[op_type]
      except KeyError:
        pass
      self._gradient_override_map[op_type] = mapped_op_type
    try:
      yield  # The code within the context runs here.
    finally:
      # Remove the mappings set for this context, and restore any saved
      # mappings.
for op_type, mapped_op_type in op_type_map.items(): try: self._gradient_override_map[op_type] = saved_mappings[op_type] except KeyError: del self._gradient_override_map[op_type] # pylint: enable=g-doc-return-or-yield def prevent_feeding(self, tensor): """Marks the given `tensor` as unfeedable in this graph.""" self._unfeedable_tensors.add(tensor) def is_feedable(self, tensor): """Returns `True` if and only if `tensor` is feedable.""" return tensor not in self._unfeedable_tensors def prevent_fetching(self, op): """Marks the given `op` as unfetchable in this graph.""" self._unfetchable_ops.add(op) def is_fetchable(self, tensor_or_op): """Returns `True` if and only if `tensor_or_op` is fetchable.""" if isinstance(tensor_or_op, Tensor): return tensor_or_op.op not in self._unfetchable_ops else: return tensor_or_op not in self._unfetchable_ops def switch_to_thread_local(self): """Make device, colocation and dependencies stacks thread-local. Device, colocation and dependencies stacks are not thread-local be default. If multiple threads access them, then the state is shared. This means that one thread may affect the behavior of another thread. After this method is called, the stacks become thread-local. If multiple threads access them, then the state is not shared. Each thread uses its own value; a thread doesn't affect other threads by mutating such a stack. The initial value for every thread's stack is set to the current value of the stack when `switch_to_thread_local()` was first called. """ if not self._stack_state_is_thread_local: self._stack_state_is_thread_local = True @property def _device_function_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where device_function_stack doesn't yet # exist. # pylint: disable=protected-access if not hasattr(self._thread_local, "_device_function_stack"): stack_copy_for_this_thread = self._graph_device_function_stack.copy() self._thread_local._device_function_stack = stack_copy_for_this_thread return self._thread_local._device_function_stack # pylint: enable=protected-access else: return self._graph_device_function_stack @property def _device_functions_outer_to_inner(self): user_device_specs = self._device_function_stack.peek_objs() device_functions = [spec.function for spec in user_device_specs] device_functions_outer_to_inner = list(reversed(device_functions)) return device_functions_outer_to_inner def _snapshot_device_function_stack_metadata(self): """Return device function stack as a list of TraceableObjects. Returns: [traceable_stack.TraceableObject, ...] where each TraceableObject's .obj member is a displayable name for the user's argument to Graph.device, and the filename and lineno members point to the code location where Graph.device was called directly or indirectly by the user. """ snapshot = [] for obj in self._device_function_stack.peek_traceable_objs(): obj_copy = obj.copy_metadata() obj_copy.obj = obj.obj.display_name snapshot.append(obj_copy) return snapshot @_device_function_stack.setter def _device_function_stack(self, device_function_stack): if self._stack_state_is_thread_local: # pylint: disable=protected-access self._thread_local._device_function_stack = device_function_stack # pylint: enable=protected-access else: self._graph_device_function_stack = device_function_stack @property def _colocation_stack(self): """Return thread-local copy of colocation stack.""" if self._stack_state_is_thread_local: # This may be called from a thread where colocation_stack doesn't yet # exist. 
# pylint: disable=protected-access if not hasattr(self._thread_local, "_colocation_stack"): stack_copy_for_this_thread = self._graph_colocation_stack.copy() self._thread_local._colocation_stack = stack_copy_for_this_thread return self._thread_local._colocation_stack # pylint: enable=protected-access else: return self._graph_colocation_stack def _snapshot_colocation_stack_metadata(self): """Return colocation stack metadata as a dictionary.""" return { traceable_obj.obj.name: traceable_obj.copy_metadata() for traceable_obj in self._colocation_stack.peek_traceable_objs() } @_colocation_stack.setter def _colocation_stack(self, colocation_stack): if self._stack_state_is_thread_local: # pylint: disable=protected-access self._thread_local._colocation_stack = colocation_stack # pylint: enable=protected-access else: self._graph_colocation_stack = colocation_stack @property def _control_dependencies_stack(self): if self._stack_state_is_thread_local: # This may be called from a thread where control_dependencies_stack # doesn't yet exist. if not hasattr(self._thread_local, "_control_dependencies_stack"): self._thread_local._control_dependencies_stack = ( self._graph_control_dependencies_stack[:]) return self._thread_local._control_dependencies_stack else: return self._graph_control_dependencies_stack @_control_dependencies_stack.setter def _control_dependencies_stack(self, control_dependencies): if self._stack_state_is_thread_local: self._thread_local._control_dependencies_stack = control_dependencies else: self._graph_control_dependencies_stack = control_dependencies @property def _distribution_strategy_stack(self): """A stack to maintain distribution strategy context for each thread.""" if not hasattr(self._thread_local, "_distribution_strategy_stack"): self._thread_local._distribution_strategy_stack = [] # pylint: disable=protected-access return self._thread_local._distribution_strategy_stack # pylint: disable=protected-access @_distribution_strategy_stack.setter def _distribution_strategy_stack(self, _distribution_strategy_stack): self._thread_local._distribution_strategy_stack = ( # pylint: disable=protected-access _distribution_strategy_stack) @property def _global_distribute_strategy_scope(self): """For implementing `tf.distribute.set_strategy()`.""" if not hasattr(self._thread_local, "distribute_strategy_scope"): self._thread_local.distribute_strategy_scope = None return self._thread_local.distribute_strategy_scope @_global_distribute_strategy_scope.setter def _global_distribute_strategy_scope(self, distribute_strategy_scope): self._thread_local.distribute_strategy_scope = (distribute_strategy_scope) @property def _auto_cast_variable_read_dtype(self): """The dtype that instances of `AutoCastVariable` will be casted to. This is None if `AutoCastVariables` should not be casted. See `AutoCastVariable` for more information. Returns: The dtype that instances of `AutoCastVariable` will be casted to. 
""" if not hasattr(self._thread_local, "_auto_cast_variable_read_dtype"): self._thread_local._auto_cast_variable_read_dtype = None # pylint: disable=protected-access return self._thread_local._auto_cast_variable_read_dtype # pylint: disable=protected-access @_auto_cast_variable_read_dtype.setter def _auto_cast_variable_read_dtype(self, _auto_cast_variable_read_dtype): self._thread_local._auto_cast_variable_read_dtype = ( # pylint: disable=protected-access _auto_cast_variable_read_dtype) @tf_contextlib.contextmanager def _enable_auto_casting_variables(self, dtype): """Context manager to automatically cast AutoCastVariables. If an AutoCastVariable `var` is used under this context manager, it will be casted to `dtype` before being used. See `AutoCastVariable` for more information. Args: dtype: The dtype that AutoCastVariables should be casted to. Yields: Nothing. """ prev_read_dtype = self._auto_cast_variable_read_dtype try: self._auto_cast_variable_read_dtype = dtype yield finally: self._auto_cast_variable_read_dtype = prev_read_dtype def _mutation_lock(self): """Returns a lock to guard code that creates & mutates ops. See the comment for self._group_lock for more info. """ return self._group_lock.group(_MUTATION_LOCK_GROUP) def _session_run_lock(self): """Returns a lock to guard code for Session.run. See the comment for self._group_lock for more info. """ return self._group_lock.group(_SESSION_RUN_LOCK_GROUP) # TODO(agarwal): currently device directives in an outer eager scope will not # apply to inner graph mode code. Fix that. @tf_export(v1=["device"]) def device(device_name_or_function): """Wrapper for `Graph.device()` using the default graph. See `tf.Graph.device` for more details. Args: device_name_or_function: The device name or function to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If eager execution is enabled and a function is passed in. """ if context.executing_eagerly(): # TODO(agarwal): support device functions in EAGER mode. if callable(device_name_or_function): raise RuntimeError( "tf.device does not support functions when eager execution " "is enabled.") return context.device(device_name_or_function) else: return get_default_graph().device(device_name_or_function) @tf_export("device", v1=[]) def device_v2(device_name): """Specifies the device for ops created/executed in this context. `device_name` can be fully specified, as in "/job:worker/task:1/device:cpu:0", or partially specified, containing only a subset of the "/"-separated fields. Any fields which are specified override device annotations from outer scopes. For example: ```python with tf.device('/job:foo'): # ops created here have devices with /job:foo with tf.device('/job:bar/task:0/device:gpu:2'): # ops created here have the fully specified device above with tf.device('/device:gpu:1'): # ops created here have the device '/job:foo/device:gpu:1' ``` Args: device_name: The device name to use in the context. Returns: A context manager that specifies the default device to use for newly created ops. Raises: RuntimeError: If a function is passed in. """ if callable(device_name): raise RuntimeError("tf.device does not support functions.") if context.executing_eagerly(): return context.device(device_name) else: return get_default_graph().device(device_name) @tf_export(v1=["container"]) def container(container_name): """Wrapper for `Graph.container()` using the default graph. 
Args: container_name: The container string to use in the context. Returns: A context manager that specifies the default container to use for newly created stateful ops. """ return get_default_graph().container(container_name) def _colocate_with_for_gradient(op, gradient_uid, ignore_existing=False): if context.executing_eagerly(): if op is not None: if not hasattr(op, "device"): op = internal_convert_to_tensor_or_indexed_slices(op) return device(op.device) else: return NullContextmanager() else: default_graph = get_default_graph() if isinstance(op, EagerTensor): if default_graph.building_function: return default_graph.device(op.device) else: raise ValueError("Encountered an Eager-defined Tensor during graph " "construction, but a function was not being built.") return default_graph._colocate_with_for_gradient( op, gradient_uid=gradient_uid, ignore_existing=ignore_existing) # Internal interface to colocate_with. colocate_with has been deprecated from # public API. There are still a few internal uses of colocate_with. Add internal # only API for those uses to avoid deprecation warning. def colocate_with(op, ignore_existing=False): return _colocate_with_for_gradient(op, None, ignore_existing=ignore_existing) @deprecation.deprecated( date=None, instructions="Colocations handled automatically by placer.") @tf_export(v1=["colocate_with"]) def _colocate_with(op, ignore_existing=False): return colocate_with(op, ignore_existing) @tf_export("control_dependencies") def control_dependencies(control_inputs): """Wrapper for `Graph.control_dependencies()` using the default graph. See `tf.Graph.control_dependencies` for more details. When eager execution is enabled, any callable object in the `control_inputs` list will be called. Args: control_inputs: A list of `Operation` or `Tensor` objects which must be executed or computed before running the operations defined in the context. Can also be `None` to clear the control dependencies. If eager execution is enabled, any callable object in the `control_inputs` list will be called. Returns: A context manager that specifies control dependencies for all operations constructed within the context. """ if context.executing_eagerly(): if control_inputs: # Excute any pending callables. for control in control_inputs: if callable(control): control() return NullContextmanager() else: return get_default_graph().control_dependencies(control_inputs) class _DefaultStack(threading.local): """A thread-local stack of objects for providing implicit defaults.""" def __init__(self): super(_DefaultStack, self).__init__() self._enforce_nesting = True self.stack = [] def get_default(self): return self.stack[-1] if len(self.stack) >= 1 else None def reset(self): self.stack = [] def is_cleared(self): return not self.stack @property def enforce_nesting(self): return self._enforce_nesting @enforce_nesting.setter def enforce_nesting(self, value): self._enforce_nesting = value @tf_contextlib.contextmanager def get_controller(self, default): """A context manager for manipulating a default stack.""" self.stack.append(default) try: yield default finally: # stack may be empty if reset() was called if self.stack: if self._enforce_nesting: if self.stack[-1] is not default: raise AssertionError( "Nesting violated for default stack of %s objects" % type(default)) self.stack.pop() else: self.stack.remove(default) _default_session_stack = _DefaultStack() # pylint: disable=protected-access def default_session(session): """Python "with" handler for defining a default session. 
This function provides a means of registering a session for handling Tensor.eval() and Operation.run() calls. It is primarily intended for use by session.Session, but can be used with any object that implements the Session.run() interface. Use with the "with" keyword to specify that Tensor.eval() and Operation.run() invocations within the scope of a block should be executed by a particular session. The default session applies to the current thread only, so it is always possible to inspect the call stack and determine the scope of a default session. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a "with ops.default_session(sess):" block in that thread's function. Example: The following code examples are equivalent: # 1. Using the Session object directly: sess = ... c = tf.constant(5.0) sess.run(c) # 2. Using default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) result = c.eval() # 3. Overriding default_session(): sess = ... with ops.default_session(sess): c = tf.constant(5.0) with ops.default_session(...): c.eval(session=sess) Args: session: The session to be installed as the default session. Returns: A context manager for the default session. """ return _default_session_stack.get_controller(session) @tf_export(v1=["get_default_session"]) def get_default_session(): """Returns the default session for the current thread. The returned `Session` will be the innermost session on which a `Session` or `Session.as_default()` context has been entered. NOTE: The default session is a property of the current thread. If you create a new thread, and wish to use the default session in that thread, you must explicitly add a `with sess.as_default():` in that thread's function. Returns: The default `Session` being used in the current thread. """ return _default_session_stack.get_default() def _eval_using_default_session(tensors, feed_dict, graph, session=None): """Uses the default session to evaluate one or more tensors. Args: tensors: A single Tensor, or a list of Tensor objects. feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which the tensors are defined. session: (Optional) A different session to use to evaluate "tensors". Returns: Either a single numpy ndarray if "tensors" is a single tensor; or a list of numpy ndarrays that each correspond to the respective element in "tensors". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot evaluate tensor using `eval()`: No default " "session is registered. Use `with " "sess.as_default()` or pass an explicit session to " "`eval(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to evaluate tensor: " "the tensor's graph is different from the session's " "graph. Pass an explicit session to " "`eval(session=sess)`.") else: if session.graph is not graph: raise ValueError("Cannot use the given session to evaluate tensor: " "the tensor's graph is different from the session's " "graph.") return session.run(tensors, feed_dict) def _run_using_default_session(operation, feed_dict, graph, session=None): """Uses the default session to run "operation". Args: operation: The Operation to be run. 
feed_dict: A dictionary that maps Tensor objects (or tensor names) to lists, numpy ndarrays, TensorProtos, or strings. graph: The graph in which "operation" is defined. session: (Optional) A different session to use to run "operation". Raises: ValueError: If no default session is available; the default session does not have "graph" as its graph; or if "session" is specified, and it does not have "graph" as its graph. """ if session is None: session = get_default_session() if session is None: raise ValueError("Cannot execute operation using `run()`: No default " "session is registered. Use `with " "sess.as_default():` or pass an explicit session to " "`run(session=sess)`") if session.graph is not graph: raise ValueError("Cannot use the default session to execute operation: " "the operation's graph is different from the " "session's graph. Pass an explicit session to " "run(session=sess).") else: if session.graph is not graph: raise ValueError("Cannot use the given session to execute operation: " "the operation's graph is different from the session's " "graph.") session.run(operation, feed_dict) class _DefaultGraphStack(_DefaultStack): # pylint: disable=protected-access """A thread-local stack of objects for providing an implicit default graph.""" def __init__(self): super(_DefaultGraphStack, self).__init__() self._global_default_graph = None def get_default(self): """Override that returns a global default if the stack is empty.""" ret = super(_DefaultGraphStack, self).get_default() if ret is None: ret = self._GetGlobalDefaultGraph() return ret def _GetGlobalDefaultGraph(self): if self._global_default_graph is None: # TODO(mrry): Perhaps log that the default graph is being used, or set # provide some other feedback to prevent confusion when a mixture of # the global default graph and an explicit graph are combined in the # same process. self._global_default_graph = Graph() return self._global_default_graph def reset(self): super(_DefaultGraphStack, self).reset() self._global_default_graph = None @tf_contextlib.contextmanager def get_controller(self, default): context.context().context_switches.push(default.building_function, default.as_default, default._device_function_stack) try: with super(_DefaultGraphStack, self).get_controller(default) as g, context.graph_mode(): yield g finally: # If an exception is raised here it may be hiding a related exception in # the try-block (just above). context.context().context_switches.pop() _default_graph_stack = _DefaultGraphStack() # pylint: disable=g-doc-return-or-yield,line-too-long @tf_export("init_scope") @tf_contextlib.contextmanager def init_scope(): """A context manager that lifts ops out of control-flow scopes and function-building graphs. There is often a need to lift variable initialization ops out of control-flow scopes, function-building graphs, and gradient tapes. Entering an `init_scope` is a mechanism for satisfying these desiderata. In particular, entering an `init_scope` has three effects: (1) All control dependencies are cleared the moment the scope is entered; this is equivalent to entering the context manager returned from `control_dependencies(None)`, which has the side-effect of exiting control-flow scopes like `tf.cond` and `tf.while_loop`. (2) All operations that are created while the scope is active are lifted into the lowest context on the `context_stack` that is not building a graph function. Here, a context is defined as either a graph or an eager context. 
Every context switch, i.e., every installation of a graph as the default graph and every switch into eager mode, is logged in a thread-local stack called `context_switches`; the log entry for a context switch is popped from the stack when the context is exited. Entering an `init_scope` is equivalent to crawling up `context_switches`, finding the first context that is not building a graph function, and entering it. A caveat is that if graph mode is enabled but the default graph stack is empty, then entering an `init_scope` will simply install a fresh graph as the default one. (3) The gradient tape is paused while the scope is active. When eager execution is enabled, code inside an init_scope block runs with eager execution enabled even when defining graph functions via tf.contrib.eager.defun. For example: ```python tf.compat.v1.enable_eager_execution() @tf.contrib.eager.defun def func(): # A defun-decorated function constructs TensorFlow graphs, # it does not execute eagerly. assert not tf.executing_eagerly() with tf.init_scope(): # Initialization runs with eager execution enabled assert tf.executing_eagerly() ``` Raises: RuntimeError: if graph state is incompatible with this initialization. """ # pylint: enable=g-doc-return-or-yield,line-too-long if context.executing_eagerly(): # Fastpath. with tape.stop_recording(): yield else: # Retrieve the active name scope: entering an `init_scope` preserves # the name scope of the current context. default_graph = get_default_graph() scope = default_graph.get_name_scope() if scope and scope[-1] != "/": # Names that end with trailing slashes are treated by `name_scope` as # absolute. scope = scope + "/" innermost_nonempty_device_stack = default_graph._device_function_stack # pylint: disable=protected-access outer_context = None if not _default_graph_stack.stack: # If the default graph stack is empty, then we cannot be building a # function. Install the global graph (which, in this case, is also the # default graph) as the outer context. if default_graph.building_function: raise RuntimeError("The global graph is building a function.") outer_context = default_graph.as_default else: # Find a context that is not building a function. for stack_entry in reversed(context.context().context_switches.stack): if not innermost_nonempty_device_stack: innermost_nonempty_device_stack = stack_entry.device_stack if not stack_entry.is_building_function: outer_context = stack_entry.enter_context_fn break if outer_context is None: # As a last resort, obtain the global default graph; this graph doesn't # necessarily live on the graph stack (and hence it doesn't necessarily # live on the context stack), but it is stored in the graph stack's # encapsulating object. outer_context = _default_graph_stack._GetGlobalDefaultGraph().as_default # pylint: disable=protected-access if outer_context is None: # Sanity check; this shouldn't be triggered. raise RuntimeError("All graphs are building functions, and no " "eager context was previously active.") outer_graph = None outer_device_stack = None try: with outer_context(), name_scope(scope), control_dependencies( None), tape.stop_recording(): context_manager = NullContextmanager context_manager_input = None if not context.executing_eagerly(): # The device stack is preserved when lifting into a graph. Eager # execution doesn't implement device stacks and in particular it # doesn't support device functions, so in general it's not possible # to do the same when lifting into the eager context. 
outer_graph = get_default_graph() outer_device_stack = outer_graph._device_function_stack # pylint: disable=protected-access outer_graph._device_function_stack = innermost_nonempty_device_stack # pylint: disable=protected-access elif innermost_nonempty_device_stack is not None: for device_spec in innermost_nonempty_device_stack.peek_objs(): if device_spec.function is None: break if device_spec.raw_string: context_manager = context.device context_manager_input = device_spec.raw_string break # It is currently not possible to have a device function in V2, # but in V1 we are unable to apply device functions in eager mode. # This means that we will silently skip some of the entries on the # device stack in V1 + eager mode. with context_manager(context_manager_input): yield finally: # If an exception is raised here it may be hiding a related exception in # try-block (just above). if outer_graph is not None: outer_graph._device_function_stack = outer_device_stack # pylint: disable=protected-access def executing_eagerly_outside_functions(): """Returns True if executing eagerly, even if inside a graph function.""" # Fastpath for when this is called eagerly (its not necessary to init_scope). if context.executing_eagerly(): return True with init_scope(): return context.executing_eagerly() def inside_function(): return get_default_graph().building_function @tf_export(v1=["enable_eager_execution"]) def enable_eager_execution(config=None, device_policy=None, execution_mode=None): """Enables eager execution for the lifetime of this program. Eager execution provides an imperative interface to TensorFlow. With eager execution enabled, TensorFlow functions execute operations immediately (as opposed to adding to a graph to be executed later in a `tf.compat.v1.Session`) and return concrete values (as opposed to symbolic references to a node in a computational graph). For example: ```python tf.compat.v1.enable_eager_execution() # After eager execution is enabled, operations are executed as they are # defined and Tensor objects hold concrete values, which can be accessed as # numpy.ndarray`s through the numpy() method. assert tf.multiply(6, 7).numpy() == 42 ``` Eager execution cannot be enabled after TensorFlow APIs have been used to create or execute graphs. It is typically recommended to invoke this function at program startup and not in a library (as most libraries should be usable both with and without eager execution). Args: config: (Optional.) A `tf.compat.v1.ConfigProto` to use to configure the environment in which operations are executed. Note that `tf.compat.v1.ConfigProto` is also used to configure graph execution (via `tf.compat.v1.Session`) and many options within `tf.compat.v1.ConfigProto` are not implemented (or are irrelevant) when eager execution is enabled. device_policy: (Optional.) Policy controlling how operations requiring inputs on a specific device (e.g., a GPU 0) handle inputs on a different device (e.g. GPU 1 or CPU). When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.DEVICE_PLACEMENT_EXPLICIT: raises an error if the placement is not correct. - tf.contrib.eager.DEVICE_PLACEMENT_WARN: copies the tensors which are not on the right device but logs a warning. - tf.contrib.eager.DEVICE_PLACEMENT_SILENT: silently copies the tensors. Note that this may hide performance problems as there is no notification provided when operations are blocked on the tensor being copied between devices. 
- tf.contrib.eager.DEVICE_PLACEMENT_SILENT_FOR_INT32: silently copies int32 tensors, raising errors on the other ones. execution_mode: (Optional.) Policy controlling how operations dispatched are actually executed. When set to None, an appropriate value will be picked automatically. The value picked may change between TensorFlow releases. Valid values: - tf.contrib.eager.SYNC: executes each operation synchronously. - tf.contrib.eager.ASYNC: executes each operation asynchronously. These operations may return "non-ready" handles. Raises: ValueError: If eager execution is enabled after creating/executing a TensorFlow graph, or if options provided conflict with a previous call to this function. """ _api_usage_gauge.get_cell().set(True) if context.default_execution_mode != context.EAGER_MODE: return enable_eager_execution_internal( config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=None) @tf_export(v1=["disable_eager_execution"]) def disable_eager_execution(): """Disables eager execution. This function can only be called before any Graphs, Ops, or Tensors have been created. It can be used at the beginning of the program for complex migration projects from TensorFlow 1.x to 2.x. """ _api_usage_gauge.get_cell().set(False) context.default_execution_mode = context.GRAPH_MODE c = context.context_safe() if c is not None: c._thread_local_data.is_eager = False # pylint: disable=protected-access def enable_eager_execution_internal(config=None, device_policy=None, execution_mode=None, server_def=None): """Enables eager execution for the lifetime of this program. Most of the doc string for enable_eager_execution is relevant here as well. Args: config: See enable_eager_execution doc string device_policy: See enable_eager_execution doc string execution_mode: See enable_eager_execution doc string server_def: (Optional.) A tensorflow::ServerDef proto. Enables execution on remote devices. GrpcServers need to be started by creating an identical server_def to this, and setting the appropriate task_indexes, so that the servers can communicate. It will then be possible to execute operations on remote devices. 
Raises: ValueError """ if config is not None and not isinstance(config, config_pb2.ConfigProto): raise TypeError("config must be a tf.ConfigProto, but got %s" % type(config)) if device_policy not in (None, context.DEVICE_PLACEMENT_EXPLICIT, context.DEVICE_PLACEMENT_WARN, context.DEVICE_PLACEMENT_SILENT, context.DEVICE_PLACEMENT_SILENT_FOR_INT32): raise ValueError( "device_policy must be one of None, tf.contrib.eager.DEVICE_PLACEMENT_*" ) if execution_mode not in (None, context.SYNC, context.ASYNC): raise ValueError( "execution_mode must be one of None, tf.contrib.eager.SYNC, " "tf.contrib.eager.ASYNC") if context.default_execution_mode == context.GRAPH_MODE: graph_mode_has_been_used = ( _default_graph_stack._global_default_graph is not None) # pylint: disable=protected-access if graph_mode_has_been_used: raise ValueError( "tf.enable_eager_execution must be called at program startup.") context.default_execution_mode = context.EAGER_MODE # pylint: disable=protected-access if context._context is None: context._context = context.Context( config=config, device_policy=device_policy, execution_mode=execution_mode, server_def=server_def) elif ((config is not None and config is not context._context._config) or (device_policy is not None and device_policy is not context._context._device_policy) or (execution_mode is not None and execution_mode is not context._context._execution_mode)): raise ValueError( "Trying to change the options of an active eager" " execution. Context config: %s, specified config:" " %s. Context device policy: %s, specified device" " policy: %s. Context execution mode: %s, " " specified execution mode %s." % (context._context._config, config, context._context._device_policy, device_policy, context._context._execution_mode, execution_mode)) else: # We already created everything, so update the thread local data. context._context._thread_local_data.is_eager = True # Monkey patch to get rid of an unnecessary conditional since the context is # now initialized. context.context = context.context_safe def eager_run(main=None, argv=None): """Runs the program with an optional main function and argv list. The program will run with eager execution enabled. Example: ```python import tensorflow as tf # Import subject to future changes: from tensorflow.contrib.eager.python import tfe def main(_): u = tf.constant(6.0) v = tf.constant(7.0) print(u * v) if __name__ == "__main__": tfe.run() ``` Args: main: the main function to run. argv: the arguments to pass to it. """ enable_eager_execution() app.run(main, argv) @tf_export(v1=["reset_default_graph"]) def reset_default_graph(): """Clears the default graph stack and resets the global default graph. NOTE: The default graph is a property of the current thread. This function applies only to the current thread. Calling this function while a `tf.compat.v1.Session` or `tf.compat.v1.InteractiveSession` is active will result in undefined behavior. Using any previously created `tf.Operation` or `tf.Tensor` objects after calling this function will result in undefined behavior. Raises: AssertionError: If this function is called within a nested graph. """ if not _default_graph_stack.is_cleared(): raise AssertionError("Do not use tf.reset_default_graph() to clear " "nested graphs. If you need a cleared graph, " "exit the nesting and create a new graph.") _default_graph_stack.reset() @tf_export(v1=["get_default_graph"]) def get_default_graph(): """Returns the default graph for the current thread. 
The returned graph will be the innermost graph on which a `Graph.as_default()` context has been entered, or a global default graph if none has been explicitly created. NOTE: The default graph is a property of the current thread. If you create a new thread, and wish to use the default graph in that thread, you must explicitly add a `with g.as_default():` in that thread's function. Returns: The default `Graph` being used in the current thread. """ return _default_graph_stack.get_default() def has_default_graph(): """Returns True if there is a default graph.""" return len(_default_graph_stack.stack) >= 1 def get_name_scope(): """Returns the current name scope in the default_graph. For example: ```python with tf.name_scope('scope1'): with tf.name_scope('scope2'): print(tf.get_name_scope()) ``` would print the string `scope1/scope2`. Returns: A string representing the current name scope. """ if context.executing_eagerly(): return context.context().scope_name.rstrip("/") return get_default_graph().get_name_scope() def _assert_same_graph(original_item, item): """Fail if the 2 items are from different graphs. Args: original_item: Original item to check against. item: Item to check. Raises: ValueError: if graphs do not match. """ if original_item.graph is not item.graph: raise ValueError("%s must be from the same graph as %s." % (item, original_item)) def _get_graph_from_inputs(op_input_list, graph=None): """Returns the appropriate graph to use for the given inputs. This library method provides a consistent algorithm for choosing the graph in which an Operation should be constructed: 1. If the default graph is being used to construct a function, we use the default graph. 2. If the "graph" is specified explicitly, we validate that all of the inputs in "op_input_list" are compatible with that graph. 3. Otherwise, we attempt to select a graph from the first Operation- or Tensor-valued input in "op_input_list", and validate that all other such inputs are in the same graph. 4. If the graph was not specified and it could not be inferred from "op_input_list", we attempt to use the default graph. Args: op_input_list: A list of inputs to an operation, which may include `Tensor`, `Operation`, and other objects that may be converted to a graph element. graph: (Optional) The explicit graph to use. Raises: TypeError: If op_input_list is not a list or tuple, or if graph is not a Graph. ValueError: If a graph is explicitly passed and not all inputs are from it, or if the inputs are from multiple graphs, or we could not find a graph and there was no default graph. Returns: The appropriate graph to use for the given inputs. """ if get_default_graph().building_function: return get_default_graph() op_input_list = tuple(op_input_list) # Handle generators correctly if graph and not isinstance(graph, Graph): raise TypeError("Input graph needs to be a Graph: %s" % graph) # 1. We validate that all of the inputs are from the same graph. This is # either the supplied graph parameter, or the first one selected from one # the graph-element-valued inputs. In the latter case, we hold onto # that input in original_graph_element so we can provide a more # informative error if a mismatch is found. original_graph_element = None for op_input in op_input_list: # Determine if this is a valid graph_element. # TODO(josh11b): Note that we exclude subclasses of Tensor. Need to clean this # up. 
graph_element = None if (isinstance(op_input, (Operation, _TensorLike)) and ((not isinstance(op_input, Tensor)) or type(op_input) == Tensor)): # pylint: disable=unidiomatic-typecheck graph_element = op_input else: graph_element = _as_graph_element(op_input) if graph_element is not None: if not graph: original_graph_element = graph_element graph = graph_element.graph elif original_graph_element is not None: _assert_same_graph(original_graph_element, graph_element) elif graph_element.graph is not graph: raise ValueError("%s is not from the passed-in graph." % graph_element) # 2. If all else fails, we use the default graph, which is always there. return graph or get_default_graph() @tf_export(v1=["GraphKeys"]) class GraphKeys(object): """Standard names to use for graph collections. The standard library uses various well-known names to collect and retrieve values associated with a graph. For example, the `tf.Optimizer` subclasses default to optimizing the variables collected under `tf.GraphKeys.TRAINABLE_VARIABLES` if none is specified, but it is also possible to pass an explicit list of variables. The following standard keys are defined: * `GLOBAL_VARIABLES`: the default collection of `Variable` objects, shared across distributed environment (model variables are subset of these). See `tf.compat.v1.global_variables` for more details. Commonly, all `TRAINABLE_VARIABLES` variables will be in `MODEL_VARIABLES`, and all `MODEL_VARIABLES` variables will be in `GLOBAL_VARIABLES`. * `LOCAL_VARIABLES`: the subset of `Variable` objects that are local to each machine. Usually used for temporarily variables, like counters. Note: use `tf.contrib.framework.local_variable` to add to this collection. * `MODEL_VARIABLES`: the subset of `Variable` objects that are used in the model for inference (feed forward). Note: use `tf.contrib.framework.model_variable` to add to this collection. * `TRAINABLE_VARIABLES`: the subset of `Variable` objects that will be trained by an optimizer. See `tf.compat.v1.trainable_variables` for more details. * `SUMMARIES`: the summary `Tensor` objects that have been created in the graph. See `tf.compat.v1.summary.merge_all` for more details. * `QUEUE_RUNNERS`: the `QueueRunner` objects that are used to produce input for a computation. See `tf.compat.v1.train.start_queue_runners` for more details. * `MOVING_AVERAGE_VARIABLES`: the subset of `Variable` objects that will also keep moving averages. See `tf.compat.v1.moving_average_variables` for more details. * `REGULARIZATION_LOSSES`: regularization losses collected during graph construction. The following standard keys are _defined_, but their collections are **not** automatically populated as many of the others are: * `WEIGHTS` * `BIASES` * `ACTIVATIONS` """ # Key to collect Variable objects that are global (shared across machines). # Default collection for all variables, except local ones. GLOBAL_VARIABLES = "variables" # Key to collect local variables that are local to the machine and are not # saved/restored. LOCAL_VARIABLES = "local_variables" # Key to collect local variables which are used to accumulate interal state # to be used in tf.metrics.*. METRIC_VARIABLES = "metric_variables" # Key to collect model variables defined by layers. MODEL_VARIABLES = "model_variables" # Key to collect Variable objects that will be trained by the # optimizers. TRAINABLE_VARIABLES = "trainable_variables" # Key to collect summaries. SUMMARIES = "summaries" # Key to collect QueueRunners. 
QUEUE_RUNNERS = "queue_runners" # Key to collect table initializers. TABLE_INITIALIZERS = "table_initializer" # Key to collect asset filepaths. An asset represents an external resource # like a vocabulary file. ASSET_FILEPATHS = "asset_filepaths" # Key to collect Variable objects that keep moving averages. MOVING_AVERAGE_VARIABLES = "moving_average_variables" # Key to collect regularization losses at graph construction. REGULARIZATION_LOSSES = "regularization_losses" # Key to collect concatenated sharded variables. CONCATENATED_VARIABLES = "concatenated_variables" # Key to collect savers. SAVERS = "savers" # Key to collect weights WEIGHTS = "weights" # Key to collect biases BIASES = "biases" # Key to collect activations ACTIVATIONS = "activations" # Key to collect update_ops UPDATE_OPS = "update_ops" # Key to collect losses LOSSES = "losses" # Key to collect BaseSaverBuilder.SaveableObject instances for checkpointing. SAVEABLE_OBJECTS = "saveable_objects" # Key to collect all shared resources used by the graph which need to be # initialized once per cluster. RESOURCES = "resources" # Key to collect all shared resources used in this graph which need to be # initialized once per session. LOCAL_RESOURCES = "local_resources" # Trainable resource-style variables. TRAINABLE_RESOURCE_VARIABLES = "trainable_resource_variables" # Key to indicate various ops. INIT_OP = "init_op" LOCAL_INIT_OP = "local_init_op" READY_OP = "ready_op" READY_FOR_LOCAL_INIT_OP = "ready_for_local_init_op" SUMMARY_OP = "summary_op" GLOBAL_STEP = "global_step" # Used to count the number of evaluations performed during a single evaluation # run. EVAL_STEP = "eval_step" TRAIN_OP = "train_op" # Key for control flow context. COND_CONTEXT = "cond_context" WHILE_CONTEXT = "while_context" # Used to store v2 summary names. _SUMMARY_COLLECTION = "_SUMMARY_V2" # List of all collections that keep track of variables. _VARIABLE_COLLECTIONS = [ GLOBAL_VARIABLES, LOCAL_VARIABLES, METRIC_VARIABLES, MODEL_VARIABLES, TRAINABLE_VARIABLES, MOVING_AVERAGE_VARIABLES, CONCATENATED_VARIABLES, TRAINABLE_RESOURCE_VARIABLES, ] # Key for streaming model ports. # NOTE(yuanbyu): internal and experimental. _STREAMING_MODEL_PORTS = "streaming_model_ports" @decorator_utils.classproperty @deprecation.deprecated(None, "Use `tf.GraphKeys.GLOBAL_VARIABLES` instead.") def VARIABLES(cls): # pylint: disable=no-self-argument return cls.GLOBAL_VARIABLES def dismantle_graph(graph): """Cleans up reference cycles from a `Graph`. Helpful for making sure the garbage collector doesn't need to run after a temporary `Graph` is no longer needed. Args: graph: A `Graph` object to destroy. Neither it nor any of its ops are usable after this function runs. """ memory.dismantle_ordered_dict(graph._functions) # pylint: disable=protected-access # Now clean up Operation<->Graph reference cycles by clearing all of the # attributes for the Graph and its ops. graph_operations = graph.get_operations() for op in graph_operations: op.__dict__ = {} graph.__dict__ = {} @tf_export(v1=["add_to_collection"]) def add_to_collection(name, value): """Wrapper for `Graph.add_to_collection()` using the default graph. See `tf.Graph.add_to_collection` for more details. Args: name: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. value: The value to add to the collection. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). 
@end_compatibility """ get_default_graph().add_to_collection(name, value) @tf_export(v1=["add_to_collections"]) def add_to_collections(names, value): """Wrapper for `Graph.add_to_collections()` using the default graph. See `tf.Graph.add_to_collections` for more details. Args: names: The key for the collections. The `GraphKeys` class contains many standard names for collections. value: The value to add to the collections. @compatibility(eager) Collections are only supported in eager when variables are created inside an EagerVariableStore (e.g. as part of a layer or template). @end_compatibility """ get_default_graph().add_to_collections(names, value) @tf_export(v1=["get_collection_ref"]) def get_collection_ref(key): """Wrapper for `Graph.get_collection_ref()` using the default graph. See `tf.Graph.get_collection_ref` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. Note that this returns the collection list itself, which can be modified in place to change the collection. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ return get_default_graph().get_collection_ref(key) @tf_export(v1=["get_collection"]) def get_collection(key, scope=None): """Wrapper for `Graph.get_collection()` using the default graph. See `tf.Graph.get_collection` for more details. Args: key: The key for the collection. For example, the `GraphKeys` class contains many standard names for collections. scope: (Optional.) If supplied, the resulting list is filtered to include only items whose `name` attribute matches using `re.match`. Items without a `name` attribute are never returned if a scope is supplied and the choice or `re.match` means that a `scope` without special tokens filters by prefix. Returns: The list of values in the collection with the given `name`, or an empty list if no value has been added to that collection. The list contains the values in the order under which they were collected. @compatibility(eager) Collections are not supported when eager execution is enabled. @end_compatibility """ return get_default_graph().get_collection(key, scope) def get_all_collection_keys(): """Returns a list of collections used in the default graph.""" return get_default_graph().get_all_collection_keys() name_scope_cache = {} # Named like a function for backwards compatibility with the # @tf_contextlib.contextmanager version, which was switched to a class to avoid # some object creation overhead. @tf_export(v1=["name_scope"]) class name_scope(object): # pylint: disable=invalid-name """A context manager for use when defining a Python op. This context manager validates that the given `values` are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see `tf.Graph.name_scope` for more details on that). For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope(name, "MyOp", [a, b, c]) as scope: a = tf.convert_to_tensor(a, name="a") b = tf.convert_to_tensor(b, name="b") c = tf.convert_to_tensor(c, name="c") # Define some computation that uses `a`, `b`, and `c`. return foo_op(..., name=scope) ``` """ @property def name(self): return self._name def __init__(self, name, default_name=None, values=None): """Initialize the context manager. 
Args: name: The name argument that is passed to the op function. default_name: The default name to use if the `name` argument is `None`. values: The list of `Tensor` arguments that are passed to the op function. Raises: TypeError: if `default_name` is passed in but not a string. """ if not (default_name is None or isinstance(default_name, six.string_types)): raise TypeError( "`default_name` type (%s) is not a string type. You likely meant to " "pass this into the `values` kwarg." % type(default_name)) self._name = default_name if name is None else name self._default_name = default_name self._values = values self._ctx = context.context() self._in_eager_mode = self._ctx.executing_eagerly() self._has_symbolic_input_in_eager = False if self._values and self._in_eager_mode: # The presence of a graph tensor in `self._values` overrides the context. for value in self._values: if hasattr(value, "graph"): self._has_symbolic_input_in_eager = True self._name_scope = value.graph.name_scope(self._name) def __enter__(self): """Start the scope block. Returns: The scope name. Raises: ValueError: if neither `name` nor `default_name` is provided but `values` are. """ if self._has_symbolic_input_in_eager: return self._name_scope.__enter__() if self._in_eager_mode: self._old_name = self._ctx.scope_name if not self._name: scope_name = "" else: cache_key = self._name, self._old_name, self._default_name if cache_key in name_scope_cache: self._ctx.scope_name = name_scope_cache[cache_key] return self._ctx.scope_name elif self._name[-1] == "/": # A trailing slash breaks out of nested name scopes, indicating a # fully specified scope name, for compatibility with Graph.name_scope. scope_name = self._name else: name_with_trailing_slash = self._name + "/" scope_name = ( self._old_name + name_with_trailing_slash if self._old_name else name_with_trailing_slash) name_scope_cache[cache_key] = scope_name self._ctx.scope_name = scope_name return scope_name else: if self._name is None and self._values is not None: # We only raise an error if values is not None (provided) because # currently tf.name_scope(None) (values=None then) is sometimes used as # an idiom to reset to top scope. raise ValueError( "At least one of name (%s) and default_name (%s) must be provided." % (self._name, self._default_name)) if self._values is None: self._values = [] g = _get_graph_from_inputs(self._values) self._g_manager = g.as_default() self._g_manager.__enter__() try: self._name_scope = g.name_scope(self._name) return self._name_scope.__enter__() except: self._g_manager.__exit__(*sys.exc_info()) raise def __exit__(self, type_arg, value_arg, traceback_arg): if self._has_symbolic_input_in_eager: self._name_scope.__exit__(type_arg, value_arg, traceback_arg) elif self._in_eager_mode: self._ctx.scope_name = self._old_name else: self._name_scope.__exit__(type_arg, value_arg, traceback_arg) self._g_manager.__exit__(type_arg, value_arg, traceback_arg) return False # False values do not suppress exceptions @tf_export("name_scope", v1=[]) class name_scope_v2(name_scope): """A context manager for use when defining a Python op. This context manager pushes a name scope, which will make the name of all operations added within it have a prefix. For example, to define a new Python op called `my_op`: ```python def my_op(a, b, c, name=None): with tf.name_scope("MyOp") as scope: a = tf.convert_to_tensor(a, name="a") b = tf.convert_to_tensor(b, name="b") c = tf.convert_to_tensor(c, name="c") # Define some computation that uses `a`, `b`, and `c`. 
return foo_op(..., name=scope) ``` When executed, the Tensors `a`, `b`, `c`, will have names `MyOp/a`, `MyOp/b`, and `MyOp/c`. If the scope name already exists, the name will be made unique by appending `_n`. For example, calling `my_op` the second time will generate `MyOp_1/a`, etc. """ def __init__(self, name): """Initialize the context manager. Args: name: The prefix to use on all names created within the name scope. Raises: ValueError: If name is None, or not a string. """ if name is None or not isinstance(name, six.string_types): raise ValueError("name for name_scope must be a string.") super(name_scope_v2, self).__init__(name=None, default_name=name) def strip_name_scope(name, export_scope): """Removes name scope from a name. Args: name: A `string` name. export_scope: Optional `string`. Name scope to remove. Returns: Name with name scope removed, or the original name if export_scope is None. """ if export_scope: if export_scope[-1] == "/": export_scope = export_scope[:-1] try: # Strips export_scope/, export_scope///, # ^export_scope/, loc:@export_scope/. str_to_replace = r"([\^]|loc:@|^)" + export_scope + r"[\/]+(.*)" return re.sub(str_to_replace, r"\1\2", compat.as_str(name), count=1) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name def prepend_name_scope(name, import_scope): """Prepends name scope to a name. Args: name: A `string` name. import_scope: Optional `string`. Name scope to add. Returns: Name with name scope added, or the original name if import_scope is None. """ if import_scope: if import_scope[-1] == "/": import_scope = import_scope[:-1] try: str_to_replace = r"([\^]|loc:@|^)(.*)" return re.sub(str_to_replace, r"\1" + import_scope + r"/\2", compat.as_str(name)) except TypeError as e: # If the name is not of a type we can process, simply return it. logging.warning(e) return name else: return name # pylint: disable=g-doc-return-or-yield # pylint: disable=not-context-manager @tf_export(v1=["op_scope"]) @tf_contextlib.contextmanager def op_scope(values, name, default_name=None): """DEPRECATED. Same as name_scope above, just different argument order.""" logging.warn("tf.op_scope(values, name, default_name) is deprecated," " use tf.name_scope(name, default_name, values)") with name_scope(name, default_name=default_name, values=values) as scope: yield scope _proto_function_registry = registry.Registry("proto functions") def register_proto_function(collection_name, proto_type=None, to_proto=None, from_proto=None): """Registers `to_proto` and `from_proto` functions for collection_name. `to_proto` function converts a Python object to the corresponding protocol buffer, and returns the protocol buffer. `from_proto` function converts protocol buffer into a Python object, and returns the object.. Args: collection_name: Name of the collection. proto_type: Protobuf type, such as `saver_pb2.SaverDef`, `variable_pb2.VariableDef`, `queue_runner_pb2.QueueRunnerDef`.. to_proto: Function that implements Python object to protobuf conversion. from_proto: Function that implements protobuf to Python object conversion. 
""" if to_proto and not callable(to_proto): raise TypeError("to_proto must be callable.") if from_proto and not callable(from_proto): raise TypeError("from_proto must be callable.") _proto_function_registry.register((proto_type, to_proto, from_proto), collection_name) def get_collection_proto_type(collection_name): """Returns the proto_type for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[0] except LookupError: return None def get_to_proto_function(collection_name): """Returns the to_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[1] except LookupError: return None def get_from_proto_function(collection_name): """Returns the from_proto function for collection_name.""" try: return _proto_function_registry.lookup(collection_name)[2] except LookupError: return None def _operation_conversion_error(op, dtype=None, name=None, as_ref=False): """Produce a nice error if someone converts an Operation to a Tensor.""" raise TypeError(("Can't convert Operation '%s' to Tensor " "(target dtype=%r, name=%r, as_ref=%r)") % (op.name, dtype, name, as_ref)) def _op_to_colocate_with(v): """Operation object corresponding to v to use for colocation constraints.""" if v is None: return None if isinstance(v, Operation): return v # We always want to colocate with the reference op. # When 'v' is a ResourceVariable, the reference op is the handle creating op. # # What this should be is: # if isinstance(v, ResourceVariable): # return v.handle.op # However, that would require a circular import dependency. # As of October 2018, there were attempts underway to remove # colocation constraints altogether. Assuming that will # happen soon, perhaps this hack to work around the circular # import dependency is acceptable. if hasattr(v, "handle") and hasattr(v.handle, "op") and isinstance( v.handle.op, Operation): return v.handle.op return internal_convert_to_tensor_or_indexed_slices(v, as_ref=True).op def _is_keras_symbolic_tensor(x): return hasattr(x, "graph") and getattr(x.graph, "name", None) == "keras_graph" register_tensor_conversion_function(Operation, _operation_conversion_error)
apache-2.0
4,550,424,314,189,283,000
35.490944
116
0.666058
false
3.969494
false
false
false
andrew-blais/VirtualMachine
model.py
1
23863
#!/usr/bin/python2.7 # Copyright 2014 by Andrew L. Blais. # This program is distributed under the terms of the # GNU General Public License version 3. from constants import Range8, Egnar8, Egnar16, listToList, \ listToLists, listsToList, RangeMEM from random import randint from thread import start_new_thread #from _thread import start_new_thread #import threading class model: # ===== Load =================================================================== def loadA(self): self.setMessage("load A") listToList(self.DATABUS, self.A) self.paintView() def loadB(self): self.setMessage("load B") listToList(self.DATABUS, self.B) self.updateALU() self.paintView() def loadC(self): self.setMessage("load C") listToList(self.DATABUS, self.C) self.updateALU() self.paintView() def loadD(self): self.setMessage("load D") listToList(self.DATABUS, self.D) self.paintView() def loadM1(self): self.setMessage("load M1") listToList(self.DATABUS, self.M1) self.paintView() def loadM2(self): self.setMessage("load M2") listToList(self.DATABUS, self.M2) self.paintView() def loadX(self): self.setMessage("load X") listToList(self.DATABUS, self.X) self.paintView() def loadY(self): self.setMessage("load Y") listToList(self.DATABUS, self.Y) self.paintView() def loadJ1(self): self.setMessage("load J1") listToList(self.DATABUS, self.J1) self.paintView() def loadJ2(self): self.setMessage("load J2") listToList(self.DATABUS, self.J2) self.paintView() def loadInst(self): self.setMessage("load Inst") listToList(self.DATABUS, self.Inst) self.paintView() def loadXY(self): self.setMessage("load XY") listToLists(self.X, self.Y, self.ADDRESSBUS) self.paintView() def loadPC(self): self.setMessage("load PC") listToLists(self.PC1, self.PC2, self.ADDRESSBUS) self.paintView() def loadINC(self): self.setMessage("load INC") listToList(self.IncUnit1, self.Inc1) listToList(self.IncUnit2, self.Inc2) self.paintView() # ===== Select ================================================================= def selectA(self): self.setMessage("select A") listToList(self.A, self.DATABUS) self.paintView() def selectB(self): self.setMessage("select B") listToList(self.B, self.DATABUS) self.paintView() def selectC(self): self.setMessage("select C") listToList(self.C, self.DATABUS) self.paintView() def selectD(self): self.setMessage("select D") listToList(self.D, self.DATABUS) self.paintView() def selectM1(self): self.setMessage("select M1") listToList(self.M1, self.DATABUS) self.paintView() def selectM2(self): self.setMessage("select M2") listToList(self.M2, self.DATABUS) self.paintView() def selectX(self): self.setMessage("select X") listToList(self.X, self.DATABUS) self.paintView() def selectY(self): self.setMessage("select Y") listToList(self.Y, self.DATABUS) self.paintView() def selectM(self): self.setMessage("select M") listsToList(self.M1, self.M2, self.ADDRESSBUS) self.updateIncrUnit() self.paintView() def selectXY(self): self.setMessage("select XY") listsToList(self.X, self.Y, self.ADDRESSBUS) self.updateIncrUnit() self.paintView() def selectJ(self): self.setMessage("select J") listsToList(self.J1, self.J2, self.ADDRESSBUS) self.updateIncrUnit() self.paintView() def selectPC(self): self.setMessage("select PC") listsToList(self.PC1, self.PC2, self.ADDRESSBUS) self.updateIncrUnit() self.paintView() def selectINC(self): self.setMessage("select INC") listsToList(self.Inc1, self.Inc2, self.ADDRESSBUS) self.updateIncrUnit() self.paintView() # ===== ALU ==================================================================== def setFUNCTION(self, f): 
self.oldFUNCTION = self.FUNCTION[:] listToList(f, self.FUNCTION) self.updateALU() def updateF0(self): listToList(self.FUNCTION, self.oldFUNCTION) self.FUNCTION[0] = (0 if self.FUNCTION[0] == 1 else 1) self.updateALU() self.paintView() def updateF1(self): listToList(self.FUNCTION, self.oldFUNCTION) self.FUNCTION[1] = (0 if self.FUNCTION[1] == 1 else 1) self.updateALU() self.paintView() def updateF2(self): listToList(self.FUNCTION, self.oldFUNCTION) self.FUNCTION[2] = (0 if self.FUNCTION[2] == 1 else 1) self.updateALU() self.paintView() # ===== Mathematics ============================================================ def getSum(self, k, b, c): return int(((not k) and (not b) and c) or \ ((not k) and b and (not c)) or \ (k and (not b) and (not c)) or \ (k and b and c)) def getCarry(self, k, b, c): return int(((not k) and b and c ) or \ (k and (not b) and c) or \ (k and b and (not c)) or \ (k and b and c)) def addBandC(self): self.ADDcarry = 0 for i in Egnar8: b = self.B[i] c = self.C[i] self.ADD[i] = self.getSum(self.ADDcarry, b, c) self.ADDcarry = self.getCarry(self.ADDcarry, b, c) def incB(self): self.INCcarry = 1 for i in Egnar8: b = self.B[i] self.INC[i] = self.getSum(self.INCcarry, b, 0) self.INCcarry = self.getCarry(self.INCcarry, b, 0) def shlB(self): x = self.B[:] x = x[1:] + [x[0]] listToList(x, self.SHL) # ===== Update ================================================================= def updateALU(self): self.updateFunctions() self.updateDatabus() self.updateStates() def updateFunctions(self): self.addBandC() self.incB() self.shlB() for i in Range8: b = self.B[i] c = self.C[i] self.AND[i] = int(b and c) self.OR[i] = int(b or c) self.NOT[i] = (0 if b == 1 else 1) self.XOR[i] = int(b ^ c) def updateDatabus(self): f = tuple(self.FUNCTION) F = self.functionLabelsDictionary[f] listToList(F, self.DATABUS) # Sets DATABUS relative to current function # as linked in functionLabelsDictionary. def updateStates(self): self.setCarryState() self.setZeroState() self.setSignState() def setCarryState(self): self.CARRY = int(self.ADDcarry == 1 or self.INCcarry == 1) def setZeroState(self): self.ZERO = int(self.DATABUS == [0,0,0,0,0,0,0,0]) def setSignState(self): self.SIGN = int(self.DATABUS[0] == 1) # ===== BUSES ================================================================== def setADDRESSBUSpart(self, i): self.ADDRESSBUS[i] = (1 if self.ADDRESSBUS[i] == 0 else 0) self.updateIncrUnit() self.paintView() def setDATABUSwhole(self, x): listToList(x, self.DATABUS) def setDATABUSpart(self, i): self.DATABUS[i] = (1 if self.DATABUS[i] == 0 else 0) self.paintView() # ===== Increment Unit ========================================================= def updateIncrUnit(self): Cy = 1 x = [0]*16 for i in Egnar16: A = self.ADDRESSBUS[i] x[i] = self.getSum(Cy, A, 0) Cy = self.getCarry(Cy, A, 0) listToList(x[0:8], self.IncUnit1) listToList(x[8:16], self.IncUnit2) # ===== Memory ================================================================= def increment(self, A): Cy = 1 R = [0] * len(A) # Since this is little endian, a reversed list is needed for # the for loop. 
L = list( range( len(A) ) ) L.reverse() for i in L: R[i] = self.getSum(Cy, A[i], 0) Cy = self.getCarry(Cy, A[i], 0) return R def mkMemory(self): A = [0]*15 R = {} for unused_i in RangeMEM: R.update({tuple(A) : [0,0,0,0,0,0,0,0]}) A = self.increment(A) return R def getMEMORY(self): return self.MEMORY[tuple(self.MEMORYADDRESS)] def addressbusToMemoryAddress(self): listToList(self.ADDRESSBUS[1:], self.MEMORYADDRESS) self.paintView() self.setMessage("Address bus to memory address: BusToMem") def readMemoryToDatabus(self): listToList(self.MEMORY[tuple(self.MEMORYADDRESS)], self.DATABUS) self.paintView() self.setMessage("Write memory to databus: WRITE MEM") def writeDatabusToMemory(self): listToList(self.DATABUS, self.MEMORY[tuple(self.MEMORYADDRESS)]) self.paintView() self.setMessage("Write databus to memory: READ MEM") def CLEARMEM(self): self.setMessage("Clear Memory: start") A = [0]*15 for unused_i in RangeMEM: listToList([0,0,0,0,0,0,0,0], self.MEMORY[tuple(A)]) A = self.increment(A) self.paintView() self.setMessage("Clear Memory: end") def clearMemory(self): start_new_thread( self.CLEARMEM, () ) def RANDMEM(self): self.setMessage("Random Memory: start") A = [0]*15 for unused_i in RangeMEM: r = [ randint(0,1) for unused_i in range(8) ] listToList(r, self.MEMORY[tuple(A)]) A = self.increment(A) self.paintView() self.setMessage("Random Memory: end") def randomMemory(self): start_new_thread( self.RANDMEM, () ) def loadPGMtoMEM(self, filename): try: pgmFile = open(filename, 'r') for LINE in pgmFile: LINE = LINE.split() Address = [ int(i) for i in LINE[0]] Code = [ int(i) for i in LINE[1]] listToList(Code, self.MEMORY[tuple(Address[1:])]) pgmFile.close() fn = filename.split('/') self.setMessage("Loaded " + fn[len(fn) - 1] + " to MEMORY") self.paintView() except IOError: self.setMessage("File IO Error") # ===== CALLBACKS ============================================================== def setPaintCallback(self, cb): self.paintView = cb def setMessageCallback(self, tcb): self.setMessage = tcb # ===== Fetch, Increment & Execute ============================================= def FETCH(self): self.setMessage("<<< FETCH >>>") self.selectPC() self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadInst() def INCREMENT(self): self.setMessage("<<< INCREMENT >>>") self.loadINC() self.selectINC() self.loadPC() def MOVfunction(self): self.setMessage("MOVE") if self.Inst[2:5] == self.Inst[5:8]: self.setDATABUSwhole([0,0,0,0,0,0,0,0]) self.setMessage("D = S: set to [0,0,0,0,0,0,0,0]") else: self.setMessage("Moving stuff: ") self.regSelectMap[tuple(self.Inst[5:8])]() self.regLoadMap[tuple(self.Inst[2:5])]() def SETABfunction(self): self.setMessage("SETABfunction") p = [1,1,1] if self.Inst[3] == 1 else [0,0,0] # Since the negative numbers are represented by "two's # complement" the first three digits will be either 0s or # 1s depending on whether the number is positive, zero or # negative. This fixes that. 
self.setDATABUSwhole(p + self.Inst[3:8]) if self.Inst[2] == 0: self.loadA() else: self.loadB() self.setMessage(str(p + self.Inst[3:8])) def ALUfunction(self): self.setMessage("ALU function: " + str(self.Inst[5:8])) self.setFUNCTION(self.Inst[5:8]) if self.Inst[4] == 0: self.loadA() else: self.loadD() def LOADfunction(self): self.setMessage("LOADfunction") self.selectM() self.addressbusToMemoryAddress() self.readMemoryToDatabus() if self.Inst[6:8] == [0,0]: self.loadA() else: if self.Inst[6:8] == [0,1]: self.loadB() else: if self.Inst[6:8] == [1,0]: self.loadC() else: self.loadD() def STOREfunction(self): self.setMessage("STOREfunction") self.selectM() if self.Inst[6:8] == [0,0]: self.selectA() else: if self.Inst[6:8] == [0,1]: self.selectB() else: if self.Inst[6:8] == [1,0]: self.selectC() else: self.selectD() self.addressbusToMemoryAddress() self.writeDatabusToMemory() def RET_MOV16function(self): self.setMessage("RETURN / MOVE 16 bits: " + str(self.Inst)) RUN = True if self.Inst[5:7] == [1,1]: self.setMessage("HALT ") # Set PC to zero................................ listToList([0,0,0,0,0,0,0,0], self.PC1) listToList([0,0,0,0,0,0,0,0], self.PC2) RUN = False else: self.setMessage("MOV16") if self.Inst[4] == 0: # d is XY if self.Inst[5:7] == [0,0]: self.selectM() if self.Inst[5:7] == [0,1]: # What would Harry's machine do? self.selectXY() if self.Inst[5:7] == [1,0]: self.selectJ() self.loadXY() else: # d is PC if self.Inst[5:7] == [0,0]: self.selectM() if self.Inst[5:7] == [0,1]: self.selectXY() if self.Inst[5:7] == [1,0]: self.selectJ() self.loadPC() return RUN def INCfunction(self): self.setMessage("INC: XY > XY + 1") self.selectXY() self.loadINC() self.selectINC() self.loadXY() def SETMfunction(self): self.setMessage("SETMfunction: Move next 16 bits to M") self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadM1() self.loadINC() self.selectINC() self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadM2() self.loadINC() self.selectINC() self.loadPC() def GOTOfunction(self): self.setMessage("GOTOfunction: set address bus, PC, to next 16 bits") self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadJ1() self.loadINC() self.selectINC() self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadJ2() self.selectJ() self.loadPC() def CALLfunction(self): self.setMessage("CALLfunction: set address bus to next 16 bits & PC => XY") # CALLfunction is like GOTOfunction except that the address of the next instruction # after CALLfunction is saved in XY. 
self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadJ1() self.loadINC() self.selectINC() self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadJ2() self.loadINC() self.selectINC() self.loadXY() self.selectJ() self.loadPC() def BCfunction(self): self.setMessage("Branch Conditionally") C0 = (self.Inst[3] == 1) and (self.SIGN == 1) C1 = (self.Inst[4] == 1) and (self.CARRY == 0) C2 = (self.Inst[5] == 1) and (self.ZERO == 1) C3 = (self.Inst[6] == 1) and (self.ZERO == 0) c0 = " S1+ " if self.Inst[3] == 1 else " S1- " c1 = "Cy0+ " if self.Inst[4] == 1 else "Cy0- " c2 = " Z1+ " if self.Inst[5] == 1 else " Z1- " c3 = " Z0+ " if self.Inst[6] == 1 else " Z0- " a0 = "S=1" if self.SIGN == 1 else "S=0" a1 = "Cy=0" if self.CARRY == 0 else "Cy=1" a2 = "Z=1" if self.ZERO == 1 else "Z=0" a3 = "Z=0" if self.ZERO == 0 else "Z=1" m0 = c0 + " " + a0 + "\n" m1 = c1 + " " + a1 + "\n" m2 = c2 + " " + a2 + "\n" m3 = c3 + " " + a3 M = m0 + m1 + m2 + m3 self.setMessage(M) if C0 or C1 or C2 or C3: self.setMessage("Branch") self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadJ1() self.loadINC() self.selectINC() self.addressbusToMemoryAddress() self.readMemoryToDatabus() self.loadJ2() self.selectJ() self.loadPC() else: self.setMessage("No Branch") self.loadINC() self.selectINC() self.loadINC() self.selectINC() self.loadPC() def EXECUTE(self): self.setMessage("<<< EXECUTE >>>") RUN = True if self.Inst[0] == 0: if self.Inst[1] == 0: self.MOVfunction() else: self.SETABfunction() else: if self.Inst[1] == 0: if self.Inst[2] == 0: if self.Inst[3] == 0: self.ALUfunction() else: if self.Inst[4] == 0: self.LOADfunction() else: self.STOREfunction() else: if self.Inst[3] == 0: RUN = self.RET_MOV16function() else: self.INCfunction() else: if self.Inst[2] == 0: self.SETMfunction() else: if self.Inst[5:7] == [1,1]: if self.Inst[7] == 0: self.GOTOfunction() else: self.CALLfunction() else: self.BCfunction() self.setMessage("*"*50) return RUN # ===== RUN & CONTROLS ========================================================= def step(self): self.PAUSE = False def pause(self): while self.PAUSE == True: pass self.PAUSE = True def noStep(self): self.NOSTEP = True if self.NOSTEP == False else False self.paintView() def FetchIncrementExecute(self): self.PAUSE = True self.RUN = True while self.RUN == True: self.FETCH() self.INCREMENT() self.RUN = self.EXECUTE() if self.RUN == True and self.NOSTEP == False: self.pause() # Make time to inspect the machine.... self.setMessage("="*50) def run(self): start_new_thread( self.FetchIncrementExecute, () ) # self.T = threading.Thread(target = self.FetchIncrementExecute ) # TL = threading.Lock() # TL.acquire() # self.T.start() # TL.release() # ===== Initialization ========================================================= def __init__(self): self.DATABUS = [0,0,0,0,0,0,0,0] self.ADDRESSBUS = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0] self.MEMORYADDRESS = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0 ] # Memory addresses are 15 bits, but the address # bus is 16 bits. So, the highest bit is dropped. 
self.MEMORY = self.mkMemory() self.Inst = [0,0,0,0,0,0,0,0] self.PGM = [] self.FUNCTION = [0,0,0] self.oldFUNCTION = [0,0,0] self.CARRY = 0 self.ADDcarry = 0 self.INCcarry = 0 self.SIGN = 0 self.ZERO = 0 self.PAUSE = True self.NOSTEP = False self.RUN = True self.A = [0,0,0,0,0,0,0,0] self.B = [0,0,0,0,0,0,0,0] self.C = [0,0,0,0,0,0,0,0] self.D = [0,0,0,0,0,0,0,0] self.M1 = [0,0,0,0,0,0,0,0] self.M2 = [0,0,0,0,0,0,0,0] self.X = [0,0,0,0,0,0,0,0] self.Y = [0,0,0,0,0,0,0,0] self.J1 = [0,0,0,0,0,0,0,0] self.J2 = [0,0,0,0,0,0,0,0] # Program Counter self.PC1 = [0,0,0,0,0,0,0,0] self.PC2 = [0,0,0,0,0,0,0,0] # Increment Unit # This is always the address bus plus one. self.IncUnit1 = [0,0,0,0,0,0,0,0] self.IncUnit2 = [0,0,0,0,0,0,0,0] # IncUnit is loaded into Inc # Inc is selected onto the address bus self.Inc1 = [0,0,0,0,0,0,0,0] self.Inc2 = [0,0,0,0,0,0,0,0] self.ADD = [0,0,0,0,0,0,0,0] self.INC = [0,0,0,0,0,0,0,0] self.AND = [0,0,0,0,0,0,0,0] self.OR = [0,0,0,0,0,0,0,0] self.XOR = [0,0,0,0,0,0,0,0] self.NOT = [0,0,0,0,0,0,0,0] self.SHL = [0,0,0,0,0,0,0,0] self.CLR = [0,0,0,0,0,0,0,0] # ===== Dictionaries =========================================================== self.functionLabelsDictionary = { (0,0,0) : self.ADD, \ (0,0,1) : self.INC, \ (0,1,0) : self.AND, \ (0,1,1) : self.OR, \ (1,0,0) : self.XOR, \ (1,0,1) : self.NOT, \ (1,1,0) : self.SHL, \ (1,1,1) : self.CLR \ } self.regLoadMap = { (0,0,0) : self.loadA, \ (0,0,1) : self.loadB, \ (0,1,0) : self.loadC, \ (0,1,1) : self.loadD, \ (1,0,0) : self.loadM1, \ (1,0,1) : self.loadM2, \ (1,1,0) : self.loadX, \ (1,1,1) : self.loadY } self.regSelectMap = { (0,0,0) : self.selectA, \ (0,0,1) : self.selectB, \ (0,1,0) : self.selectC, \ (0,1,1) : self.selectD, \ (1,0,0) : self.selectM1, \ (1,0,1) : self.selectM2, \ (1,1,0) : self.selectX, \ (1,1,1) : self.selectY } # ============================================================================== self.updateALU() self.updateIncrUnit() # ===== END Initialization =====================================================
gpl-3.0
2,728,928,531,485,198,300
30.112125
92
0.471315
false
3.480601
false
false
false
fernandog/Medusa
medusa/providers/torrent/html/morethantv.py
1
9151
# coding=utf-8 """Provider code for MoreThanTV.""" from __future__ import unicode_literals import logging import re import time from medusa import tv from medusa.bs4_parser import BS4Parser from medusa.helper.common import ( convert_size, try_int, ) from medusa.helper.exceptions import AuthException from medusa.logger.adapters.style import BraceAdapter from medusa.providers.torrent.torrent_provider import TorrentProvider from requests.compat import urljoin from requests.utils import dict_from_cookiejar from six.moves.urllib_parse import parse_qs log = BraceAdapter(logging.getLogger(__name__)) log.logger.addHandler(logging.NullHandler()) class MoreThanTVProvider(TorrentProvider): """MoreThanTV Torrent provider.""" def __init__(self): """Initialize the class.""" super(MoreThanTVProvider, self).__init__('MoreThanTV') # Credentials self.username = None self.password = None self._uid = None self._hash = None # URLs self.url = 'https://www.morethan.tv/' self.urls = { 'login': urljoin(self.url, 'login.php'), 'search': urljoin(self.url, 'torrents.php'), } # Proper Strings self.proper_strings = ['PROPER', 'REPACK'] # Miscellaneous Options # Torrent Stats self.minseed = None self.minleech = None # Cache self.cache = tv.Cache(self) def search(self, search_strings, age=0, ep_obj=None, **kwargs): """Search a provider and parse the results. :param search_strings: A dict with mode (key) and the search value (value) :param age: Not used :param ep_obj: Not used :returns: A list of search results (structure) """ results = [] if not self.login(): return results # Search Params search_params = { 'tags_type': 1, 'order_by': 'time', 'order_way': 'desc', 'action': 'basic', 'group_results': 0, 'searchsubmit': 1, 'searchstr': '', } for mode in search_strings: log.debug('Search mode: {0}', mode) if mode == 'Season': additional_strings = [] for search_string in search_strings[mode]: additional_strings.append(re.sub(r'(.*)S0?', r'\1Season ', search_string)) search_strings[mode].extend(additional_strings) for search_string in search_strings[mode]: if mode != 'RSS': log.debug('Search string: {search}', {'search': search_string}) search_params['searchstr'] = search_string response = self.session.get(self.urls['search'], params=search_params) if not response or not response.text: log.debug('No data returned from provider') continue results += self.parse(response.text, mode) return results def parse(self, data, mode): """ Parse search results for items. :param data: The raw response from a search :param mode: The current mode used to search, e.g. 
RSS :return: A list of items found """ def process_column_header(td): result = '' if td.a and td.a.img: result = td.a.img.get('title', td.a.get_text(strip=True)) if not result: result = td.get_text(strip=True) return result items = [] with BS4Parser(data, 'html5lib') as html: torrent_table = html.find('table', class_='torrent_table') torrent_rows = torrent_table('tr') if torrent_table else [] # Continue only if at least one release is found if len(torrent_rows) < 2: log.debug('Data returned from provider does not contain any torrents') return items labels = [process_column_header(label) for label in torrent_rows[0]('td')] # Skip column headers for row in torrent_rows[1:]: cells = row('td') if len(cells) < len(labels): continue try: # Skip if torrent has been nuked due to poor quality if row.find('img', alt='Nuked'): continue title = row.find('a', title='View torrent').get_text(strip=True) download_url = urljoin(self.url, row.find('span', title='Download').parent['href']) if not all([title, download_url]): continue seeders = try_int(cells[labels.index('Seeders')].get_text(strip=True).replace(',', ''), 1) leechers = try_int(cells[labels.index('Leechers')].get_text(strip=True).replace(',', '')) # Filter unseeded torrent if seeders < min(self.minseed, 1): if mode != 'RSS': log.debug("Discarding torrent because it doesn't meet the" " minimum seeders: {0}. Seeders: {1}", title, seeders) continue # If it's a season search, query the torrent's detail page. if mode == 'Season': title = self._parse_season(row, download_url, title) torrent_size = cells[labels.index('Size')].get_text(strip=True) size = convert_size(torrent_size) or -1 pubdate_raw = cells[labels.index('Time')].find('span')['title'] pubdate = self.parse_pubdate(pubdate_raw) item = { 'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': pubdate, } if mode != 'RSS': log.debug('Found result: {0} with {1} seeders and {2} leechers', title, seeders, leechers) items.append(item) except (AttributeError, TypeError, KeyError, ValueError, IndexError): log.exception('Failed parsing provider.') return items def login(self): """Login method used for logging in before doing search and torrent downloads.""" if any(dict_from_cookiejar(self.session.cookies).values()): return True login_params = { 'username': self.username, 'password': self.password, 'keeplogged': '1', 'login': 'Log in', } response = self.session.post(self.urls['login'], data=login_params) if not response or not response.text: log.warning('Unable to connect to provider') return False if re.search('Your username or password was incorrect.', response.text): log.warning('Invalid username or password. 
Check your settings') return False return True def _check_auth(self): if not self.username or not self.password: raise AuthException('Your authentication credentials for {0} are missing,' ' check your config.'.format(self.name)) return True def _parse_season(self, row, download_url, title): """Parse the torrent's detail page and return the season pack title.""" details_url = row.find('span').find_next(title='View torrent').get('href') torrent_id = parse_qs(download_url).get('id') if not all([details_url, torrent_id]): log.debug("Couldn't parse season pack details page for title: {0}", title) return title # Take a break before querying the provider again time.sleep(0.5) response = self.session.get(urljoin(self.url, details_url)) if not response or not response.text: log.debug("Couldn't open season pack details page for title: {0}", title) return title with BS4Parser(response.text, 'html5lib') as html: torrent_table = html.find('table', class_='torrent_table') torrent_row = torrent_table.find('tr', id='torrent_{0}'.format(torrent_id[0])) if not torrent_row: log.debug("Couldn't find season pack details for title: {0}", title) return title # Strip leading and trailing slash season_title = torrent_row.find('div', class_='filelist_path') if not season_title or not season_title.get_text(): log.debug("Couldn't parse season pack title for: {0}", title) return title return season_title.get_text(strip=True).strip('/') provider = MoreThanTVProvider()
gpl-3.0
2,425,226,955,275,046,400
34.607004
110
0.538083
false
4.435773
false
false
false
igrowing/Orchids
orchid_app/utils/__init__.py
1
2936
import re
import time
import pushb
import cPickle
import hashlib
import sendmail

from functools import wraps


class Dict(dict):
    """Represent dictionary items as object attributes."""

    def filter(self, *args):
        return Dict((k, v) for k, v in self.items() if k in args)

    def __getattr__(self, name):
        if name in self.keys():
            return self[name]
        for key in self.keys():
            if name == _namify(key):
                return self[key]
        return dict.__getattribute__(self, name)

    def __setattr__(self, name, value):
        self[name] = value

    def _getAttributeNames(self, *args, **kwargs):
        """Support auto completion of keys in iPython."""
        return map(_namify, self.keys())


def _namify(key):
    return re.sub(r'[^\w]+', '_', key.lower()).strip('_')


def dictify(obj, _dict=Dict, _list=list):
    if hasattr(obj, '_dictify'):
        obj = obj._dictify()
    if isinstance(obj, dict):
        return _dict((k, dictify(v, _dict, _list)) for k, v in obj.items())
    elif hasattr(obj, '__iter__'):
        return _list(dictify(v, _dict, _list) for v in obj)
    return obj


def as_key(obj):
    try:
        hash(obj)
        return obj
    except:
        return hashlib.md5(cPickle.dumps(obj)).hexdigest()


def memoize(keep=True, cache=None):
    '''Decorator: provide timed keeping functions results in memory.

    @:param keep: Boolean or number. Boolean keep or discards the cached data.
        Number defines time in seconds to keep the cache with further discard of cache.
    @:param cache: empty dict. Separated cache names can be used if needed
        to keep similar function from different places.
    '''
    if cache is None:
        cache = {}
    INF = -1

    def _memoize0(func):
        @wraps(func)
        def _memoize1(*args, **kwargs):
            refresh = dict.pop(kwargs, '_refresh', False)
            timeout = dict.pop(kwargs, '_memoize', keep)
            timeout = INF if timeout is True else int(timeout)
            # Get the key name
            key = as_key((func, args, tuple(kwargs.items())))
            if refresh:
                cache.pop(key, None)
            if timeout and key in cache:
                t0, v = cache.get(key)
                if t0 == INF or t0 >= time.time():
                    return v
            value = func(*args, **kwargs)
            if not timeout:
                cache.pop(key, None)
                return value
            t0 = INF if timeout == INF else time.time() + timeout
            cache[key] = (t0, value)
            return value
        return _memoize1
    return _memoize0


def flatten_dict(init_dict):
    res_dict = {}
    if type(init_dict) is not dict:
        return res_dict
    for k, v in init_dict.iteritems():
        if type(v) == dict:
            res_dict.update(flatten_dict(v))
        else:
            res_dict[k] = v
    return res_dict
mit
6,515,311,847,145,798,000
27.504854
122
0.560627
false
3.878468
false
false
false
LamCiuLoeng/fd
rpac/util/layout_pdf.py
1
4038
# -*- coding: utf-8 -*-
import traceback
import os
import random
# import json
import subprocess
import zipfile
import zlib
from datetime import datetime as dt

from tg import request, config

from rpac.model import *

__all__ = ['gen_pdf', 'null_string_sizes', 'format_fibers', 'format_cares', 'format_coo', 'format_list']

CARES = ["WASH", "BLEACH", "IRON", "DRY", "DRYCLEAN", "SPECIALCARE"]


def gen_pdf(header_no, details):
    try:
        public_dir = config.get('public_dir')
        download_dir = os.path.join(public_dir, 'layout_pdf')
        if not os.path.exists(download_dir):
            os.makedirs(download_dir)

        phantomjs = os.path.join(public_dir, 'phantomjs', 'phantomjs.exe')
        labeljs = os.path.join(public_dir, 'phantomjs', 'pdf.js')

        pdfs = []
        for detail_id, item_code in details:
            http_url = 'http://%s/pdflayout/index?id=%s' % (request.headers.get('Host'), detail_id)
            _name = '%s_%s%d' % (trim(item_code), dt.now().strftime("%Y%m%d%H%M%S"), random.randint(1, 1000))
            pdf_file = os.path.join(download_dir, '%s.pdf' % _name)
            cmd = '%s %s %s %s' % (phantomjs, labeljs, http_url, pdf_file)
            # print cmd
            sp = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
            while 1:
                if sp.poll() is not None:
                    # print 'exec command completed.'
                    break
                # else:
                #     line = sp.stdout.readline().strip()
            pdfs.append(pdf_file)

        pd_zip_file = os.path.join(download_dir, "%s_pdf_%s%d.zip" % (trim(header_no), dt.now().strftime("%Y%m%d%H%M%S"), random.randint(1, 1000)))
        create_zip(pd_zip_file, pdfs)
        remove_files(pdfs)
        return pd_zip_file
    except:
        traceback.print_exc()
        return None


def create_zip(zipf, files):
    _zip = zipfile.ZipFile(zipf, 'w', zlib.DEFLATED)
    for f in files:
        if os.path.exists(f):
            _zip.write(os.path.abspath(f), os.path.basename(f))
    _zip.close()
    return zipf


def remove_files(files):
    for f in files:
        remove_file(f)


def remove_file(file):
    try:
        os.remove(file)
    except:
        pass


def trim(s):
    return ''.join(s.split())


def null_string_sizes(data):
    null_list = data.get('SIZE', {'values': []})['values']
    if not null_list:
        return ['']
    return null_list


def format_fibers(data, capitalize=False):
    fibers = {'en': [], 'sp': []}
    for ff in data['FIBERS']['values']:
        if ff:
            if capitalize:
                fibers['en'].append('%s%% %s' % (ff['percent'], ff['english'].lower().capitalize()))
                fibers['sp'].append('%s%% %s' % (ff['percent'], ff['spanish'].lower().capitalize()))
            else:
                fibers['en'].append('%s%% %s' % (ff['percent'], ff['english']))
                fibers['sp'].append('%s%% %s' % (ff['percent'], ff['spanish']))
    # print fibers
    return fibers


def format_cares(data):
    cares = {'en': [], 'sp': []}
    for cs in CARES:
        cc = data.get(cs, {'values': []})
        for c in cc['values']:
            # print '****', c
            cares['en'].append(c['english'])
            cares['sp'].append(c['spanish'])
    return cares


def format_coo(data):
    coos = {'en': [], 'sp': []}
    for coo in data['CO']['values']:
        coos['en'].append(coo['english'])
        coos['sp'].append(coo['spanish'])
    return coos


def format_list(ll, method=None, s=''):
    if method:
        return s.join([getattr(l, method)() for l in ll if l])
    return s.join(ll)


def format_list2(ll):
    return [l.lower().capitalize() for l in ll if l]


def format_price(data):
    try:
        price = '$%.2f' % float(data['PRICE']['values'][0])
        return price
    except:
        return '$0.00'
mit
5,333,141,877,359,527,000
22.614035
154
0.521545
false
3.248592
false
false
false
isstiaung/Adimal
adimal/twitter_feed/get_metadata.py
1
2472
from __future__ import absolute_import
from __future__ import division, print_function, unicode_literals

import requests
import urllib
from newspaper import Article
import string
from . import config

from sumy.parsers.html import HtmlParser
from sumy.parsers.plaintext import PlaintextParser
from sumy.nlp.tokenizers import Tokenizer
from sumy.summarizers.lsa import LsaSummarizer as Summarizer
from sumy.nlp.stemmers import Stemmer
from sumy.utils import get_stop_words
from newspaper import Article

high_level_mapper = {"Computers": "computer", "Arts": "art", "Business": "business",
                     "Games": "game", "Health": "health", "Home": "home",
                     "Recreation": "recreation", "Science": "science",
                     "Society": "society", "Sports": "sport"}
low_level_mapper = {}

LANGUAGE = "english"
SENTENCES_COUNT = 3


def get_topics_and_category(link):
    print("link is", link)
    text = get_article_text(link)
    print("got article text")
    topics = get_both_topics(text)
    summary = get_article_summary_using_link(link)
    result = {}
    result['topics'] = topics
    result['summary'] = summary
    return result


def get_article_text(link):
    article = Article(link)
    print("set article")
    article.download()
    print("downloaded article")
    article.parse()
    print("parsed article")
    text = article.text.encode("ascii", "ignore")
    text = string.replace(text, "\n", "")
    text = string.replace(text, "*", "")
    print("replaced text")
    return text


def get_both_topics(text):
    result = {}
    key = config.uclassify_token
    dict = {"text": text, "readKey": key}
    data = urllib.urlencode(dict)
    high_level_topic = requests.get("https://api.uclassify.com/v1/uclassify/topics/classify", params=data)
    response = eval(high_level_topic.text)
    high_level = max(response.iterkeys(), key=(lambda key: response[key]))
    result['high_level'] = high_level
    url_to_call = high_level_mapper.get(high_level)
    low_level_topic = requests.get("https://api.uclassify.com/v1/uclassify/" + url_to_call + "-topics/classify", params=data)
    response = eval(low_level_topic.text)
    low_level = max(response.iterkeys(), key=(lambda key: response[key]))
    result['low_level'] = low_level
    return result


def get_article_summary_using_link(link):
    stemmer = Stemmer(LANGUAGE)
    parser = HtmlParser.from_url(link, Tokenizer(LANGUAGE))
    summarizer = Summarizer(stemmer)
    summarizer.stop_words = get_stop_words(LANGUAGE)
    summary = ""
    for sentence in summarizer(parser.document, SENTENCES_COUNT):
        summary = summary + ": " + str(sentence).decode('ascii', 'ignore') + "\n"
    return summary
mit
-8,905,665,559,059,703,000
33.830986
230
0.737864
false
3.04059
false
false
false
maheshp/novatest
nova/api/openstack/compute/contrib/volumes.py
1
22325
# Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """The volumes extension.""" import webob from webob import exc from nova.api.openstack import common from nova.api.openstack import extensions from nova.api.openstack import wsgi from nova.api.openstack import xmlutil from nova import compute from nova import exception from nova.openstack.common import log as logging from nova.openstack.common import uuidutils from nova import utils from nova import volume LOG = logging.getLogger(__name__) authorize = extensions.extension_authorizer('compute', 'volumes') authorize_attach = extensions.extension_authorizer('compute', 'volume_attachments') def _translate_volume_detail_view(context, vol): """Maps keys for volumes details view.""" d = _translate_volume_summary_view(context, vol) # No additional data / lookups at the moment return d def _translate_volume_summary_view(context, vol): """Maps keys for volumes summary view.""" d = {} d['id'] = vol['id'] d['status'] = vol['status'] d['size'] = vol['size'] d['availabilityZone'] = vol['availability_zone'] d['createdAt'] = vol['created_at'] if vol['attach_status'] == 'attached': d['attachments'] = [_translate_attachment_detail_view(vol['id'], vol['instance_uuid'], vol['mountpoint'])] else: d['attachments'] = [{}] d['displayName'] = vol['display_name'] d['displayDescription'] = vol['display_description'] if vol['volume_type_id'] and vol.get('volume_type'): d['volumeType'] = vol['volume_type']['name'] else: d['volumeType'] = vol['volume_type_id'] d['snapshotId'] = vol['snapshot_id'] LOG.audit(_("vol=%s"), vol, context=context) if vol.get('volume_metadata'): metadata = vol.get('volume_metadata') d['metadata'] = dict((item['key'], item['value']) for item in metadata) else: d['metadata'] = {} return d def make_volume(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('availabilityZone') elem.set('createdAt') elem.set('displayName') elem.set('displayDescription') elem.set('volumeType') elem.set('snapshotId') attachments = xmlutil.SubTemplateElement(elem, 'attachments') attachment = xmlutil.SubTemplateElement(attachments, 'attachment', selector='attachments') make_attachment(attachment) # Attach metadata node elem.append(common.MetadataTemplate()) class VolumeTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volume', selector='volume') make_volume(root) return xmlutil.MasterTemplate(root, 1) class VolumesTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumes') elem = xmlutil.SubTemplateElement(root, 'volume', selector='volumes') make_volume(elem) return xmlutil.MasterTemplate(root, 1) class CommonDeserializer(wsgi.MetadataXMLDeserializer): """Common deserializer to handle xml-formatted volume requests. 
Handles standard volume attributes as well as the optional metadata attribute """ metadata_deserializer = common.MetadataXMLDeserializer() def _extract_volume(self, node): """Marshal the volume attribute of a parsed request.""" vol = {} volume_node = self.find_first_child_named(node, 'volume') attributes = ['display_name', 'display_description', 'size', 'volume_type', 'availability_zone'] for attr in attributes: if volume_node.getAttribute(attr): vol[attr] = volume_node.getAttribute(attr) metadata_node = self.find_first_child_named(volume_node, 'metadata') if metadata_node is not None: vol['metadata'] = self.extract_metadata(metadata_node) return vol class CreateDeserializer(CommonDeserializer): """Deserializer to handle xml-formatted create volume requests. Handles standard volume attributes as well as the optional metadata attribute """ def default(self, string): """Deserialize an xml-formatted volume create request.""" dom = xmlutil.safe_minidom_parse_string(string) vol = self._extract_volume(dom) return {'body': {'volume': vol}} class VolumeController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(VolumeController, self).__init__() @wsgi.serializers(xml=VolumeTemplate) def show(self, req, id): """Return data about the given volume.""" context = req.environ['nova.context'] authorize(context) try: vol = self.volume_api.get(context, id) except exception.NotFound: raise exc.HTTPNotFound() return {'volume': _translate_volume_detail_view(context, vol)} def delete(self, req, id): """Delete a volume.""" context = req.environ['nova.context'] authorize(context) LOG.audit(_("Delete volume with id: %s"), id, context=context) try: vol = self.volume_api.get(context, id) self.volume_api.delete(context, vol) except exception.NotFound: raise exc.HTTPNotFound() return webob.Response(status_int=202) @wsgi.serializers(xml=VolumesTemplate) def index(self, req): """Returns a summary list of volumes.""" return self._items(req, entity_maker=_translate_volume_summary_view) @wsgi.serializers(xml=VolumesTemplate) def detail(self, req): """Returns a detailed list of volumes.""" return self._items(req, entity_maker=_translate_volume_detail_view) def _items(self, req, entity_maker): """Returns a list of volumes, transformed through entity_maker.""" context = req.environ['nova.context'] authorize(context) volumes = self.volume_api.get_all(context) limited_list = common.limited(volumes, req) res = [entity_maker(context, vol) for vol in limited_list] return {'volumes': res} @wsgi.serializers(xml=VolumeTemplate) @wsgi.deserializers(xml=CreateDeserializer) def create(self, req, body): """Creates a new volume.""" context = req.environ['nova.context'] authorize(context) if not self.is_valid_body(body, 'volume'): raise exc.HTTPUnprocessableEntity() vol = body['volume'] vol_type = vol.get('volume_type', None) metadata = vol.get('metadata', None) snapshot_id = vol.get('snapshot_id') if snapshot_id is not None: snapshot = self.volume_api.get_snapshot(context, snapshot_id) else: snapshot = None size = vol.get('size', None) if size is None and snapshot is not None: size = snapshot['volume_size'] LOG.audit(_("Create volume of %s GB"), size, context=context) availability_zone = vol.get('availability_zone', None) new_volume = self.volume_api.create(context, size, vol.get('display_name'), vol.get('display_description'), snapshot=snapshot, volume_type=vol_type, metadata=metadata, availability_zone=availability_zone ) # TODO(vish): Instance should be 
None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. retval = _translate_volume_detail_view(context, dict(new_volume)) result = {'volume': retval} location = '%s/%s' % (req.url, new_volume['id']) return wsgi.ResponseObject(result, headers=dict(location=location)) def _translate_attachment_detail_view(volume_id, instance_uuid, mountpoint): """Maps keys for attachment details view.""" d = _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint) # No additional data / lookups at the moment return d def _translate_attachment_summary_view(volume_id, instance_uuid, mountpoint): """Maps keys for attachment summary view.""" d = {} # NOTE(justinsb): We use the volume id as the id of the attachment object d['id'] = volume_id d['volumeId'] = volume_id d['serverId'] = instance_uuid if mountpoint: d['device'] = mountpoint return d def make_attachment(elem): elem.set('id') elem.set('serverId') elem.set('volumeId') elem.set('device') class VolumeAttachmentTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumeAttachment', selector='volumeAttachment') make_attachment(root) return xmlutil.MasterTemplate(root, 1) class VolumeAttachmentsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('volumeAttachments') elem = xmlutil.SubTemplateElement(root, 'volumeAttachment', selector='volumeAttachments') make_attachment(elem) return xmlutil.MasterTemplate(root, 1) class VolumeAttachmentController(wsgi.Controller): """The volume attachment API controller for the OpenStack API. A child resource of the server. Note that we use the volume id as the ID of the attachment (though this is not guaranteed externally) """ def __init__(self): self.compute_api = compute.API() super(VolumeAttachmentController, self).__init__() @wsgi.serializers(xml=VolumeAttachmentsTemplate) def index(self, req, server_id): """Returns the list of volume attachments for a given instance.""" context = req.environ['nova.context'] authorize_attach(context, action='index') return self._items(req, server_id, entity_maker=_translate_attachment_summary_view) @wsgi.serializers(xml=VolumeAttachmentTemplate) def show(self, req, server_id, id): """Return data about the given volume attachment.""" context = req.environ['nova.context'] authorize(context) authorize_attach(context, action='show') volume_id = id try: instance = self.compute_api.get(context, server_id) except exception.NotFound: raise exc.HTTPNotFound() bdms = self.compute_api.get_instance_bdms(context, instance) if not bdms: LOG.debug(_("Instance %s is not attached."), server_id) raise exc.HTTPNotFound() assigned_mountpoint = None for bdm in bdms: if bdm['volume_id'] == volume_id: assigned_mountpoint = bdm['device_name'] break if assigned_mountpoint is None: LOG.debug("volume_id not found") raise exc.HTTPNotFound() return {'volumeAttachment': _translate_attachment_detail_view( volume_id, instance['uuid'], assigned_mountpoint)} def _validate_volume_id(self, volume_id): if not uuidutils.is_uuid_like(volume_id): msg = _("Bad volumeId format: volumeId is " "not in proper format (%s)") % volume_id raise exc.HTTPBadRequest(explanation=msg) @wsgi.serializers(xml=VolumeAttachmentTemplate) def create(self, req, server_id, body): """Attach a volume to an instance.""" context = req.environ['nova.context'] authorize(context) authorize_attach(context, action='create') if not self.is_valid_body(body, 'volumeAttachment'): raise exc.HTTPUnprocessableEntity() volume_id = 
body['volumeAttachment']['volumeId'] device = body['volumeAttachment'].get('device') self._validate_volume_id(volume_id) msg = _("Attach volume %(volume_id)s to instance %(server_id)s" " at %(device)s") % locals() LOG.audit(msg, context=context) try: instance = self.compute_api.get(context, server_id) device = self.compute_api.attach_volume(context, instance, volume_id, device) except exception.NotFound: raise exc.HTTPNotFound() # The attach is async attachment = {} attachment['id'] = volume_id attachment['serverId'] = server_id attachment['volumeId'] = volume_id attachment['device'] = device # NOTE(justinsb): And now, we have a problem... # The attach is async, so there's a window in which we don't see # the attachment (until the attachment completes). We could also # get problems with concurrent requests. I think we need an # attachment state, and to write to the DB here, but that's a bigger # change. # For now, we'll probably have to rely on libraries being smart # TODO(justinsb): How do I return "accepted" here? return {'volumeAttachment': attachment} def update(self, req, server_id, id, body): """Update a volume attachment. We don't currently support this.""" raise exc.HTTPBadRequest() def delete(self, req, server_id, id): """Detach a volume from an instance.""" context = req.environ['nova.context'] authorize(context) authorize_attach(context, action='delete') volume_id = id LOG.audit(_("Detach volume %s"), volume_id, context=context) try: instance = self.compute_api.get(context, server_id) except exception.NotFound: raise exc.HTTPNotFound() bdms = self.compute_api.get_instance_bdms(context, instance) if not bdms: LOG.debug(_("Instance %s is not attached."), server_id) raise exc.HTTPNotFound() found = False for bdm in bdms: if bdm['volume_id'] == volume_id: self.compute_api.detach_volume(context, volume_id=volume_id) found = True break if not found: raise exc.HTTPNotFound() else: return webob.Response(status_int=202) def _items(self, req, server_id, entity_maker): """Returns a list of attachments, transformed through entity_maker.""" context = req.environ['nova.context'] authorize(context) try: instance = self.compute_api.get(context, server_id) except exception.NotFound: raise exc.HTTPNotFound() bdms = self.compute_api.get_instance_bdms(context, instance) limited_list = common.limited(bdms, req) results = [] for bdm in limited_list: if bdm['volume_id']: results.append(entity_maker(bdm['volume_id'], bdm['instance_uuid'], bdm['device_name'])) return {'volumeAttachments': results} def _translate_snapshot_detail_view(context, vol): """Maps keys for snapshots details view.""" d = _translate_snapshot_summary_view(context, vol) # NOTE(gagupta): No additional data / lookups at the moment return d def _translate_snapshot_summary_view(context, vol): """Maps keys for snapshots summary view.""" d = {} d['id'] = vol['id'] d['volumeId'] = vol['volume_id'] d['status'] = vol['status'] # NOTE(gagupta): We map volume_size as the snapshot size d['size'] = vol['volume_size'] d['createdAt'] = vol['created_at'] d['displayName'] = vol['display_name'] d['displayDescription'] = vol['display_description'] return d def make_snapshot(elem): elem.set('id') elem.set('status') elem.set('size') elem.set('createdAt') elem.set('displayName') elem.set('displayDescription') elem.set('volumeId') class SnapshotTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshot', selector='snapshot') make_snapshot(root) return xmlutil.MasterTemplate(root, 1) class 
SnapshotsTemplate(xmlutil.TemplateBuilder): def construct(self): root = xmlutil.TemplateElement('snapshots') elem = xmlutil.SubTemplateElement(root, 'snapshot', selector='snapshots') make_snapshot(elem) return xmlutil.MasterTemplate(root, 1) class SnapshotController(wsgi.Controller): """The Volumes API controller for the OpenStack API.""" def __init__(self): self.volume_api = volume.API() super(SnapshotController, self).__init__() @wsgi.serializers(xml=SnapshotTemplate) def show(self, req, id): """Return data about the given snapshot.""" context = req.environ['nova.context'] authorize(context) try: vol = self.volume_api.get_snapshot(context, id) except exception.NotFound: return exc.HTTPNotFound() return {'snapshot': _translate_snapshot_detail_view(context, vol)} def delete(self, req, id): """Delete a snapshot.""" context = req.environ['nova.context'] authorize(context) LOG.audit(_("Delete snapshot with id: %s"), id, context=context) try: snapshot = self.volume_api.get_snapshot(context, id) self.volume_api.delete_snapshot(context, snapshot) except exception.NotFound: return exc.HTTPNotFound() return webob.Response(status_int=202) @wsgi.serializers(xml=SnapshotsTemplate) def index(self, req): """Returns a summary list of snapshots.""" return self._items(req, entity_maker=_translate_snapshot_summary_view) @wsgi.serializers(xml=SnapshotsTemplate) def detail(self, req): """Returns a detailed list of snapshots.""" return self._items(req, entity_maker=_translate_snapshot_detail_view) def _items(self, req, entity_maker): """Returns a list of snapshots, transformed through entity_maker.""" context = req.environ['nova.context'] authorize(context) snapshots = self.volume_api.get_all_snapshots(context) limited_list = common.limited(snapshots, req) res = [entity_maker(context, snapshot) for snapshot in limited_list] return {'snapshots': res} @wsgi.serializers(xml=SnapshotTemplate) def create(self, req, body): """Creates a new snapshot.""" context = req.environ['nova.context'] authorize(context) if not self.is_valid_body(body, 'snapshot'): raise exc.HTTPUnprocessableEntity() snapshot = body['snapshot'] volume_id = snapshot['volume_id'] vol = self.volume_api.get(context, volume_id) force = snapshot.get('force', False) LOG.audit(_("Create snapshot from volume %s"), volume_id, context=context) if not utils.is_valid_boolstr(force): msg = _("Invalid value '%s' for force. ") % force raise exception.InvalidParameterValue(err=msg) if utils.bool_from_str(force): new_snapshot = self.volume_api.create_snapshot_force(context, vol, snapshot.get('display_name'), snapshot.get('display_description')) else: new_snapshot = self.volume_api.create_snapshot(context, vol, snapshot.get('display_name'), snapshot.get('display_description')) retval = _translate_snapshot_detail_view(context, new_snapshot) return {'snapshot': retval} class Volumes(extensions.ExtensionDescriptor): """Volumes support.""" name = "Volumes" alias = "os-volumes" namespace = "http://docs.openstack.org/compute/ext/volumes/api/v1.1" updated = "2011-03-25T00:00:00+00:00" def get_resources(self): resources = [] # NOTE(justinsb): No way to provide singular name ('volume') # Does this matter? 
res = extensions.ResourceExtension('os-volumes', VolumeController(), collection_actions={'detail': 'GET'}) resources.append(res) res = extensions.ResourceExtension('os-volume_attachments', VolumeAttachmentController(), parent=dict( member_name='server', collection_name='servers')) resources.append(res) res = extensions.ResourceExtension('os-volumes_boot', inherits='servers') resources.append(res) res = extensions.ResourceExtension('os-snapshots', SnapshotController(), collection_actions={'detail': 'GET'}) resources.append(res) return resources
apache-2.0
5,517,570,708,372,171,000
32.877086
79
0.598253
false
4.394685
false
false
false
PowerDNS/exabgp
lib/exabgp/reactor/__init__.py
1
13895
# encoding: utf-8 """ reactor.py Created by Thomas Mangin on 2012-06-10. Copyright (c) 2009-2013 Exa Networks. All rights reserved. """ import os import re import sys import time import signal import select from collections import deque from exabgp.reactor.daemon import Daemon from exabgp.reactor.listener import Listener from exabgp.reactor.listener import NetworkError from exabgp.reactor.api.processes import Processes from exabgp.reactor.api.processes import ProcessError from exabgp.reactor.peer import Peer from exabgp.reactor.peer import ACTION from exabgp.reactor.network.error import error from exabgp.reactor.api.decoding import Decoder from exabgp.configuration.ancient import Configuration from exabgp.configuration.environment import environment from exabgp.version import version from exabgp.logger import Logger class Reactor (object): # [hex(ord(c)) for c in os.popen('clear').read()] clear = ''.join([chr(int(c,16)) for c in ['0x1b', '0x5b', '0x48', '0x1b', '0x5b', '0x32', '0x4a']]) def __init__ (self,configurations): self.ip = environment.settings().tcp.bind self.port = environment.settings().tcp.port self.respawn = environment.settings().api.respawn self.max_loop_time = environment.settings().reactor.speed self.logger = Logger() self.daemon = Daemon(self) self.processes = None self.listener = None self.configuration = Configuration(configurations) self.decoder = Decoder() self.peers = {} self.route_update = False self._shutdown = False self._reload = False self._reload_processes = False self._restart = False self._saved_pid = False self._pending = deque() self._running = None signal.signal(signal.SIGTERM, self.sigterm) signal.signal(signal.SIGHUP, self.sighup) signal.signal(signal.SIGALRM, self.sigalrm) signal.signal(signal.SIGUSR1, self.sigusr1) signal.signal(signal.SIGUSR2, self.sigusr2) def sigterm (self,signum, frame): self.logger.reactor("SIG TERM received - shutdown") self._shutdown = True def sighup (self,signum, frame): self.logger.reactor("SIG HUP received - shutdown") self._shutdown = True def sigalrm (self,signum, frame): self.logger.reactor("SIG ALRM received - restart") self._restart = True def sigusr1 (self,signum, frame): self.logger.reactor("SIG USR1 received - reload configuration") self._reload = True def sigusr2 (self,signum, frame): self.logger.reactor("SIG USR2 received - reload configuration and processes") self._reload = True self._reload_processes = True def ready (self,ios,sleeptime=0): # never sleep a negative number of second (if the rounding is negative somewhere) # never sleep more than one second (should the clock time change during two time.time calls) sleeptime = min(max(0.0,sleeptime),1.0) if not ios: time.sleep(sleeptime) return [] try: read,_,_ = select.select(ios,[],[],sleeptime) return read except select.error,e: errno,message = e.args if not errno in error.block: raise e return [] def run (self): if self.ip: try: self.listener = Listener([self.ip,],self.port) self.listener.start() except NetworkError,e: self.listener = None if os.geteuid() != 0 and self.port <= 1024: self.logger.reactor("Can not bind to %s:%d, you may need to run ExaBGP as root" % (self.ip,self.port),'critical') else: self.logger.reactor("Can not bind to %s:%d (%s)" % (self.ip,self.port,str(e)),'critical') self.logger.reactor("unset exabgp.tcp.bind if you do not want listen for incoming connections",'critical') self.logger.reactor("and check that no other daemon is already binding to port %d" % self.port,'critical') sys.exit(1) self.logger.reactor("Listening for BGP session(s) on 
%s:%d" % (self.ip,self.port)) if not self.daemon.drop_privileges(): self.logger.reactor("Could not drop privileges to '%s' refusing to run as root" % self.daemon.user,'critical') self.logger.reactor("Set the environmemnt value exabgp.daemon.user to change the unprivileged user",'critical') return # This is required to make sure we can write in the log location as we now have dropped root privileges if not self.logger.restart(): self.logger.reactor("Could not setup the logger, aborting",'critical') return self.daemon.daemonise() if not self.daemon.savepid(): self.logger.reactor('could not update PID, not starting','error') # Make sure we create processes one we have dropped privileges and closed file descriptor self.processes = Processes(self) self.reload() # did we complete the run of updates caused by the last SIGUSR1/SIGUSR2 ? reload_completed = True wait = environment.settings().tcp.delay if wait: sleeptime = (wait * 60) - int(time.time()) % (wait * 60) self.logger.reactor("waiting for %d seconds before connecting" % sleeptime) time.sleep(float(sleeptime)) while True: try: while self.peers: start = time.time() end = start+self.max_loop_time if self._shutdown: self._shutdown = False self.shutdown() elif self._reload and reload_completed: self._reload = False self.reload(self._reload_processes) self._reload_processes = False elif self._restart: self._restart = False self.restart() elif self.route_update: self.route_update = False self.route_send() ios = {} keys = set(self.peers.keys()) while start < time.time() < end: for key in list(keys): peer = self.peers[key] action = peer.run() # .run() returns an ACTION enum: # * immediate if it wants to be called again # * later if it should be called again but has no work atm # * close if it is finished and is closing down, or restarting if action == ACTION.close: self.unschedule(peer) keys.discard(key) # we are loosing this peer, not point to schedule more process work elif action == ACTION.later: for io in peer.sockets(): ios[io] = key # no need to come back to it before a a full cycle keys.discard(key) if not self.schedule() and not keys: ready = self.ready(ios.keys() + self.processes.fds(),end-time.time()) for io in ready: if io in ios: keys.add(ios[io]) del ios[io] if not keys: reload_completed = True # RFC state that we MUST not send more than one KEEPALIVE / sec # And doing less could cause the session to drop if self.listener: for connection in self.listener.connected(): # found # * False, not peer found for this TCP connection # * True, peer found # * None, conflict found for this TCP connections found = False for key in self.peers: peer = self.peers[key] neighbor = peer.neighbor # XXX: FIXME: Inet can only be compared to Inet if connection.local == str(neighbor.peer_address) and connection.peer == str(neighbor.local_address): if peer.incoming(connection): found = True break found = None break if found: self.logger.reactor("accepted connection from %s - %s" % (connection.local,connection.peer)) elif found is False: self.logger.reactor("no session configured for %s - %s" % (connection.local,connection.peer)) connection.notification(6,3,'no session configured for the peer') connection.close() elif found is None: self.logger.reactor("connection refused (already connected to the peer) %s - %s" % (connection.local,connection.peer)) connection.notification(6,5,'could not accept the connection') connection.close() self.processes.terminate() self.daemon.removepid() break except KeyboardInterrupt: while True: try: self._shutdown = True 
self.logger.reactor("^C received") break except KeyboardInterrupt: pass except SystemExit: while True: try: self._shutdown = True self.logger.reactor("exiting") break except KeyboardInterrupt: pass except IOError: while True: try: self._shutdown = True self.logger.reactor("I/O Error received, most likely ^C during IO",'warning') break except KeyboardInterrupt: pass except ProcessError: while True: try: self._shutdown = True self.logger.reactor("Problem when sending message(s) to helper program, stopping",'error') break except KeyboardInterrupt: pass except select.error,e: while True: try: self._shutdown = True self.logger.reactor("problem using select, stopping",'error') break except KeyboardInterrupt: pass # from exabgp.leak import objgraph # print objgraph.show_most_common_types(limit=20) # import random # obj = objgraph.by_type('Route')[random.randint(0,2000)] # objgraph.show_backrefs([obj], max_depth=10) def shutdown (self): """terminate all the current BGP connections""" self.logger.reactor("Performing shutdown") if self.listener: self.listener.stop() for key in self.peers.keys(): self.peers[key].stop() def reload (self,restart=False): """reload the configuration and send to the peer the route which changed""" self.logger.reactor("Performing reload of exabgp %s" % version) reloaded = self.configuration.reload() if not reloaded: # # Careful the string below is used but the QA code to check for sucess of failure self.logger.configuration("Problem with the configuration file, no change done",'error') # Careful the string above is used but the QA code to check for sucess of failure # self.logger.configuration(self.configuration.error,'error') return for key, peer in self.peers.items(): if key not in self.configuration.neighbor: self.logger.reactor("Removing peer: %s" % peer.neighbor.name()) peer.stop() for key, neighbor in self.configuration.neighbor.items(): # new peer if key not in self.peers: self.logger.reactor("New peer setup: %s" % neighbor.name()) peer = Peer(neighbor,self) self.peers[key] = peer # modified peer elif self.peers[key].neighbor != neighbor: self.logger.reactor("Peer definition change, establishing a new connection for %s" % str(key)) self.peers[key].reestablish(neighbor) # same peer but perhaps not the routes else: # finding what route changed and sending the delta is not obvious self.logger.reactor("Peer definition identical, updating peer routes if required for %s" % str(key)) self.peers[key].reconfigure(neighbor) self.logger.configuration("Loaded new configuration successfully",'warning') # This only starts once ... 
self.processes.start(restart) def schedule (self): try: # read at least on message per process if there is some and parse it for service,command in self.processes.received(): self.decoder.parse_command(self,service,command) # if we have nothing to do, return or save the work if not self._running: if not self._pending: return False self._running = self._pending.popleft() # run it try: self._running.next() # run # should raise StopIteration in most case # and prevent us to have to run twice to run one command self._running.next() # run except StopIteration: self._running = None return True except StopIteration: pass except KeyboardInterrupt: self._shutdown = True self.logger.reactor("^C received",'error') def route_send (self): """the process ran and we need to figure what routes to changes""" self.logger.reactor("Performing dynamic route update") for key in self.configuration.neighbor.keys(): self.peers[key].send_new() self.logger.reactor("Updated peers dynamic routes successfully") def route_flush (self): """we just want to flush any unflushed routes""" self.logger.reactor("Performing route flush") for key in self.configuration.neighbor.keys(): self.peers[key].send_new(update=True) def restart (self): """kill the BGP session and restart it""" self.logger.reactor("Performing restart of exabgp %s" % version) self.configuration.reload() for key in self.peers.keys(): if key not in self.configuration.neighbor.keys(): neighbor = self.configuration.neighbor[key] self.logger.reactor("Removing Peer %s" % neighbor.name()) self.peers[key].stop() else: self.peers[key].reestablish() self.processes.terminate() self.processes.start() def unschedule (self,peer): key = peer.neighbor.name() if key in self.peers: del self.peers[key] def answer (self,service,string): self.processes.write(service,string) self.logger.reactor('Responding to %s : %s' % (service,string)) def api_shutdown (self): self._shutdown = True self._pending = deque() self._running = None def api_reload (self): self._reload = True self._pending = deque() self._running = None def api_restart (self): self._restart = True self._pending = deque() self._running = None @staticmethod def match_neighbor (description,name): for string in description: if re.search('(^|[\s])%s($|[\s,])' % re.escape(string), name) is None: return False return True def match_neighbors (self,descriptions): """returns the sublist of peers matching the description passed, or None if no description is given""" if not descriptions: return self.peers.keys() returned = [] for key in self.peers: for description in descriptions: if Reactor.match_neighbor(description,key): if key not in returned: returned.append(key) return returned def nexthops (self,peers): return dict((peer,self.peers[peer].neighbor.local_address) for peer in peers) def plan (self,callback): self._pending.append(callback)
bsd-3-clause
1,335,862,084,631,700,000
30.942529
126
0.684131
false
3.45389
true
false
false
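A note on the Reactor.match_neighbor helper in the exabgp reactor record above: each description string must appear in the neighbor name as a whole, space-delimited token. The following is a minimal, self-contained sketch of just that matching logic; the neighbor name and description strings are invented for illustration and are not taken from exabgp itself.

import re

def match_neighbor(description, name):
    # every description token must appear in the name as a whole word
    for string in description:
        if re.search(r'(^|[\s])%s($|[\s,])' % re.escape(string), name) is None:
            return False
    return True

# hypothetical neighbor name, just to exercise the matcher
name = "neighbor 192.0.2.1 local-ip 192.0.2.2 local-as 65000 peer-as 65001"
print(match_neighbor(["192.0.2.1", "peer-as 65001"], name))  # True
print(match_neighbor(["192.0.2.9"], name))                   # False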
eirmag/weboob
modules/bp/browser.py
1
5496
# -*- coding: utf-8 -*- # Copyright(C) 2010-2011 Nicolas Duhamel # # This file is part of weboob. # # weboob is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # weboob is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # # You should have received a copy of the GNU Affero General Public License # along with weboob. If not, see <http://www.gnu.org/licenses/>. from urlparse import urlsplit, parse_qsl from datetime import datetime from weboob.tools.browser import BaseBrowser, BrowserIncorrectPassword, BrowserBanned from .pages import LoginPage, Initident, CheckPassword, repositionnerCheminCourant, BadLoginPage, AccountDesactivate, \ AccountList, AccountHistory, \ TransferChooseAccounts, CompleteTransfer, TransferConfirm, TransferSummary from weboob.capabilities.bank import Transfer __all__ = ['BPBrowser'] class BPBrowser(BaseBrowser): DOMAIN = 'voscomptesenligne.labanquepostale.fr' PROTOCOL = 'https' CERTHASH = '868646b852c989638d4e5bbfab830e2cfbb82f4d2524e28d0251686a44e49163' ENCODING = None # refer to the HTML encoding PAGES = {r'.*wsost/OstBrokerWeb/loginform.*' : LoginPage, r'.*authentification/repositionnerCheminCourant-identif.ea' : repositionnerCheminCourant, r'.*authentification/initialiser-identif.ea' : Initident, r'.*authentification/verifierMotDePasse-identif.ea' : CheckPassword, r'.*synthese_assurancesEtComptes/afficheSynthese-synthese\.ea' : AccountList, r'.*synthese_assurancesEtComptes/rechercheContratAssurance-synthese.ea' : AccountList, r'.*CCP/releves_ccp/releveCPP-releve_ccp\.ea' : AccountHistory, r'.*CNE/releveCNE/releveCNE-releve_cne\.ea' : AccountHistory, r'.*/virementSafran_aiguillage/init-saisieComptes\.ea' : TransferChooseAccounts, r'.*/virementSafran_aiguillage/formAiguillage-saisieComptes\.ea' : CompleteTransfer, r'.*/virementSafran_national/validerVirementNational-virementNational.ea' : TransferConfirm, r'.*/virementSafran_national/confirmerVirementNational-virementNational.ea' : TransferSummary, r'.*ost/messages\.CVS\.html\?param=0x132120c8.*' : BadLoginPage, r'.*ost/messages\.CVS\.html\?param=0x132120cb.*' : AccountDesactivate, } def __init__(self, *args, **kwargs): kwargs['parser'] = ('lxml',) BaseBrowser.__init__(self, *args, **kwargs) def home(self): self.location('https://voscomptesenligne.labanquepostale.fr/wsost/OstBrokerWeb/loginform?TAM_OP=login&' 'ERROR_CODE=0x00000000&URL=%2Fvoscomptes%2FcanalXHTML%2Fidentif.ea%3Forigin%3Dparticuliers') def is_logged(self): return not self.is_on_page(LoginPage) def login(self): if not self.is_on_page(LoginPage): self.location('https://voscomptesenligne.labanquepostale.fr/wsost/OstBrokerWeb/loginform?TAM_OP=login&' 'ERROR_CODE=0x00000000&URL=%2Fvoscomptes%2FcanalXHTML%2Fidentif.ea%3Forigin%3Dparticuliers', no_login=True) self.page.login(self.username, self.password) if self.is_on_page(BadLoginPage): raise BrowserIncorrectPassword() if self.is_on_page(AccountDesactivate): raise BrowserBanned() def get_accounts_list(self): self.location("https://voscomptesenligne.labanquepostale.fr/voscomptes/canalXHTML/comptesCommun/synthese_assurancesEtComptes/rechercheContratAssurance-synthese.ea") return self.page.get_accounts_list() def get_account(self, id): if not 
self.is_on_page(AccountList): self.location("https://voscomptesenligne.labanquepostale.fr/voscomptes/canalXHTML/comptesCommun/synthese_assurancesEtComptes/rechercheContratAssurance-synthese.ea") return self.page.get_account(id) def get_history(self, account): v = urlsplit(account._link_id) args = dict(parse_qsl(v.query)) args['typeRecherche'] = 10 self.location(self.buildurl(v.path, **args)) if not self.is_on_page(AccountHistory): return iter([]) return self.page.get_history() def make_transfer(self, from_account, to_account, amount): self.location('https://voscomptesenligne.labanquepostale.fr/voscomptes/canalXHTML/virement/virementSafran_aiguillage/init-saisieComptes.ea') self.page.set_accouts(from_account, to_account) #TODO: Check self.page.complete_transfer(amount) self.page.confirm() id_transfer = self.page.get_transfer_id() transfer = Transfer(id_transfer) transfer.amount = amount transfer.origin = from_account.label transfer.recipient = to_account.label transfer.date = datetime.now() return transfer
agpl-3.0
-1,063,427,746,391,495,000
45.184874
176
0.663937
false
3.474083
false
false
false
bitdancer/pynvm
nvm/pmemobj/list.py
1
7019
import collections import sys from .compat import recursive_repr, abc from _pmem import ffi # XXX refactor to make this import unneeded? # XXX: refactor to allocate this instead of hardcoding it. LIST_POBJPTR_ARRAY_TYPE_NUM = 30 class PersistentList(abc.MutableSequence): """Persistent version of the 'list' type.""" # XXX locking! # XXX All bookkeeping attrs should be _v_xxxx so that all other attrs # (other than __manager__) can be made persistent. def __init__(self, *args, **kw): if '__manager__' not in kw: raise ValueError("__manager__ is required") mm = self.__manager__ = kw.pop('__manager__') if '_oid' not in kw: with mm.transaction(): # XXX Will want to implement a freelist here, like CPython self._oid = mm.malloc(ffi.sizeof('PListObject')) ob = ffi.cast('PObject *', mm.direct(self._oid)) ob.ob_type = mm._get_type_code(PersistentList) else: self._oid = kw.pop('_oid') if kw: raise TypeError("Unrecognized keyword argument(s) {}".format(kw)) self._body = ffi.cast('PListObject *', mm.direct(self._oid)) if args: if len(args) != 1: raise TypeError("PersistentList takes at most 1" " argument, {} given".format(len(args))) self.extend(args[0]) # Methods and properties needed to implement the ABC required methods. @property def _size(self): return ffi.cast('PVarObject *', self._body).ob_size @property def _allocated(self): return self._body.allocated @property def _items(self): mm = self.__manager__ ob_items = mm.otuple(self._body.ob_items) if ob_items == mm.OID_NULL: return None return ffi.cast('PObjPtr *', mm.direct(ob_items)) def _resize(self, newsize): mm = self.__manager__ allocated = self._allocated # Only realloc if we don't have enough space already. if (allocated >= newsize and newsize >= allocated >> 1): assert self._items != None or newsize == 0 with mm.transaction(): ob = ffi.cast('PVarObject *', self._body) mm.snapshot_range(ffi.addressof(ob, 'ob_size'), ffi.sizeof('size_t')) ob.ob_size = newsize return # We use CPython's overallocation algorithm. 
new_allocated = (newsize >> 3) + (3 if newsize < 9 else 6) + newsize if newsize == 0: new_allocated = 0 items = self._items with mm.transaction(): if items is None: items = mm.malloc(new_allocated * ffi.sizeof('PObjPtr'), type_num=LIST_POBJPTR_ARRAY_TYPE_NUM) else: items = mm.realloc(self._body.ob_items, new_allocated * ffi.sizeof('PObjPtr'), LIST_POBJPTR_ARRAY_TYPE_NUM) mm.snapshot_range(self._body, ffi.sizeof('PListObject')) self._body.ob_items = items self._body.allocated = new_allocated ffi.cast('PVarObject *', self._body).ob_size = newsize def insert(self, index, value): mm = self.__manager__ size = self._size newsize = size + 1 with mm.transaction(): self._resize(newsize) if index < 0: index += size if index < 0: index = 0 if index > size: index = size items = self._items mm.snapshot_range(items + index, ffi.offsetof('PObjPtr *', newsize)) for i in range(size, index, -1): items[i] = items[i-1] v_oid = mm.persist(value) mm.incref(v_oid) items[index] = v_oid def _normalize_index(self, index): try: index = int(index) except TypeError: # Assume it is a slice # XXX fixme raise NotImplementedError("Slicing not yet implemented") if index < 0: index += self._size if index < 0 or index >= self._size: raise IndexError(index) return index def __setitem__(self, index, value): mm = self.__manager__ index = self._normalize_index(index) items = self._items with mm.transaction(): v_oid = mm.persist(value) mm.snapshot_range(ffi.addressof(items, index), ffi.sizeof('PObjPtr *')) mm.xdecref(items[index]) items[index] = v_oid mm.incref(v_oid) def __delitem__(self, index): mm = self.__manager__ index = self._normalize_index(index) size = self._size newsize = size - 1 items = self._items with mm.transaction(): mm.snapshot_range(ffi.addressof(items, index), ffi.offsetof('PObjPtr *', size)) mm.decref(items[index]) for i in range(index, newsize): items[i] = items[i+1] self._resize(newsize) def __getitem__(self, index): index = self._normalize_index(index) items = self._items return self.__manager__.resurrect(items[index]) def __len__(self): return self._size # Additional list methods not provided by the ABC. @recursive_repr() def __repr__(self): return "{}([{}])".format(self.__class__.__name__, ', '.join("{!r}".format(x) for x in self)) def __eq__(self, other): if not (isinstance(other, PersistentList) or isinstance(other, list)): return NotImplemented if len(self) != len(other): return False for i in range(len(self)): if self[i] != other[i]: return False return True if sys.version_info[0] < 3: def __ne__(self, other): return not self == other def clear(self): mm = self.__manager__ if self._size == 0: return items = self._items with mm.transaction(): for i in range(self._size): # Grab oid in tuple form so the assignment can't change it oid = mm.otuple(items[i]) if oid == mm.OID_NULL: continue items[i] = mm.OID_NULL mm.decref(oid) self._resize(0) # Additional methods required by the pmemobj API. def _traverse(self): items = self._items for i in range(len(self)): yield items[i] def _substructures(self): return ((self._body.ob_items, LIST_POBJPTR_ARRAY_TYPE_NUM),) def _deallocate(self): self.clear()
bsd-3-clause
-1,384,105,293,564,038,400
33.406863
77
0.515601
false
4.187947
false
false
false
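The PersistentList._resize method in the record above reuses CPython's list over-allocation growth rule. A small stand-alone sketch of only that formula, useful for seeing how much head-room each resize reserves (the sample sizes are arbitrary):

def overallocate(newsize):
    # same growth rule as PersistentList._resize (and CPython's listobject.c)
    new_allocated = (newsize >> 3) + (3 if newsize < 9 else 6) + newsize
    return 0 if newsize == 0 else new_allocated

for size in (0, 1, 5, 9, 16, 100, 1000):
    print(size, "->", overallocate(size))
# e.g. 1 -> 4, 9 -> 16, 100 -> 118, 1000 -> 1131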
thirdkey-solutions/granary
granary/seed.py
1
1984
import seedlib
import string
import json

from mnemonic import Mnemonic
from binascii import hexlify, unhexlify
import bitcoin

# class Granary():
#     pass

class Seed():

    def __init__(self):
        self._bin_seed = None
        self._fingerprint = None
        self._bip32_xpriv = None

    def __nonzero__(self):
        return bool(self._bin_seed)

    def __repr__(self):
        return "< Seed: %s >" % self.fingerprint() if self else "< Seed: empty >"

    def bin_seed(self):
        return self._bin_seed

    def fingerprint(self):
        if not self._bin_seed:
            return None
        self._fingerprint = seedlib.fingerprint(self._bin_seed)
        return self._fingerprint

    def from_random(self):
        self._bin_seed = seedlib.random_key()

    def from_bin(self, bin_seed):
        assert(len(bin_seed) == 32)
        self._bin_seed = bin_seed

    def from_hex(self, hex_seed):
        assert(set(hex_seed) <= set(string.hexdigits))
        assert(len(hex_seed) == 64)
        self._bin_seed = unhexlify(hex_seed)

    def as_hex(self):
        return hexlify(self._bin_seed) if self._bin_seed else None

    def from_mnemonic(self, mnemonic):
        self._bin_seed = seedlib.mnemonic_decode(mnemonic)

    def as_mnemonic(self):
        return seedlib.mnemonic_encode(self._bin_seed) if self._bin_seed else None

    def stretched(self, passphrase):
        # stretch key
        newseed = Seed()
        newseed.from_bin(seedlib.stretched_key(self._bin_seed, passphrase))
        return newseed

    # mnemonic seed -> BIP39 -> BIP32 xpriv
    def as_HD_root(self):
        # BIP39 compatible derivation from seed mnemonic without passphrase
        master_seed = seedlib.mnemonic.to_seed(self.as_mnemonic())
        # Master key pair for BIP32
        master_xpriv = bitcoin.bip32_master_key(master_seed)
        return master_xpriv
mit
700,877,953,537,606,800
28.176471
82
0.588206
false
3.736347
false
false
false
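A hedged usage sketch for the Seed class in the granary record above. It assumes the package's seedlib, mnemonic and bitcoin dependencies are installed and importable; the hex value is purely illustrative and the round-trip assertion reflects the expected behaviour of the mnemonic codec, not a documented guarantee.

from granary.seed import Seed

s = Seed()
s.from_hex("aa" * 32)        # any 64-hex-character string; this one is illustrative
print(s.fingerprint())       # short identifier derived from the 32-byte seed

words = s.as_mnemonic()      # mnemonic word encoding of the seed
print(words)

s2 = Seed()
s2.from_mnemonic(words)      # expected to round-trip back to the same bytes
assert s2.as_hex() == s.as_hex()

xpriv = s.as_HD_root()       # BIP39-derived master seed -> BIP32 master private key
print(xpriv)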
peraktong/Cannon-Experiment
DR13/0330_read_table_rc.py
1
24195
import numpy as np from astropy.table import Table from astropy.io import fits import matplotlib.pyplot as plt import matplotlib import pickle from matplotlib import cm from numpy.random import randn # table path path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits" star = fits.open(path) table = Table.read(path) """ There are 13 columns in the table: 1. 'APOGEEID' -- The name of the star 2. 'VISIT' -- The name of the visit file 3. BJD -- Barycentric JD Inferred labels are from the Cannon. The spectra we use are from the first combined spectra (There are two combined spectra for each star, which are obtained by two different methods) : (1) global weighting, where each visit spectrum is weighted by its (S/N)2, and (2) pixel-by-pixel weighting, where each pixel is weighted by its (S/N)2. 4. TEFF 5. LOGG 6. FEH The abc parameters for each visit: 7. A -- parameter a 8. B -- parameter b 9. C -- parameter c 10. CHIINF -- chi-squared for the inferred flux from the cannon (a=0,b=1,c=0) 11. CHIMIX -- chi-squared for the mixed flux from the abc fit. 12. VBARY -- The barycentric Velocity(km/s) from the APOGEE team. 13. VSHIFT -- The velocity shift from the abc fit(km/s) 14. FIBER -- Fiber ID 15. SNR -- SNR of the visit #### The covariance matrix of the abc fit is in HDU0 data, which is a 3*3*N 3-d matrix. N is the number of visits. ### """ # read covariance matrix from the abc fit: un_cov = star[0].data[:,:,0] #print(un_cov) # read the velocity shift from the abc fit v_shift = table["VSHIFT"] #print(v_shift.shape) ######################## #Read table and plot to check. class plot(): def read_table(self): path = "/Users/caojunzhi/Downloads/upload_20170330/red_clump_dr13.fits" star = fits.open(path) table = Table.read(path) # read it: un_cov = star[0].data self.un_cov = un_cov a = table["A"] b = table["B"] c = table["C"] self.a = a self.b = b self.c = c mask = 2*b>a+c self.mask = mask name = table["APOGEEID"] self.name = name SHIFT = table["VSHIFT"] self.shift = SHIFT VBARY = table["VBARY"] self.VBARY = VBARY teff = table["TEFF"] self.teff = teff logg = table["LOGG"] self.logg = logg feh = table["FEH"] self.feh = feh self.chi_inf = table["CHIINF"] self.chi_mix = table["CHIMIX"] self.BJD = table["BJD"] self.fiber = table["FIBER"] self.SNR =table["SNR"] def plot_teff_logg(self): # only show visits with 2b>a+c mask = self.mask teff = self.teff[mask] logg = self.logg[mask] feh = self.feh[mask] # shift is in km/s shift = self.shift[mask]*1000 a = self.a b = self.b c = self.c bac = (2*b-a-c)[mask] font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(logg,teff, marker='x', c=shift, vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm) ax1.set_ylabel('Teff $K$', fontsize=20) ax1.set_xlabel('Logg ', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(logg,teff, marker='x', c=shift, vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("RV shifts $m/s$", fontsize=20) f.suptitle("Teff vs Logg for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_teff_feh(self): # only show visits with 2b>a+c mask = self.mask teff = self.teff[mask] logg = 
self.logg[mask] feh = self.feh[mask] shift = self.shift[mask] * 1000 a = self.a b = self.b c = self.c bac = (2*b-a-c)[mask] font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(feh,teff, marker='x', c=shift, vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm) ax1.set_ylabel('Teff $K$', fontsize=20) ax1.set_xlabel('FeH ', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(feh,teff, marker='x', c=shift, vmin=np.min(shift), vmax=np.max(shift), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("RV shifts $m/s$", fontsize=20) f.suptitle("Teff vs FeH for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_teff_logg_bac(self): # only show visits with 2b>a+c mask = self.mask teff = self.teff[mask] logg = self.logg[mask] feh = self.feh[mask] shift = self.shift[mask] a = self.a b = self.b c = self.c bac = (2*b-a-c)[mask] font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 low = 0 up = 3 ax1.scatter(logg,teff, marker='x', c=bac, vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm) ax1.set_ylabel('Teff $K$', fontsize=20) ax1.set_xlabel('Logg ', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(logg,teff, marker='x', c=bac, vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("2b-a-c", fontsize=20) f.suptitle("Teff vs Logg for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_logg_rc_2bac" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_teff_feh_bac(self): # only show visits with 2b>a+c mask = self.mask teff = self.teff[mask] logg = self.logg[mask] feh = self.feh[mask] shift = self.shift[mask] a = self.a b = self.b c = self.c bac = (2*b-a-c)[mask] low = 0 up = 3 font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(feh,teff, marker='x', c=bac, vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm) ax1.set_ylabel('Teff $K$', fontsize=20) ax1.set_xlabel('FeH ', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(feh,teff, marker='x', c=bac, vmin=low, vmax=up, alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("2b-a-c", fontsize=20) f.suptitle("Teff vs FeH for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "Teff_feh_rc_2bac" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_shift_bjd(self): mask = self.mask shift =self.shift[mask] BJD = self.BJD[mask] feh = self.feh[mask] font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(BJD,shift, marker='x', c=feh, vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, 
cmap=cm.coolwarm) ax1.set_xlabel('BJD', fontsize=20) ax1.set_ylabel('RV shift $km/s$ ', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(BJD,shift, marker='x', c=feh, vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("Fe/H", fontsize=20) f.suptitle("RV shift vs BJD for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_BJD_rc" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_rv_fiber(self): mask = self.mask a = self.a[mask] b = self.b[mask] c = self.c[mask] fiber = self.fiber[mask] SNR = self.SNR[mask] portion = (c+a)/(a+b+c) RV = (c - a) / (a + b + c) * 4144.68 font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(fiber,RV, marker='x', c=SNR, vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha, cmap=cm.coolwarm) ax1.set_xlabel('FiberID', fontsize=20) ax1.set_ylabel('RV shift $m/s$', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(fiber,RV, marker='x', c=SNR, vmin=np.min(SNR), vmax=np.max(SNR), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("SNR", fontsize=20) f.suptitle("RV shifts vs FiberID for the red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_shift_vs_Fiber_rc" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_ac_fiber(self): mask = self.mask a = self.a[mask] b = self.b[mask] c = self.c[mask] fiber = self.fiber[mask] portion = (c+a)/(a+b+c) RV = (c - a) / (a + b + c) * 4144.68 font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(fiber,portion, marker='x', c=RV, vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm) ax1.set_xlabel('FiberID', fontsize=20) ax1.set_ylabel('$(c+a)/(a+b+c)$ ', fontsize=20) axes = plt.gca() axes.set_ylim([-1,1]) f.subplots_adjust(right=0.8) pl = ax1.scatter(fiber,portion, marker='x', c=RV, vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("RV shifts $m/s$", fontsize=20) f.suptitle("$(c+a)/(a+b+c)$ vs FiberID for the red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "ac_vs_Fiber_rc" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_delta_chi_SNR(self): mask = self.mask delta_chi = (self.chi_inf-self.chi_mix)[mask] SNR = self.SNR[mask] RV = self.shift[mask] font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(SNR,delta_chi, marker='x', c=RV, vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm) ax1.set_xlabel('SNR', fontsize=20) ax1.set_ylabel('Delta chi squared ', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(SNR,delta_chi, marker='x', c=RV, vmin=np.min(RV), vmax=np.max(RV), alpha=alpha, cmap=cm.coolwarm) cbar_ax = 
f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("RV shifts $m/s$", fontsize=20) f.suptitle("Delta chi squared vs SNR for the red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "dchi_vs_SNR_rc" +".png" fig.savefig(save_path, dpi=500) plt.close() def histogram_shift_abc(self): a = self.a b = self.b c = self.c RV = (c-a)/(a+b+c)*4144.68 # add a mask: only show results with 2b>a+c mask = 2*b>a+c a = a[mask] b = b[mask] c = c[mask] RV = RV[mask] font = {'weight': 'bold', 'size': 15} matplotlib.rc('font', **font) f, ((ax1, ax2), (ax3, ax4)) = \ plt.subplots(2, 2) colors = ["cyan",'b', 'g', 'r'] name = ["RV","a", "b", "c"] # histogram of rv #ax1 rms_RV = (np.nansum(RV*RV)/len(RV))**0.5 rms_a = (np.nansum(a * a) / len(a)) ** 0.5 rms_b = (np.nansum(b*b) / len(b)) ** 0.5 rms_c = (np.nansum(c * c) / len(c)) ** 0.5 ax1.hist(RV, bins=40, color=colors[0], label="%s RMS = %.2f $m/s$"%(name[0],rms_RV)) #ax1.set_title('Histogram of Radial velocity shifts', fontsize=30) ax1.set_xlabel('values of radial velocity shifts $m/s$', fontsize=15) ax1.set_ylabel('Number', fontsize=15) ax1.legend(prop={'size': 15}) # add vertical grey line # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5) # histogram of a #ax2 ax2.hist(a, bins=40, color=colors[1], label="%s RMS = %.2f"%(name[1],rms_a)) #ax2.set_title('Histogram of parameter a', fontsize=30) ax2.set_xlabel('values of parameter a', fontsize=15) ax2.set_ylabel('Number', fontsize=15) ax2.legend(prop={'size': 15}) # add vertical grey line # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5) # histogram of b #ax3 ax3.hist(b, bins=40, color=colors[2], label="%s RMS = %.2f"%(name[2],rms_b)) ax3.legend(prop={'size': 15}) #ax3.set_title('Histogram of paramete b', fontsize=30) ax3.set_xlabel("values of parameter b", fontsize=15) ax3.set_ylabel('Number', fontsize=15) # add vertical grey line # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5) # histogram of c #ax4 ax4.hist(c, bins=40, color=colors[3], label="%s RMS = %.2f"%(name[3],rms_c)) ax4.legend(prop={'size': 15}) #ax4.set_title('Histogram of parameter c', fontsize=30) ax4.set_xlabel("values of parameter c", fontsize=15) ax4.set_ylabel('Number', fontsize=15) # add vertical grey line # ax1.plot((wl[index], wl[index]), (0.5, 1 + 0.5 * N), 'k-', linewidth=1.5) f.suptitle("Histogram of RV shifts, a, b and c for the red clumps in DR13",fontsize=25) f.legends #f.suptitle("Histogram of RV shifts, a, b and c by using the absorption lines") # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "histogram_rv_shift_rc" + ".png" fig.savefig(save_path, dpi=500) plt.close() # RV before after def plot_RV_std_before_after_teff(self): mask = self.mask shift =self.shift[mask] VBARY = self.VBARY[mask] teff = self.teff[mask] logg = self.logg[mask] feh = self.feh[mask] # From the average (c+a)/(a+b+c) # Do put a mask here mask = self.mask # add points with the same fiberid together name = self.name[mask] target = list(set(name)) VBARY = self.VBARY[mask] shift =self.shift[mask] #SNR = self.SNR[mask] fusion_new = [] # name+std_old and std_new + Teff logg feh for i in range(0,len(target)): print("Doing %.2f %%"%(i/len(target)*100)) index = np.where(name == 
target[i]) index = np.array(index) index = index.ravel() std_old_i = np.std(VBARY[index]) std_new_i = np.std(VBARY[index]+shift[index]) teff_i = np.nanmedian(teff[index]) logg_i = np.nanmedian(logg[index]) feh_i = np.nanmedian(feh[index]) fusion_new.append([target[i],std_old_i,std_new_i,teff_i,logg_i,feh_i]) fusion_new = np.array(fusion_new) self.fusion_new = fusion_new # portion+fiber+rv # name = fusion_new[:, 0] std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel() std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel() # use int teff = np.array(fusion_new[:,3],dtype=np.float16).ravel() font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(std_old,std_new, marker='x', c=teff, vmin=np.min(teff), vmax=np.max(teff), alpha=alpha, cmap=cm.coolwarm) ax1.plot(std_old,std_old,"k",alpha=alpha,linewidth=0.3) ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20) ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(std_old,std_new, marker='x', c=teff, vmin=np.min(teff), vmax=np.max(teff), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("Teff $K$", fontsize=20) f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_teff" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_RV_std_before_after_logg(self): mask = self.mask shift =self.shift[mask] VBARY = self.VBARY[mask] teff = self.teff[mask] logg = self.logg[mask] feh = self.feh[mask] fusion_new =self.fusion_new # name = fusion_new[:, 0] std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel() std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel() logg = np.array(fusion_new[:,4],dtype=np.float16).ravel() font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(std_old,std_new, marker='x', c=logg, vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=cm.coolwarm) ax1.plot(std_old,std_old, "k", alpha=alpha, linewidth=0.3) ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20) ax1.set_ylabel('Sts of RVs after the correction $km/s$', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(std_old,std_new, marker='x', c=logg, vmin=np.min(logg), vmax=np.max(logg), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("logg", fontsize=20) f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_logg" +".png" fig.savefig(save_path, dpi=500) plt.close() def plot_RV_std_before_after_feh(self): mask = self.mask shift =self.shift[mask] VBARY = self.VBARY[mask] teff = self.teff[mask] logg = self.logg[mask] feh = self.feh[mask] fusion_new =self.fusion_new # name = fusion_new[:, 0] std_old = np.array(fusion_new[:,1],dtype=np.float32).ravel() std_new = np.array(fusion_new[:,2],dtype=np.float32).ravel() feh = 
np.array(fusion_new[:,5],dtype=np.float16).ravel() font = {'family': 'normal', 'weight': 'bold', 'size': 14} matplotlib.rc('font', **font) f, ax1 = plt.subplots(1,1) alpha = 0.3 #ax1 ax1.scatter(std_old,std_new, marker='x', c=feh, vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm) ax1.plot(std_old,std_old, "k", alpha=alpha, linewidth=0.3) ax1.set_xlabel('Std of RVs before the correction $km/s$', fontsize=20) ax1.set_ylabel('Std of RVs after the correction $km/s$', fontsize=20) f.subplots_adjust(right=0.8) pl = ax1.scatter(std_old,std_new, marker='x', c=feh, vmin=np.min(feh), vmax=np.max(feh), alpha=alpha, cmap=cm.coolwarm) cbar_ax = f.add_axes([0.85, 0.15, 0.02, 0.7]) cb = f.colorbar(pl, cax=cbar_ax) cb.set_label("FeH", fontsize=20) f.suptitle("Std of RVs before vs after the correction for red clumps in DR13", fontsize=30) # save them: fig = matplotlib.pyplot.gcf() # adjust the size based on the number of visit fig.set_size_inches(14.5, 8.5) save_path = "/Users/caojunzhi/Downloads/upload_20170330/" + "RV_std_before_after_feh" +".png" fig.savefig(save_path, dpi=500) plt.close() model = plot() model.read_table() """ model.plot_teff_logg() model.plot_teff_feh() model.plot_teff_logg_bac() model.plot_teff_feh_bac() model.plot_rv_fiber() model.plot_ac_fiber() """ #VBARY vs model.plot_RV_std_before_after_teff() model.plot_RV_std_before_after_logg() model.plot_RV_std_before_after_feh()
mit
-1,033,193,668,488,516,000
21.633302
102
0.538128
false
2.976015
false
false
false
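Several plotting methods in the red-clump script above convert the fitted a, b, c parameters into a radial-velocity shift with RV = (c - a) / (a + b + c) * 4144.68 and apply the 2b > a + c quality cut. A tiny stand-alone check of that arithmetic with made-up parameter values:

import numpy as np

a = np.array([0.05, -0.02, 0.10])
b = np.array([0.90,  1.00, 0.85])
c = np.array([0.05,  0.02, 0.05])

rv = (c - a) / (a + b + c) * 4144.68   # same expression as plot_rv_fiber / plot_ac_fiber, in m/s
mask = 2 * b > a + c                    # the quality cut used throughout the class
print(rv)
print(rv[mask])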
motion-planning/rrt-algorithms
examples/rrt_connect/rrt_connect_2d_with_random_obstacles.py
1
1254
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
import numpy as np

from src.rrt.rrt_connect import RRTConnect
from src.search_space.search_space import SearchSpace
from src.utilities.obstacle_generation import generate_random_obstacles
from src.utilities.plotting import Plot

X_dimensions = np.array([(0, 100), (0, 100)])  # dimensions of Search Space
x_init = (0, 0)  # starting location
x_goal = (100, 100)  # goal location

Q = np.array([2])  # length of tree edges
r = 0.5  # length of smallest edge to check for intersection with obstacles
max_samples = 2048  # max number of samples to take before timing out
prc = 0.1  # probability of checking for a connection to goal

# create search space
X = SearchSpace(X_dimensions)
n = 50
Obstacles = generate_random_obstacles(X, x_init, x_goal, n)

# create rrt_search
rrt_connect = RRTConnect(X, Q, x_init, x_goal, max_samples, r, prc)
path = rrt_connect.rrt_connect()

# plot
plot = Plot("rrt_connect_2d_with_random_obstacles")
plot.plot_tree(X, rrt_connect.trees)
if path is not None:
    plot.plot_path(X, path)
plot.plot_obstacles(X, Obstacles)
plot.plot_start(X, x_init)
plot.plot_goal(X, x_goal)

plot.draw(auto_open=True)
mit
4,732,484,829,832,730,000
34.828571
75
0.73764
false
2.923077
false
false
false
Midnighter/foggy-march
foggy/plots.py
1
3454
# -*- coding: utf-8 -*- """ ====================== Variance Scaling Plots ====================== :Author: Moritz Emanuel Beber :Date: 2013-05-03 :Copyright: Copyright |c| 2013 Jacobs University Bremen gGmbH, all rights reserved. :File: plots.py .. |c| unicode:: U+A9 """ __all__ = ["BREWER_SET1", "fluctuation_scaling", "fluctuation_scaling_fit", "correlation", "histogram"] import numpy import scipy.stats import matplotlib.pyplot as plt from itertools import izip from scipy.optimize import curve_fit BREWER_SET1 = ["#E41A1C", "#377EB8", "#4DAF4A", "#984EA3", "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#8DD3C7"] def fluctuation_scaling(data, labels): """ Plot many curves with labels. data: list Contains tuples of x-locations and y-locations. labels: list For each pair in ``data`` one string. """ for ((x_loc, y_loc), label, colour) in izip(data, labels, BREWER_SET1): mask = numpy.isfinite(x_loc) & (x_loc > 0.0) & numpy.isfinite(y_loc) & (y_loc > 0.0) x_loc = x_loc[mask] y_loc = y_loc[mask] if len(x_loc) == 0 or len(y_loc) == 0: continue plt.scatter(x_loc, y_loc, label=label, color=colour) plt.xlabel("$<f_{i}>$") plt.ylabel("$\\sigma_{i}$") plt.xscale("log") plt.yscale("log") plt.legend(loc="upper left") plt.show() def _continuous_power_law(x, k, alpha, c): return k * numpy.power(x, alpha) + c def fluctuation_scaling_fit(data, labels): for ((x_loc, y_loc), label, colour) in izip(data, labels, BREWER_SET1): mask = numpy.isfinite(x_loc) & (x_loc > 0.0) & numpy.isfinite(y_loc) & (y_loc > 0.0) x_loc = x_loc[mask] y_loc = y_loc[mask] if len(x_loc) == 0 or len(y_loc) == 0: continue try: (popt, pcov) = curve_fit(_continuous_power_law, x_loc, y_loc) fit_y = numpy.power(x_loc, popt[1]) # fit_y *= popt[0] # can cause OverflowError # (slope, intercept, r, p, err) = stats.linregress(x_log, y_log) # fit_y = numpy.power(x_loc, slope) * numpy.power(10.0, intercept) plt.plot(x_loc, fit_y, color=colour) except RuntimeError: plt.scatter(x_loc, y_loc, label=label, color=colour) else: plt.scatter(x_loc, y_loc, label="%s $\\alpha = %.3G \\pm %.3G$" % (label, popt[1], numpy.sqrt(pcov[1, 1])), color=colour) # plt.text(lab_xloc, lab_yloc, "$\\alpha = %.3G$\n$R^{2} = %.3G$\n$p = %.3G$\ns.e.$= %.3G$" % (slope, numpy.power(r, 2.0), p, err)) plt.xlabel("$<f_{i}>$") plt.ylabel("$\\sigma_{i}$") plt.xscale("log") plt.yscale("log") plt.legend(loc="upper left") plt.show() def correlation(x, y, x_lbl="Degree $k$", y_lbl="$\\eta$"): mask = numpy.isfinite(x) & numpy.isfinite(y) x = x[mask] y = y[mask] pearson = scipy.stats.pearsonr(x, y) spearman = scipy.stats.spearmanr(x, y) fig = plt.figure() plt.plot(x, y, "x", label="$r=%.3g$\n$p=%.3g$\n$\\rho=%.3g$\n$p=%.3g$" % (pearson[0], pearson[1], spearman[0], spearman[1])) plt.xlabel(x_lbl) plt.ylabel(y_lbl) plt.legend(loc="best") return fig def histogram(x, x_lbl="Speed $v$", y_lbl="$f(v)$", num_bins=100): mask = numpy.isfinite(x) if not mask.any(): return x = x[mask] plt.hist(x, bins=num_bins) plt.xlabel(x_lbl) plt.ylabel(y_lbl) plt.show()
bsd-3-clause
-80,002,290,279,166,700
29.034783
137
0.556456
false
2.734759
false
false
false
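The fluctuation_scaling_fit helper in the foggy record above fits sigma_i against <f_i> with a shifted power law k * x**alpha + c. Below is a small synthetic sanity check of the same model using numpy and scipy only; the exponent 0.75, the noise level and the explicit p0 starting guess are my own choices and are not part of the original module.

import numpy
from scipy.optimize import curve_fit

def power_law(x, k, alpha, c):
    # same model as _continuous_power_law in foggy.plots
    return k * numpy.power(x, alpha) + c

numpy.random.seed(42)
x = numpy.logspace(0, 4, 200)
y = 2.0 * numpy.power(x, 0.75) + numpy.random.normal(scale=0.5, size=x.size)

# p0 is a rough starting guess, added here for robustness
(popt, pcov) = curve_fit(power_law, x, y, p0=(1.0, 0.5, 0.0))
print("alpha = %.3f +/- %.3f" % (popt[1], numpy.sqrt(pcov[1, 1])))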
salv-orlando/MyRepo
nova/image/fake.py
1
7157
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2011 Justin Santa Barbara # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Implementation of an fake image service""" import copy import datetime import random from nova import exception from nova import flags from nova import log as logging from nova import utils LOG = logging.getLogger('nova.image.fake') FLAGS = flags.FLAGS class _FakeImageService(object): """Mock (fake) image service for unit testing.""" def __init__(self): self.images = {} # NOTE(justinsb): The OpenStack API can't upload an image? # So, make sure we've got one.. timestamp = datetime.datetime(2011, 01, 01, 01, 02, 03) # NOTE(bcwaldon): was image '123456' image1 = {'id': '155d900f-4e14-4e4c-a73d-069cbf4541e6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': False, 'container_format': 'raw', 'disk_format': 'raw', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel, 'architecture': 'x86_64'}} # NOTE(bcwaldon): was image 'fake' image2 = {'id': 'a2459075-d96c-40d5-893e-577ff92e721c', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} # NOTE(bcwaldon): was image '2' image3 = {'id': '76fa36fc-c930-4bf3-8c8a-ea2a2420deb6', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': None, 'disk_format': None, 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} # NOTE(bcwaldon): was image '1' image4 = {'id': 'cedef40a-ed67-4d10-800e-17455edce175', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} # NOTE(bcwaldon): was image '3' image5 = {'id': 'c905cedb-7281-47e4-8a62-f26bc5fc4c77', 'name': 'fakeimage123456', 'created_at': timestamp, 'updated_at': timestamp, 'deleted_at': None, 'deleted': False, 'status': 'active', 'is_public': True, 'container_format': 'ami', 'disk_format': 'ami', 'properties': {'kernel_id': FLAGS.null_kernel, 'ramdisk_id': FLAGS.null_kernel}} self.create(None, image1) self.create(None, image2) self.create(None, image3) self.create(None, image4) self.create(None, image5) super(_FakeImageService, self).__init__() def index(self, context, filters=None, marker=None, limit=None): """Returns list of images.""" retval = [] for img in self.images.values(): retval += [dict([(k, v) for k, v in img.iteritems() if k in ['id', 'name']])] return retval def detail(self, context, filters=None, marker=None, limit=None): """Return list of 
detailed image information.""" return copy.deepcopy(self.images.values()) def show(self, context, image_id): """Get data about specified image. Returns a dict containing image data for the given opaque image id. """ image = self.images.get(str(image_id)) if image: return copy.deepcopy(image) LOG.warn('Unable to find image id %s. Have images: %s', image_id, self.images) raise exception.ImageNotFound(image_id=image_id) def show_by_name(self, context, name): """Returns a dict containing image data for the given name.""" images = copy.deepcopy(self.images.values()) for image in images: if name == image.get('name'): return image raise exception.ImageNotFound(image_id=name) def create(self, context, metadata, data=None): """Store the image data and return the new image id. :raises: Duplicate if the image already exist. """ image_id = str(metadata.get('id', utils.gen_uuid())) metadata['id'] = image_id if image_id in self.images: raise exception.Duplicate() self.images[image_id] = copy.deepcopy(metadata) return self.images[image_id] def update(self, context, image_id, metadata, data=None): """Replace the contents of the given image with the new data. :raises: ImageNotFound if the image does not exist. """ if not self.images.get(image_id): raise exception.ImageNotFound(image_id=image_id) self.images[image_id] = copy.deepcopy(metadata) def delete(self, context, image_id): """Delete the given image. :raises: ImageNotFound if the image does not exist. """ removed = self.images.pop(image_id, None) if not removed: raise exception.ImageNotFound(image_id=image_id) def delete_all(self): """Clears out all images.""" self.images.clear() _fakeImageService = _FakeImageService() def FakeImageService(): return _fakeImageService def FakeImageService_reset(): global _fakeImageService _fakeImageService = _FakeImageService()
apache-2.0
-7,230,544,118,180,133,000
34.430693
78
0.54562
false
4.101433
false
false
false
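A short usage sketch for the _FakeImageService record above, assuming it is imported inside a configured nova test environment (the module needs nova's FLAGS at import time). The image id shown is one of the five seeded in __init__; the metadata dict passed to create() is illustrative.

from nova.image import fake

service = fake.FakeImageService()
print(len(service.index(None)))          # the five images seeded in __init__

image = service.show(None, 'cedef40a-ed67-4d10-800e-17455edce175')
print(image['name'])

new = service.create(None, {'name': 'my-test-image', 'is_public': False})
service.update(None, new['id'], dict(new, status='active'))
service.delete(None, new['id'])

fake.FakeImageService_reset()            # restore the pristine seeded state between tests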
MowenPan/star_wars
ship.py
1
1577
import pygame
from pygame.sprite import Sprite


class Ship(Sprite):

    def __init__(self, ai_settings, screen):
        """Initialize the ship, and set its starting position."""
        super(Ship, self).__init__()
        self.screen = screen
        self.ai_settings = ai_settings

        # Load the ship image, and get its rect.
        self.image = pygame.image.load('images/ship.bmp')
        self.rect = self.image.get_rect()
        self.screen_rect = screen.get_rect()

        # Start each new ship at the bottom center of the screen.
        self.rect.centerx = self.screen_rect.centerx
        self.rect.bottom = self.screen_rect.bottom

        # Store a decimal value for the ship's center.
        self.center = float(self.rect.centerx)

        # Movement flags.
        self.moving_right = False
        self.moving_left = False

    def center_ship(self):
        """Center the ship on the screen."""
        self.center = self.screen_rect.centerx

    def update(self):
        """Update the ship's position, based on movement flags."""
        # Update the ship's center value, not the rect.
        if self.moving_right and self.rect.right < self.screen_rect.right:
            self.center += self.ai_settings.ship_speed_factor
        if self.moving_left and self.rect.left > 0:
            self.center -= self.ai_settings.ship_speed_factor

        # Update rect object from self.center.
        self.rect.centerx = self.center

    def blitme(self):
        """Draw the ship at its current location."""
        self.screen.blit(self.image, self.rect)
apache-2.0
-7,004,505,927,314,254,000
34.044444
74
0.623335
false
3.913151
false
false
false
joeyoung658/A-Level_2016-18
Other/numbers.py
1
1301
# 09/09/2016
# Joe Young
import random
from time import sleep

count = 0
alist = []
randomc = 0

while randomc != 4:
    ran = random.randint(1, 100)
    alist.append(ran)
    randomc = randomc + 1
alist.sort()


def question():
    global alist
    global count
    number = 0
    clist = len(alist) - 1
    print(alist)
    try:
        number = int(input("Input a number which fits the sequence \n -"))
    except ValueError:
        print("Please input a whole number!\n")
        print(question())

    if number > alist[clist]:
        if count == 4:
            exit = str(input("Would you like to exit? (Yes/No)")).lower()
            if exit == "yes":
                print("You have exited the program!")
                sleep(5)
            elif exit == "no":
                print("You have chosen not to exit the program!")
                sleep(3)
                count = 0
                return question()
            else:
                print("Please enter a valid option!")
        else:
            count = count + 1
            print("Yes,", number, " does fit the sequence \n")
            return question()
    else:
        print("No,", number, " does not fit the sequence \n")
        return question()

question()
gpl-3.0
7,266,662,100,259,031,000
21.654545
74
0.496541
false
4.117089
false
false
false
OAButton/tricorder
plugins/python/sciencedirect.py
1
7168
#!/usr/bin/env python2.7 # NOTE THIS NEEDS 2.6 as parser breaks with 2.5 :-) import warnings warnings.simplefilter("ignore",DeprecationWarning) import os, sys, re, urllib2, string, socket import htmlentitydefs import mechanize import html5lib from html5lib import treebuilders import lxml.html, lxml.etree from lxml.cssselect import CSSSelector socket.setdefaulttimeout(15) class ParseException(Exception): pass ## # Removes HTML or XML character references and entities from a text string. # # @param text The HTML (or XML) source text. # @return The plain text, as a Unicode string, if necessary. def unescape(text): def fixup(m): text = m.group(0) if text[:2] == "&#": # character reference try: if text[:3] == "&#x": return unichr(int(text[3:-1], 16)) else: return unichr(int(text[2:-1])) except ValueError: pass else: # named entity try: text = unichr(htmlentitydefs.name2codepoint[text[1:-1]]) except KeyError: pass return text # leave as is return re.sub("&#?\w+;", fixup, text).encode('utf-8') # # Strip off any institutional proxies we find # def canon_url(url): # print "xxxxx url = %s" % url m = re.match(r'http://[^/]*sciencedirect.com[^/]*/(science(\?_ob|/article).*$)', url) if not m: raise ParseException, "bad source url" return "http://www.sciencedirect.com/" + m.group(1) # # Make up crossref metadata URL (just need the DOI) # def crossref_xml_url(doi): url = "http://www.crossref.org/openurl/?id=doi:" + doi url += "&noredirect=true" # see http://www.crossref.org/help/Content/05_Interfacing_with_the_CrossRef_system/Using_the_Open_URL_Resolver.htm # key is either "username:password" or "<email>" key_file = os.environ.get("HOME") + "/.crossref-key" if os.path.exists(key_file): f = open(key_file) key = f.read().strip() f.close() url += "&pid=" + key url += "&format=unixref" return url # # Try, by foul trickery, to get an abstract # We're looking for HTML like this: # <div class="articleText" style="display: inline;"> # <h3 class="h3">Abstract</h3> # <p>An instrumented indentation technique... 
# def scrape_abstract(page): root = lxml.html.fromstring(page) #root = lxml.html.fromstring(html_data) #links_lxml_res = root.cssselect("a.detailsViewLink") #links_lxml = [link.get("href") for link in links_lxml_res] #links_lxml = list(set(links_lxml)) abs = [] for div in root.cssselect("div.articleText"): for h3 in div.cssselect("h3.h3"): if h3.text and string.lower(h3.text) in ('abstract','summary'): for p in div.cssselect("p"): abs.append(p.xpath("string()")) if len(abs) == 0: for div in root.cssselect('div.svAbstract'): for p in div.cssselect("p"): abs.append(p.xpath("string()")) if len(abs) == 0: for div in root.cssselect('#articleContent'): for p in div.cssselect("div.articleText_indent"): abs.append(p.xpath("string()")) abstract = ' '.join(abs) abstract = re.sub('\n+',' ',abstract) abstract = re.sub('\s+',' ',abstract) # print "1=================================================================" # print abstract # print "2=================================================================" return unescape(abstract) # # Just try to fetch the metadata from crossref # def handle(url): cUrl = canon_url(url) #print "%s => %s" % (url, cUrl) cookies = mechanize.CookieJar() browser = mechanize.Browser() browser.addheaders = [("User-Agent", "Mozilla/5.0 (compatible; citeulike/1.0)"), ("From", "plugins@citeulike.org")] #browser.add_handler(PrettifyHandler()) browser.set_handle_robots(False) browser.set_debug_http(False) browser.set_debug_redirects(False) browser.open(cUrl) response = browser.response() page = response.get_data() # print page # # Elsevier insist on user selecting a "preferred source" when the article is # available. This is normally stored in a cookie. # If we get directed to the Elsevier "linking hub", find the 1st SD link on the # and follow that. # Yeah, I know - rubbish. # huburl = browser.geturl() doi = None m = re.search(r'linkinghub.elsevier.com/', huburl) if m: root = lxml.html.fromstring(page) inputs = root.cssselect("input") hrefs = [link.get("value") for link in inputs] for href in hrefs: n = re.search('sciencedirect.com',href) if n: browser.open(href) response = browser.response() page = response.get_data() break m = re.search(r'<a(?: id="[^"]+")? href="http://dx.doi.org/([^"]+)"', page) # this page might requires a login. Luckily there seems to be a # link "View Abstract" which can take us to a page we can read if not m and not doi: root = lxml.html.fromstring(page) links = root.cssselect("a") for href in [e.get("href") for e in links]: if href: m = re.search(r'http://dx.doi.org/([^"]+)', href) if m: break if False: parser = html5lib.HTMLParser(tree=treebuilders.getTreeBuilder("beautifulsoup")) # print page soup = parser.parse(page) link = soup.find(text=re.compile(r"view abstract", re.I)) if link: href = link.parent['href'] browser.open(href) response = browser.response() page = response.get_data() m = re.search(r'<a(?: id="[^"]+")? 
href="http://dx.doi.org/([^"]+)"', page) if m: doi = m.group(1) else: root = lxml.html.fromstring(page) doi_nodes = root.cssselect("#doi") for n in [e.text for e in doi_nodes]: doi = re.sub(r'doi:','',n) break if not doi: m = re.search(r'/doi/(10\.\d\d\d\d)_([^/]+)/', page) if m: doi = "%s/%s" % (m.group(1), m.group(2)) if not doi: raise ParseException, "Cannot find DOI in page" # if not re.search(r'^10[.](1016|1006|1053)/',doi): # raise ParseException, "Cannot find an Elsevier DOI (10.1006, 10.1016, 10.1053) DOI" xml_url = crossref_xml_url(doi) browser.open(xml_url) response = browser.response() xml_page = response.get_data() xml_page = xml_page.decode('utf-8') # Get rid of extraneous "stars" \u2606. Sometimes at end of title (hopefully # they're never meant to be "real" elsewhere...) xml_page = xml_page.replace(u'\u2606',' ') m = re.search("not found in CrossRef", xml_page) if m: raise ParseException, "Unable to locate that DOI (%s) in crossref" % doi yield "begin_tsv" yield "use_crossref\t1" yield "linkout\tDOI\t\t%s\t\t" % doi abstract = scrape_abstract(page) # try: # abstract = scrape_abstract(page) # except: # abstract = '' if abstract: print "abstract\t%s" % (abstract) yield "end_tsv" yield "status\tok" if __name__ == "__main__": url = sys.stdin.readline().strip() for line in handle(url): print line.encode("utf-8") sys.exit(0) try: for line in handle(url): print line.encode("utf-8") except Exception, e: import traceback line = traceback.tb_lineno(sys.exc_info()[2]) print "\t".join(["status", "error", "There was an internal error processing this request. Please report this to bugs@citeulike.org quoting error code %d." % line]) raise
bsd-3-clause
-3,845,690,253,872,181,000
25.947368
165
0.628906
false
2.946157
false
false
false
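The crossref_xml_url helper in the plugin above simply assembles an OpenURL query string for a DOI. A minimal reproduction of that string building, without the optional ~/.crossref-key credential branch; the DOI is an arbitrary example value:

def crossref_xml_url(doi):
    # same construction as in the plugin, minus the optional pid= credential
    url = "http://www.crossref.org/openurl/?id=doi:" + doi
    url += "&noredirect=true"
    url += "&format=unixref"
    return url

print(crossref_xml_url("10.1016/j.ijsolstr.2011.01.025"))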
forkbong/qutebrowser
tests/end2end/features/test_completion_bdd.py
1
1124
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:

# Copyright 2015-2021 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser.  If not, see <http://www.gnu.org/licenses/>.

import pytest_bdd as bdd
bdd.scenarios('completion.feature')


@bdd.then(bdd.parsers.parse("the completion model should be {model}"))
def check_model(quteproc, model):
    """Make sure the completion model was set to something."""
    pattern = "Starting {} completion *".format(model)
    quteproc.wait_for(message=pattern)

gpl-3.0
-5,833,988,742,366,090,000
39.142857
74
0.75089
false
3.746667
false
false
false
MasterOdin/gitvier
setup.py
1
1816
#!/usr/bin/env python
"""Setup script for the package."""

import os
import sys

import setuptools

from gitvier import __project__, __version__, __author__, DESCRIPTION

PACKAGE_NAME = "gitvier"
MINIMUM_PYTHON_VERSION = (3, 5)


def check_python_version():
    """Exit when the Python version is too low."""
    if sys.version_info < MINIMUM_PYTHON_VERSION:
        sys.exit("Python {0}.{1}+ is required.".format(*MINIMUM_PYTHON_VERSION))


def read_descriptions():
    """Build a description for the project from documentation files."""
    try:
        readme = open("README.rst").read()
    except IOError:
        return "<placeholder>"
    else:
        return readme


check_python_version()

setuptools.setup(
    name=__project__,
    version=__version__,
    author=__author__,
    author_email='matt.peveler@gmail.com',
    description=DESCRIPTION,
    long_description=read_descriptions(),
    url='https://github.com/MasterOdin/gitvier',
    packages=setuptools.find_packages(),
    entry_points={'console_scripts': [
        'gitvier = gitvier.cli:main'
    ]},
    license='MIT',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development',
        'Topic :: Software Development :: Build Tools',
        'Topic :: Software Development :: Version Control',
        'Topic :: System :: Software Distribution'
    ],
    install_requires=[
        'colorama',
        'GitPython',
        'PyYAML'
    ]
)
mit
4,129,258,678,640,155,000
26.515152
80
0.616189
false
4.165138
false
false
false
thecookieraider/PyStar
Maze Generation/Pathfinder.py
1
14841
from math import sqrt from random import shuffle, randint, seed from Mazers import Depth_First from pygame.locals import * from time import time import sys import pygame import Main class Pathfinder: START_COLOR = (0, 0, 255) END_COLOR = (255, 20, 147) SEARCHED_COLOR = (255, 0, 0) PATH_COLOR = (0, 255, 0) FPS = 60 DRAWING = 0x01 AUTO = 0x02 def __init__(self, main_args, displaysurf, width, height): self.surf = displaysurf self.fps = pygame.time.Clock() self.w = width self.h = height self.main_args = main_args self.maze = Depth_First.Maze(width, height, False) self.points = [] self.keys = {} self.mode = self.AUTO self.seed = randint(0, 1000) if main_args['type'] == Main.REG_MAZE: self.maze.generate(self.seed) self.blitMethod = Depth_First.Maze.gen_surf_s elif main_args['type'] == Main.BOX_MAZE: self.maze.generate_box(self.seed, main_args['box_dims'], main_args['diagonal']) self.blitMethod = Depth_First.Maze.gen_surf_box_s self.cells = self.maze.cells self.highlighted_cell = [0, 0] self.update() pygame.display.update() self.handle_events() def a_star(self, start, goal): openlist = set() closedlist = set() current = Node(start, None, 0, self.get_distance(start, goal)) openlist.add(current) while openlist: openlist = set(sorted(openlist, key=lambda _node: _node.fCost)) current = openlist.pop() if current.cell.x == goal.x and current.cell.y == goal.y: path = [] while current.parent is not None: path.append(current) if current.cell.color != self.END_COLOR: current.cell.color = self.PATH_COLOR current = current.parent self.surf.blit(self.blitMethod(self.cells, self.w, self.h), (0, 0)) pygame.display.update() return path closedlist.add(current) if current.cell.color != self.START_COLOR: current.cell.color = self.SEARCHED_COLOR self.special_events() self.surf.blit(self.blitMethod(self.cells, self.w, self.h), (0, 0)) pygame.display.update() n = [x for x in current.cell.get_neighbors(self.cells) if x.visited] nodes = [] for cell in n: gcost = current.gCost + self.get_distance(current.cell, cell) hcost = self.get_distance(cell, goal) node = Node(cell, current, gcost, hcost) nodes.append(node) for cell in n: if self.cell_in_list(cell, closedlist): continue gcost = current.gCost + self.get_distance(current.cell, cell) hcost = self.get_distance(cell, goal) node = Node(cell, current, gcost, hcost) if not self.cell_in_list(cell, openlist): openlist.add(node) return None @staticmethod def cell_in_list(cell, nodelist): for i in nodelist: if i.cell.x == cell.x and i.cell.y == cell.y: return True return False @staticmethod def better_sibling(node, openlist): for i in openlist: if i.cell == node.cell and i.fCost <= node.fCost: return True return False def get_random_point(self): l = [i for x in self.cells for i in x if i.visited] shuffle(l) return l[randint(0, len(l)-1)] @staticmethod def node_sorter(a, b): if b.fCost < a.fCost: return 1 if b.fCost > a.fCost: return -1 return 0 @staticmethod def get_distance(start, goal): dx = float(start.x - goal.x) dy = float(start.y - goal.y) dist = float(sqrt(dx * dx + dy * dy)) return dist @staticmethod def clamp(x, y, maxx, maxy, minx, miny): pair = [] if x > maxx: pair.append(maxx) elif x < minx: pair.append(minx) else: pair.append(x) if y > maxy: pair.append(maxy) elif y < miny: pair.append(miny) else: pair.append(y) return pair def generate_random_start_end(self): self.reset_cell_colors() seed() self.points = [self.get_random_point(), self.get_random_point()] self.points[0].color = self.START_COLOR self.points[1].color = self.END_COLOR print("New points generated: 
Start: {}, {} | End: {}, {}".format(self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y)) self.update() def set_all_cells_to_color(self, col): for array in self.cells: for cell in array: cell.color = col def reset_cell_colors(self, leave_start_end=False): for array in self.cells: for cell in array: if cell.visited: if leave_start_end and cell.color in (self.START_COLOR, self.END_COLOR): continue else: cell.color = (255, 255, 255) else: cell.color = (0, 0, 0) self.update() def reset_maze(self, new_seed): if new_seed: self.seed = randint(0, 1000) if self.main_args['type'] == Main.BOX_MAZE: self.maze.generate_box(self.seed, self.main_args['box_dims'], self.main_args['diagonal']) elif self.main_args['type'] == Main.REG_MAZE: self.maze.generate(self.seed, self.main_args['diagonal']) self.cells = self.maze.cells self.update() def get_cell(self, x, y): for array in self.cells: for cell in array: if self.main_args['type'] == Main.BOX_MAZE: if cell.x == int((x / self.main_args['box_dims'][0])) \ and cell.y == int((y / self.main_args['box_dims'][1])): return cell else: if cell.x == x and cell.y == y: return cell def handle_events(self): while True: for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit(0) if event.type == MOUSEBUTTONDOWN: if event.button == 1: if len(self.points) == 2: self.points = [] self.reset_cell_colors() elif len(self.points) == 1: cell = self.get_cell(event.pos[0], event.pos[1]) if cell.visited: self.points.append(cell) cell.color = self.END_COLOR elif not self.points: cell = self.get_cell(event.pos[0], event.pos[1]) if cell.visited: self.points.append(cell) cell.color = self.START_COLOR if event.button == 3: self.keys['button3'] = event.pos if event.type == MOUSEBUTTONUP: if 'button'+str(event.button) in self.keys: del self.keys['button'+str(event.button)] if event.type == KEYDOWN: self.keys[event.key] = True if event.key == K_d: self.highlighted_cell[0] += self.main_args['box_dims'][0] elif event.key == K_s: self.highlighted_cell[1] += self.main_args['box_dims'][1] elif event.key == K_a: self.highlighted_cell[0] -= self.main_args['box_dims'][0] elif event.key == K_w: self.highlighted_cell[1] -= self.main_args['box_dims'][1] if event.type == KEYUP: if event.key in self.keys: del self.keys[event.key] if event.key == K_z: if self.mode == self.AUTO: self.mode = self.DRAWING self.set_all_cells_to_color((255, 255, 255)) for r in self.cells: for c in r: c.visited = True else: self.reset_maze(False) self.cells = self.maze.cells self.mode = self.AUTO self.points = [] if event.key == K_r: Main.main() elif event.key == K_f: self.reset_cell_colors(True) if not self.points or len(self.points) < 2: self.generate_random_start_end() print("Finding path . . 
.") print("Start: ({}, {})\nEnd: ({}, {})".format(self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y)) b = time() self.a_star(self.points[0], self.points[1]) e = time() print("Done in {} seconds".format(e - b)) elif event.key == K_p: self.generate_random_start_end() elif event.key == K_m: self.reset_maze(True) elif event.key == K_c: if self.mode == self.AUTO: self.reset_cell_colors() self.points = [] else: self.set_all_cells_to_color((255, 255, 255)) for r in self.cells: for c in r: c.visited = True self.points = [] elif event.key == K_x: if self.mode == self.DRAWING: for r in self.cells: for c in r: if not c.visited: c.color = (255, 255, 255) c.visited = True self.reset_cell_colors(True) else: self.reset_cell_colors(True) elif event.key == K_SPACE: if len(self.points) == 2: self.points = [] self.reset_cell_colors() elif len(self.points) == 1: hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1]) if hcell: self.points.append(hcell) hcell.color = self.END_COLOR elif not self.points: hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1]) if hcell: self.points.append(hcell) hcell.color = self.START_COLOR self.update() pygame.event.pump() if K_RIGHT in self.keys: self.highlighted_cell[0] += self.main_args['box_dims'][0] elif K_DOWN in self.keys: self.highlighted_cell[1] += self.main_args['box_dims'][1] elif K_LEFT in self.keys: self.highlighted_cell[0] -= self.main_args['box_dims'][0] elif K_UP in self.keys: self.highlighted_cell[1] -= self.main_args['box_dims'][1] self.highlighted_cell = self.clamp(self.highlighted_cell[0], self.highlighted_cell[1], self.w - self.main_args['box_dims'][0], self.h - self.main_args['box_dims'][1], 0, 0) if K_v in self.keys and self.mode == self.DRAWING: hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1]) hcell.visited = False hcell.color = (0, 0, 0) if 'button3' in self.keys and self.mode == self.DRAWING: hcell = self.get_cell(pygame.mouse.get_pos()[0], pygame.mouse.get_pos()[1]) hcell.visited = False hcell.color = (0, 0, 0) if K_b in self.keys and self.mode == self.DRAWING: hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1]) hcell.visited = True hcell.color = (255, 255, 255) hcell = self.get_cell(self.highlighted_cell[0], self.highlighted_cell[1]) pygame.draw.rect(self.surf, (0, 255, 0), (self.highlighted_cell[0], self.highlighted_cell[1], hcell.box[0], hcell.box[1])) pygame.display.update() self.fps.tick(self.FPS) def special_events(self): for event in pygame.event.get(): if event.type == QUIT: pygame.quit() sys.exit(0) if event.type == KEYUP: if event.key == K_r: Main.main() elif event.key == K_k: print("A-Star Halted") self.handle_events() elif event.key == K_f: self.reset_cell_colors(True) self.points = [] self.generate_random_start_end() print("Finding path . . .") print("START: ({}, {})\nEND: ({}, {})".format(self.points[0].x, self.points[0].y, self.points[1].x, self.points[1].y)) b = time() self.a_star(self.points[0], self.points[1]) e = time() print("FOUND PATH IN {} SECONDS".format(e - b)) self.handle_events() def update(self): self.surf.blit(self.blitMethod(self.cells, self.w, self.h), (0, 0)) class Node: def __init__(self, cell, parent, gcost, hcost): self.cell = cell self.parent = parent self.gCost = gcost self.hCost = hcost self.fCost = gcost + hcost
mit
-1,412,383,707,930,343,000
37.952756
145
0.457112
false
4.049386
false
false
false
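An illustrative aside on the record above: its a_star() keeps the open set in a Python set and re-sorts the whole set on every iteration before popping the cheapest node. A common alternative is a binary heap. The sketch below is not the record's code; it is a minimal, self-contained grid A* (the 4-connected grid, start and goal are made-up example inputs) showing the heap-based open list.

import heapq
import itertools
from math import sqrt

def astar(passable, start, goal):
    """Minimal A* over a set of walkable (x, y) cells, 4-connected."""
    def h(a, b):
        return sqrt((a[0] - b[0]) ** 2 + (a[1] - b[1]) ** 2)

    tie = itertools.count()                 # tie-breaker so the heap never compares cells
    open_heap = [(h(start, goal), next(tie), 0.0, start, None)]
    came_from, best_g = {}, {start: 0.0}
    while open_heap:
        _, _, g, cell, parent = heapq.heappop(open_heap)
        if cell in came_from:               # stale entry, already expanded via a cheaper path
            continue
        came_from[cell] = parent
        if cell == goal:                    # walk parents back to the start
            path = []
            while cell is not None:
                path.append(cell)
                cell = came_from[cell]
            return path[::-1]
        x, y = cell
        for nxt in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            ng = g + 1.0
            if nxt in passable and ng < best_g.get(nxt, float("inf")):
                best_g[nxt] = ng
                heapq.heappush(open_heap, (ng + h(nxt, goal), next(tie), ng, nxt, cell))
    return None

# Example: fully open 3x3 grid
print(astar({(x, y) for x in range(3) for y in range(3)}, (0, 0), (2, 2)))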
citrtech/DJLand-Tools
audio_splitter/test/test_slice.py
1
1077
from pydub import AudioSegment # pydub does things in milliseconds ten_seconds = 10 * 1000 one_second = 1000 #Examples #first_10_seconds = song[:ten_seconds] #last_5_seconds = song[-5000:] song = AudioSegment.from_mp3("2016.01.04-09.00.00-S.mp3") #print("Test") #last_second = song[-ten_seconds:] #last_second.export("out/testing.mp3", format="mp3") #Cool that worked, now let's try looping #find the duration of the input clip in milliseconds duration_in_milliseconds = len(song) #grab each one second slice and save it from the first second to the last whole second in the file for i in range(0,duration_in_milliseconds,1*one_second): print ("Second number %s \n" % (int(i/1000)) ) offset = i + one_second current_second = song[i:offset]; filename = "out/" + str(int(i/1000)) + ".mp3" current_second.export(filename, format="mp3") #it works! now we just have to combine it with the other stuff to start from the #right unix timestamp and check behaviour of last second (where there might not #be a complete second of audio left)
gpl-3.0
-2,184,063,138,072,133,000
30.676471
98
0.707521
false
3.234234
false
false
false
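An illustrative aside on the record above: its closing TODO is to name slices by absolute unix timestamp and to tolerate a short final slice. The sketch below is not the record's code; the input filename and start time are assumptions taken from the record's example, it assumes the same out/ directory and an ffmpeg install, and it relies on pydub slicing past the end simply returning whatever audio remains.

from datetime import datetime, timezone
from pydub import AudioSegment

SOURCE = "2016.01.04-09.00.00-S.mp3"                       # assumed input file
START_EPOCH = int(datetime(2016, 1, 4, 9, 0, 0, tzinfo=timezone.utc).timestamp())

song = AudioSegment.from_mp3(SOURCE)
ONE_SECOND = 1000                                          # pydub works in milliseconds

for offset_ms in range(0, len(song), ONE_SECOND):
    chunk = song[offset_ms:offset_ms + ONE_SECOND]         # last chunk may be shorter than 1 s
    chunk.export("out/%d.mp3" % (START_EPOCH + offset_ms // 1000), format="mp3")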
farseerfc/pacvis
pacvis/pacvis.py
1
7040
#!/usr/bin/env python import sys import json from types import SimpleNamespace from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter from webbrowser import open_new_tab import tornado.ioloop import tornado.web from .console import start_message, append_message, print_message from .infos import DbInfo, PkgInfo, GroupInfo, VDepInfo # Tornado entry class MainHandler(tornado.web.RequestHandler): def parse_args(self, **kargs): result = {} for key in kargs: defvalue = str(kargs[key]) if type(kargs[key]) is int: result[key] = int(self.get_argument(key, defvalue)) elif type(kargs[key]) is bool: result[key] = self.get_argument(key, defvalue) != "False" else: result[key] = self.get_argument(key, defvalue) print_message("get arg %r: %r" % (key, result[key])) return result def get(self): print_message("\n" + str(self.request)) args = SimpleNamespace(**self.parse_args( maxlevel=1000, maxreqs=1000, maxdeps=1000, drawsize="isize", usemagic=False, straightline=False, enablephysics=False, aligntop=False, disableallphysics=False, debugperformance=False, byrepos=False, showallvdeps=False)) dbinfo = DbInfo() start_message("Loading local database ...") dbinfo.find_all(args.showallvdeps) append_message("done") start_message("Finding all dependency circles ... ") dbinfo.find_circles() append_message("done") dbinfo.topology_sort(args.usemagic, args.aligntop, args.byrepos) dbinfo.calcSizes() start_message("Rendering ... ") nodes = [] links = [] nodes.append({"id": 0, "label": "level 1 group", "level": 0, "shape": "triangleDown", "isize": 0, "csize": 0, "cssize": 0, "deps": "", "reqs": "", "optdeps": "", "desc": "", "version": "", "group": "group", "groups": "", "provides": "", }) ids = 1 for pkg in sorted(dbinfo.all_pkgs.values(), key=lambda x: x.level): append_message("%s" % pkg.name) pkg.id = ids ids += 1 if pkg.level < args.maxlevel: group = "normal" if pkg.level == 0: group = "standalone" elif type(pkg) is GroupInfo: group = "group" elif type(pkg) is VDepInfo: group = "vdep" # if not args.showallvdeps and len(pkg.requiredby) == 0: # continue elif pkg.explicit: group = "explicit" nodes.append({"id": pkg.id, "label": pkg.name, "level": pkg.level, "group": group, "isize": pkg.isize, "csize": pkg.csize, "cssize": pkg.cssize, "deps": ", ".join(pkg.deps), "reqs": ", ".join(pkg.requiredby), "optdeps": ", ".join(pkg.optdeps), "groups": ", ".join(pkg.groups), "provides": ", ".join(pkg.provides), "desc": pkg.desc, "version": pkg.version, "repo": pkg.repo, }) ids = 0 for pkg in sorted(dbinfo.all_pkgs.values(), key=lambda x: x.level): if pkg.level < args.maxlevel: if len(pkg.deps) == 0 and len(pkg.requiredby) == 0: links.append({"id": ids, "from": pkg.id, "to": 0}) ids += 1 if len(pkg.deps) < args.maxdeps: for dep in pkg.deps: if dep not in pkg.circledeps: if len(dbinfo.get(dep).requiredby) < args.maxreqs: links.append({"id": ids, "from": pkg.id, "to": dbinfo.get(dep).id}) ids += 1 for dep in pkg.circledeps: if (pkg.id != dbinfo.get(dep).id): links.append({"id": ids, "to": pkg.id, "from": dbinfo.get(dep).id, "color": "rgb(244,67,54,0.8)"}) ids += 1 for dep in pkg.optdeps: if dep in dbinfo.all_pkgs: links.append({"id": ids, "from": pkg.id, "to": dbinfo.get(dep).id, "dashes": True, "color": "rgb(255,235,59)"}) ids += 1 print_message("Writing HTML") self.render("templates/index.template.html", nodes=json.dumps(nodes), links=json.dumps(links), options=args, optionsjson=json.dumps(args.__dict__)) def make_app(): import os return tornado.web.Application([ (r"/", MainHandler), ], debug=True, 
static_path=os.path.join(os.path.dirname(__file__), "static")) def main(): argp = ArgumentParser(description='start PacVis server', formatter_class=ArgumentDefaultsHelpFormatter) argp.add_argument('-p', '--port', type=int, default=8888, help='listen at given port') argp.add_argument('-s', '--host', type=str, default='localhost', help='listen at given hostname') argp.add_argument('-b', '--browser', action='store_true', help='start a browser') args = argp.parse_args() app = make_app() app.listen(args.port, address=args.host) print_message(f"Start PacVis at http://{args.host}:{args.port}/") if args.browser: url = f'http://{args.host}:{args.port}/' print_message(f'open in browser: {url}') open_new_tab(url) else: print_message('use --browser to open a browser automatically.') try: tornado.ioloop.IOLoop.current().start() except KeyboardInterrupt: print_message("Received interrupt from keyboard, shutting down ...") sys.exit(0) if __name__ == "__main__": main()
mit
1,197,412,450,875,610,600
37.681319
107
0.446023
false
4.433249
false
false
false
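An illustrative aside on the record above: MainHandler.parse_args converts each query argument according to the Python type of its default value. The sketch below is not the record's code; it is a standalone Tornado handler using the same idea, where the route, port and the bool-before-int ordering (bool is a subclass of int) are choices for the example, and the argument names are borrowed from the record.

import json
import tornado.ioloop
import tornado.web

class EchoArgs(tornado.web.RequestHandler):
    def typed_args(self, **defaults):
        out = {}
        for key, default in defaults.items():
            raw = self.get_argument(key, str(default))
            if isinstance(default, bool):          # must come before the int check
                out[key] = raw != "False"
            elif isinstance(default, int):
                out[key] = int(raw)
            else:
                out[key] = raw
        return out

    def get(self):
        args = self.typed_args(maxlevel=1000, usemagic=False, drawsize="isize")
        self.write(json.dumps(args))

if __name__ == "__main__":
    tornado.web.Application([(r"/", EchoArgs)]).listen(8888)
    tornado.ioloop.IOLoop.current().start()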
abingham/ackward
src/ackward/logging/LoggerBase.py
1
3176
from ackward import (Class, method, Namespace, Property, TranslationUnit) def tunit(): return TranslationUnit( forward_declarations=[('ackward', 'logging', 'class Filter'), ('ackward', 'logging', 'class Handler'), ('ackward', 'logging', 'class LogRecord')], header_includes=[('ackward', 'logging', 'Types.hpp')], impl_includes=[('ackward', 'logging', 'LoggerBase.hpp'), ('ackward', 'logging', 'Filter.hpp'), ('ackward', 'logging', 'Handler.hpp'), ('ackward', 'logging', 'LogRecord.hpp')]) def methods(parent): methods = [ ('void setLevel(Level l)', 'Sets the threshold for this logger.'), ('bool isEnabledFor(Level l) const', 'Indicates if a message of severity ``lvl`` would be processed by this logger.'), ('Level getEffectiveLevel() const', 'Indicates the effective level for this logger.'), ('void log(Level lvl, std::wstring msg) const', 'Logs a message with level ``lvl`` on this logger.'), ('void addFilter(Filter f)', 'Adds the specified filter ``filt`` to this logger.'), ('void removeFilter(Filter f)', 'Removes the specified filter ``filt`` from this logger.'), ('bool filter(LogRecord r) const', 'Applies this logger\'s filters to the record and returns a true value if the record is to be processed.'), ('void addHandler(Handler h)', 'Adds the specified handler ``hdlr`` to this logger.'), ('void removeHandler(Handler h)', 'Removes the specified handler hdlr from this logger.'), ('void handle(LogRecord r) const', 'Handles a record by passing it to all handlers associated with this logger and its ancestors (until a false value of propagate is found).'), ('void exception(std::wstring msg) const', '''Logs a message with level ``ERROR`` on this logger. Exception info is added to the logging message. This method should only be called from an exception handler.''') ] for lvl in ['debug', 'info', 'warning', 'error', 'critical']: methods.append( ('void {0}(std::wstring msg) const'.format(lvl), 'Logs a message with level ``{0}`` on this logger.'.format(lvl.upper()))) for m in methods: docstring='''\\rst {0} \\endrst'''.format(m[1]) method(m[0], parent=parent, doc=docstring) def definition(env): t = tunit() ns = Namespace('ackward', 'logging', parent=t) cls = Class(name='LoggerBase', wrapped_class='logging.Logger', parent=ns) # TODO: docstring for propagate Property(name='propagate', type='bool', parent=cls).doc='If this evaluates to false, logging messages are not passed by this logger or by its child loggers to the handlers of higher level (ancestor) loggers.' methods(parent=cls) return t
mit
8,258,628,040,254,271,000
43.111111
181
0.567695
false
4.517781
false
false
false
faisal-oead/My-Twitter-Bot
main-bot.py
1
1851
#!/usr/bin/env python2 # -*- coding: utf-8 -*- # # Copyright 2014 faisal oead <fafagold@gmail.com> # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, # MA 02110-1301, USA. # import tweepy import time # == security information == consumer_key="" consumer_secret="" access_token="" access_token_secret="" auth = tweepy.OAuthHandler(consumer_key, consumer_secret) auth.set_access_token(access_token, access_token_secret) x = 0 y = 0 try: api = tweepy.API(auth) user = api.me() followers_list = api.followers_ids(user) friends_list = api.friends_ids(user) # unfollow accounts that do not follow back try: for friend in friends_list: if friend not in followers_list: api.destroy_friendship(friend) time.sleep(60) x = x + 1 if x == 200: break except tweepy.error.TweepError as ee: print ee pass #follow new people try: for follower in api.followers_ids("MohamadAlarefe"): if follower not in followers_list: api.create_friendship(follower) time.sleep(60) y = y + 1 if y == 100: break except tweepy.error.TweepError as eee: print eee pass except tweepy.error.TweepError as e: print e pass
gpl-2.0
-3,045,754,460,581,140,500
25.072464
71
0.707615
false
2.929967
false
false
false
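An illustrative aside on the record above: instead of sleeping 60 seconds after every API call, the older tweepy 3.x client (which the record targets, given tweepy.error.TweepError) can be asked to wait out rate limits itself. The sketch below is not the record's code; the wait_on_rate_limit option and keyword arguments are assumptions about that 3.x API, and the credentials are placeholders.

import tweepy

auth = tweepy.OAuthHandler("CONSUMER_KEY", "CONSUMER_SECRET")
auth.set_access_token("ACCESS_TOKEN", "ACCESS_TOKEN_SECRET")
api = tweepy.API(auth, wait_on_rate_limit=True)     # sleep only when Twitter says so

me = api.me()
followers = set(api.followers_ids(user_id=me.id))
for friend_id in api.friends_ids(user_id=me.id):
    if friend_id not in followers:                  # they do not follow back
        try:
            api.destroy_friendship(user_id=friend_id)
        except tweepy.TweepError as err:
            print(err)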
MatthewShao/mitmproxy
test/pathod/test_pathod.py
1
7341
import io import pytest from pathod import pathod from mitmproxy.net import tcp from mitmproxy import exceptions from mitmproxy.test import tutils from . import tservers class TestPathod: def test_logging(self): s = io.StringIO() p = pathod.Pathod(("127.0.0.1", 0), logfp=s) assert len(p.get_log()) == 0 id = p.add_log(dict(s="foo")) assert p.log_by_id(id) assert len(p.get_log()) == 1 p.clear_log() assert len(p.get_log()) == 0 for _ in range(p.LOGBUF + 1): p.add_log(dict(s="foo")) assert len(p.get_log()) <= p.LOGBUF class TestTimeout(tservers.DaemonTests): timeout = 0.01 def test_timeout(self): # FIXME: Add float values to spec language, reduce test timeout to # increase test performance # This is a bodge - we have some platform difference that causes # different exceptions to be raised here. with pytest.raises(Exception): self.pathoc(["get:/:p1,1"]) assert self.d.last_log()["type"] == "timeout" class TestNotAfterConnect(tservers.DaemonTests): ssl = False ssloptions = dict( not_after_connect=True ) def test_connect(self): r, _ = self.pathoc( [r"get:'http://foo.com/p/202':da"], connect_to=("localhost", self.d.port) ) assert r[0].status_code == 202 class TestCustomCert(tservers.DaemonTests): ssl = True ssloptions = dict( certs=[("*", tutils.test_data.path("pathod/data/testkey.pem"))], ) def test_connect(self): r, _ = self.pathoc([r"get:/p/202"]) r = r[0] assert r.status_code == 202 assert r.sslinfo assert "test.com" in str(r.sslinfo.certchain[0].get_subject()) class TestSSLCN(tservers.DaemonTests): ssl = True ssloptions = dict( cn=b"foo.com" ) def test_connect(self): r, _ = self.pathoc([r"get:/p/202"]) r = r[0] assert r.status_code == 202 assert r.sslinfo assert r.sslinfo.certchain[0].get_subject().CN == "foo.com" class TestNohang(tservers.DaemonTests): nohang = True def test_nohang(self): r = self.get("200:p0,0") assert r.status_code == 800 l = self.d.last_log() assert "Pauses have been disabled" in l["response"]["msg"] class TestHexdump(tservers.DaemonTests): hexdump = True def test_hexdump(self): assert self.get(r"200:b'\xf0'") class TestNocraft(tservers.DaemonTests): nocraft = True def test_nocraft(self): r = self.get(r"200:b'\xf0'") assert r.status_code == 800 assert b"Crafting disabled" in r.content class CommonTests(tservers.DaemonTests): def test_binarydata(self): assert self.get(r"200:b'\xf0'") assert self.d.last_log() # FIXME: Other binary data elements def test_sizelimit(self): r = self.get("200:b@1g") assert r.status_code == 800 l = self.d.last_log() assert "too large" in l["response"]["msg"] def test_preline(self): r, _ = self.pathoc([r"get:'/p/200':i0,'\r\n'"]) assert r[0].status_code == 200 def test_logs(self): self.d.clear_log() assert self.get("202:da") assert self.d.expect_log(1) self.d.clear_log() assert len(self.d.log()) == 0 def test_disconnect(self): with pytest.raises(Exception, match="Unexpected EOF"): self.get("202:b@100k:d200") def test_parserr(self): rsp = self.get("400:msg,b:") assert rsp.status_code == 800 def test_static(self): rsp = self.get("200:b<file") assert rsp.status_code == 200 assert rsp.content.strip() == b"testfile" def test_anchor(self): rsp = self.getpath("/anchor/foo") assert rsp.status_code == 202 def test_invalid_first_line(self): c = tcp.TCPClient(("localhost", self.d.port)) with c.connect(): if self.ssl: c.convert_to_tls() c.wfile.write(b"foo\n\n\n") c.wfile.flush() l = self.d.last_log() assert l["type"] == "error" assert "foo" in l["msg"] def test_invalid_content_length(self): with pytest.raises(exceptions.HttpException): 
self.pathoc(["get:/:h'content-length'='foo'"]) l = self.d.last_log() assert l["type"] == "error" assert "Unparseable Content Length" in l["msg"] def test_invalid_headers(self): with pytest.raises(exceptions.HttpException): self.pathoc(["get:/:h'\t'='foo'"]) l = self.d.last_log() assert l["type"] == "error" assert "Invalid headers" in l["msg"] def test_access_denied(self): rsp = self.get("=nonexistent") assert rsp.status_code == 800 def test_source_access_denied(self): rsp = self.get("200:b</foo") assert rsp.status_code == 800 assert b"File access denied" in rsp.content def test_proxy(self): r, _ = self.pathoc([r"get:'http://foo.com/p/202':da"]) assert r[0].status_code == 202 def test_websocket(self): r, _ = self.pathoc(["ws:/p/"], ws_read_limit=0) assert r[0].status_code == 101 r, _ = self.pathoc(["ws:/p/ws"], ws_read_limit=0) assert r[0].status_code == 101 def test_websocket_frame(self): r, _ = self.pathoc( ["ws:/p/", "wf:f'wf:b\"test\"':pa,1"], ws_read_limit=1 ) assert r[1].payload == b"test" def test_websocket_frame_reflect_error(self): r, _ = self.pathoc( ["ws:/p/", "wf:-mask:knone:f'wf:b@10':i13,'a'"], ws_read_limit=1, timeout=1 ) # FIXME: Race Condition? assert "Parse error" in self.d.text_log() def test_websocket_frame_disconnect_error(self): self.pathoc(["ws:/p/", "wf:b@10:d3"], ws_read_limit=0) assert self.d.last_log() class TestDaemon(CommonTests): ssl = False def test_connect(self): r, _ = self.pathoc( [r"get:'http://foo.com/p/202':da"], connect_to=("localhost", self.d.port), ssl=True ) assert r[0].status_code == 202 def test_connect_err(self): with pytest.raises(exceptions.HttpException): self.pathoc([r"get:'http://foo.com/p/202':da"], connect_to=("localhost", self.d.port)) class TestDaemonSSL(CommonTests): ssl = True def test_ssl_conn_failure(self): c = tcp.TCPClient(("localhost", self.d.port)) c.rbufsize = 0 c.wbufsize = 0 with c.connect(): c.wfile.write(b"\0\0\0\0") with pytest.raises(exceptions.TlsException): c.convert_to_tls() l = self.d.last_log() assert l["type"] == "error" assert "SSL" in l["msg"] def test_ssl_cipher(self): r, _ = self.pathoc([r"get:/p/202"]) assert r[0].status_code == 202 assert self.d.last_log()["cipher"][1] > 0 class TestHTTP2(tservers.DaemonTests): ssl = True nohang = True def test_http2(self): r, _ = self.pathoc(["GET:/"], ssl=True, use_http2=True) assert r[0].status_code == 800
mit
7,214,238,150,954,249,000
27.126437
98
0.558507
false
3.284564
true
false
false
pi19404/robosub-1
src/microcontroller_interface/microcontroller_debugging_interface.py
1
11005
#!/usr/bin/python #import statements import serial import os import time #Global Constants############################################################################# #These values are temporary, for testing. They WILL change in the final product #It was recommended that these values should be placed in a dictionary control_byte = '\n' ACL_1_X_addr = 0x10 ACL_1_Y_addr = 0x11 ACL_1_Z_addr = 0x12 GYRO_1_X_addr = 0x20 GYRO_1_Y_addr = 0x21 GYRO_1_Z_addr = 0x22 ADC_DEPTH = 0x30 ADC_BATT = 0x31 THRUSTER_BOW_SB = 0x10 THRUSTER_BOW_PORT = 0x11 THRUSTER_DEPTH_SB = 0x12 THRUSTER_DEPTH_PORT = 0x13 THRUSTER_STERN_SB = 0x14 THRUSTER_STERN_PORT = 0x15 mag = 127 #Function Definitions######################################################################### """ Here we are trying to make sure we have actually found a control byte, so we receive several packets, then look at where we expect the control bytes to be. If they are not in the expected locastions, we wait for a new control byte and try again. X000X000X 012345678 """ def get_lock() : #variables for the sync loop current_byte = '\0' packet_array = "" in_sync = False #reset the serial port s.close() s.open() print "Aquiring stream sync" while in_sync == False: #read a packet from the serial port current_byte = s.read() #if the byte is the control_byte, then receive several packets #otherwise, we will jump back to the top of the loop and get another byte if current_byte == control_byte : packet_array = "" # clear out the array packet_array += current_byte # add the byte to the array #receive several packets while len(packet_array) != 9 : packet_array += s.read() #check to see if the control byte is in the proper location in the received packets if (packet_array[0] == control_byte and \ packet_array[4] == control_byte and \ packet_array[8] == control_byte) : #throw away rest of last packet s.read(3) #say we are in sync so we can break out of the loop in_sync = True print "sync locked" #end get_lock() """ This function reads a 4-byte packet from the serial port. It will also check to make sure we are still in sync, and pauses the program if we lose sync. It will then attempt to get back into sync with the serial stream. """ def get_packet() : success = False while success == False : #read 4 bytes from the serial port packet = s.read(4) #ensure we are in sync by checking that the control byte is in the correct place if packet[0] != control_byte : #if we are not in sync print "Error: lost sync. 
Press the [Enter] key to attempt to re-sync" raw_input() #waits for the user to press the enter key s.flushInput() #flushes the serial rx buffer get_lock() #get back into sync else : #if we are in sync, break out of loop success = True return packet #end get_packet() """ cmd_thruster() sends a thruster control command to the microncontroller It takes an id, and a value between +127 and -127 (negative is reverse) """ def cmd_thruster(thruster_id, magnitude) : raw_thruster_id = '\0' direction_mag = 0; raw_direction_mag = '\0' raw_cmd = "" #the chr() command converts the integer to the ascii character representation, which is a raw byte #convert the thruster id to a raw binary value raw_thruster_id = chr(thruster_id) #make sure magnitude is within bounds if (magnitude > 127) : magnitude = 127 elif (magnitude < -127) : magnitude = -127 #convert direction and magnitude variable into a raw byte raw_magnitude = chr(magnitude & 0xFF) CONTROL_BYTE = '\n' #combine the raw bytes raw_cmd = CONTROL_BYTE + raw_thruster_id + raw_magnitude #send the commmand to the microcontroller s.write(raw_cmd) #end cmd_thruster() #here are some example functions controlling the thrusters for movement #causes the sub to move forward def cmd_move_forward() : cmd_thruster(THRUSTER_BOW_SB, -mag) cmd_thruster(THRUSTER_BOW_PORT, mag) cmd_thruster(THRUSTER_STERN_SB, mag) cmd_thruster(THRUSTER_STERN_PORT, -mag) #end cmd_move_forward() #causes the sub to move backwards def cmd_move_backward() : cmd_thruster(THRUSTER_BOW_SB, mag) cmd_thruster(THRUSTER_BOW_PORT, -mag) cmd_thruster(THRUSTER_STERN_SB, -mag) cmd_thruster(THRUSTER_STERN_PORT, mag) #end cmd_move_forward() #causes the sub to dive def cmd_dive() : cmd_thruster(THRUSTER_DEPTH_SB, mag) cmd_thruster(THRUSTER_DEPTH_PORT, mag) #end cmd_move_forward() #causes the sub to surface def cmd_surface() : cmd_thruster(THRUSTER_DEPTH_SB, -mag) cmd_thruster(THRUSTER_DEPTH_PORT, -mag) #end cmd_move_forward() #causes the sub to rotate clockwise def cmd_rotate_cw() : cmd_thruster(THRUSTER_BOW_SB, mag) cmd_thruster(THRUSTER_BOW_PORT, mag) cmd_thruster(THRUSTER_STERN_SB, -mag) cmd_thruster(THRUSTER_STERN_PORT, -mag) #end cmd_rotate_cw() #causes the sub to rotate counter-clockwise def cmd_rotate_ccw() : cmd_thruster(THRUSTER_BOW_SB, -mag) cmd_thruster(THRUSTER_BOW_PORT, -mag) cmd_thruster(THRUSTER_STERN_SB, mag) cmd_thruster(THRUSTER_STERN_PORT, mag) #end cmd_rotate_ccw() #stops the depth control thrusters def cmd_stop_depth() : cmd_thruster(THRUSTER_DEPTH_SB, 0) cmd_thruster(THRUSTER_DEPTH_PORT, 0) #end cmd_move_forward() #stops all thrusters def cmd_stop_all() : cmd_thruster(THRUSTER_BOW_SB, 0) cmd_thruster(THRUSTER_BOW_PORT, 0) cmd_thruster(THRUSTER_STERN_SB, 0) cmd_thruster(THRUSTER_STERN_PORT, 0) cmd_thruster(THRUSTER_DEPTH_SB, 0) cmd_thruster(THRUSTER_DEPTH_PORT, 0) #end cmd_move_forward() #Main code#################################################################################### #initialize the serial port s = serial.Serial() #get instance of serial class s.port = "/dev/ttyUSB0" #this may change, depending on what port the OS gives the microcontroller s.baudrate = 56818 #the baudrate may change in the future s.open() #attempt to open the serial port (there is no guard code, I'm assuming this does not fail) f = open("slog", "w") #clear the screen os.system('clear') get_lock() #get in sync with the stream #Initialize some variables ACL_1_X_val = -1 ACL_1_Y_val = -1 ACL_1_Z_val = -1 GYRO_1_X_val = -1 GYRO_1_Y_val = -1 GYRO_1_Z_val = -1 ADC_DEPTH_val = -1 ADC_BATT_val = -1 
buffer_size_max = 0 buffer_tick = 1 buffer_total = 1 sent_time = 0 received_time = 0 min_ping_time = 500 ping_tick = 1 ping_total = 1 wait_time = time.time() + 1 x_update = time.time() x_period = 500 x_total = 1 x_tick = 1 UART_queue_len = 500 UART_queue_len_max = 0 start_time = time.time() cmd_stop_all() cmd_stop_all() cmd_stop_all() cmd_stop_all() """ time.sleep(5) cmd_stop_all() time.sleep(.1) cmd_move_forward() time.sleep(5) cmd_stop_all() time.sleep(.1) cmd_move_backward() time.sleep(5) cmd_stop_all() time.sleep(.1) cmd_dive() time.sleep(5) cmd_stop_all() time.sleep(.1) cmd_surface() time.sleep(5) cmd_stop_all() time.sleep(.1) cmd_rotate_cw() time.sleep(5) cmd_stop_all() time.sleep(.1) cmd_rotate_ccw() time.sleep(5) cmd_stop_all() """ ACL_1_X_val_old = 0 flipflop = 0 #Main reading loop while 1 : #cmd_dive() #cmd_move_forward() if (time.time() > wait_time) : #cmd_dive() if flipflop == 0 : cmd_move_forward() cmd_dive() else : cmd_move_backward() cmd_surface() flipflop = ~flipflop #cmd_move_forward() """ cmd_move_forward() cmd_thruster(THRUSTER_BOW_SB, mag, 1) cmd_thruster(THRUSTER_STERN_SB, mag, 0) cmd_thruster(THRUSTER_BOW_PORT, mag, 0) cmd_thruster(THRUSTER_STERN_PORT, mag, 1) cmd_dive() cmd_stop_all() cmd_move_forward() cmd_dive() cmd_move_backward() cmd_rotate_cw() cmd_stop_depth() cmd_surface() cmd_thruster(THRUSTER_BOW_SB, 100, 0) cmd_thruster(THRUSTER_STERN_SB, 25, 0) cmd_thruster(THRUSTER_BOW_SB, 0, 0) cmd_thruster(THRUSTER_STERN_SB, 0, 0) cmd_stop_all() """ sent_time = time.time() wait_time = sent_time + .5 #receive a packet received_packet = get_packet() #pull the device information out of the packet device = ord(received_packet[1]) #second byte of packet is device information (first byte is always control byte) os.system('clear') print "Sensor Test" #if-elif statement looks for what device the packet is concerning, and handles the data appropriately if device == ACL_1_X_addr : #pulls the data out the last two bytes of the packet ACL_1_X_val = int(( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 )) #data is stored in 2's complement form, this does the appropriate conversion if ACL_1_X_val > 32767 : ACL_1_X_val = (ACL_1_X_val-65536) f.write("X," + str(ACL_1_X_val) + '\n') elif device == ACL_1_Y_addr : ACL_1_Y_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) if ACL_1_Y_val > 32767 : ACL_1_Y_val = (ACL_1_Y_val-65536) f.write("Y," + str(ACL_1_Y_val) + '\n') elif device == ACL_1_Z_addr : ACL_1_Z_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) if ACL_1_Z_val > 32767 : ACL_1_Z_val = (ACL_1_Z_val-65536) f.write("Z," + str(ACL_1_Z_val) + '\n') elif device == GYRO_1_X_addr : GYRO_1_X_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) if GYRO_1_X_val > 32767 : GYRO_1_X_val = (GYRO_1_X_val-65536) elif device == GYRO_1_Y_addr : GYRO_1_Y_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) if GYRO_1_Y_val > 32767 : GYRO_1_Y_val = (GYRO_1_Y_val-65536) elif device == GYRO_1_Z_addr : GYRO_1_Z_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) if GYRO_1_Z_val > 32767 : GYRO_1_Z_val = (GYRO_1_Z_val-65536) elif device == ADC_DEPTH : ADC_DEPTH_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) elif device == ADC_BATT : ADC_BATT_val = ( ord(received_packet[2]) ) | \ ( ord(received_packet[3]) << 8 ) elif device == ord('P') : received_time = time.time() if ping_tick > 20 : ping_total /= 2 ping_tick /= 2 ping_total += received_time - sent_time ping_tick += 1 print 
"ACL X: %d" % (ACL_1_X_val) print "ACL Y: %d" % (ACL_1_Y_val) print "ACL Z: %d" % (ACL_1_Z_val) print "GYRO X: %d" % (GYRO_1_X_val) print "GYRO Y: %d" % (GYRO_1_Y_val) print "GYRO Z: %d" % (GYRO_1_Z_val) print "ADC Depth: %d" % ((ADC_DEPTH_val) ) print "ADC Battery: %lf" % ((ADC_BATT_val) * 3.3/1024 * 7.5) print "Average Ping Time: %lf" % (ping_total/ping_tick) print "buffer size: %d" % (s.inWaiting()) print "Run Time (minutes): %lf" % ((time.time() - start_time)/60) if ACL_1_X_val_old == ACL_1_X_val : samecount = samecount + 1 else : samecount = 0 print "samecount: %d" % (samecount) if samecount >= 500 : print "FROZEN" samecount = 0 raw_input() #s.close() #time.sleep(.5) #s.open() ACL_1_X_val_old = ACL_1_X_val #time.sleep(0.001) #time.sleep(seconds) #end of reading while loop #close the serial port s.close()
gpl-3.0
-8,604,657,200,176,266,000
22.2173
114
0.649159
false
2.614014
false
false
false
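An illustrative aside on the record above: each sensor packet is a control byte, a device id, and a little-endian 16-bit value that the record converts from two's complement by hand (subtracting 65536 when the value exceeds 32767). The sketch below is not the record's code; it is a Python 3 version of that decode using struct, with a made-up example packet.

import struct

CONTROL_BYTE = b'\n'

def decode_packet(packet):
    """Decode one 4-byte packet: control byte, device id, signed 16-bit value."""
    if packet[0:1] != CONTROL_BYTE:
        raise ValueError("lost sync: packet does not start with the control byte")
    device, value = struct.unpack('<Bh', packet[1:])   # '<h' already handles two's complement
    return device, value

# Example: device 0x10 (ACL_1_X_addr) reporting -2
print(decode_packet(b'\n\x10\xfe\xff'))                # -> (16, -2)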
hivetech/dna
python/dna/cli.py
1
2375
# -*- coding: utf-8 -*- # vim:fenc=utf-8 ''' :copyright (c) 2014 Xavier Bruhiere. :license: MIT, see LICENSE for more details. ''' import os import abc import click class Cli(object): ''' Convenient wrapper around UI interface boilerplate. Make the app prettier and more robust. ''' __metaclass__ = abc.ABCMeta # TODO Allow more styling _default_style = { 'primary-color': 'blue', 'success-color': 'green', 'heading-color': 'white', 'error-color': 'red' } def __init__(self, title, style=None): self._mode = None self.style = style or self._default_style click.clear() self.heading('{} [{} mode]'.format(title, self.mode)) def _print(self, text, **kwargs): click.secho(text, **kwargs) @property def mode(self): return self._mode or os.environ.get('APP_ENV', 'development') @mode.setter def mode(self, value): self._mode = value def heading(self, text): self._print('\n{}\n'.format(text), bold=True, fg=self.style['heading-color'], underline=True) def msg(self, text, **kwargs): self._print(text, fg=self.style['primary-color'], **kwargs) def success(self, text): self._print(text, fg=self.style['success-color'], bold=True) def error(self, text): self._print('\n{}\n'.format(text), fg=self.style['error-color'], bold=True) @abc.abstractmethod def run(self): pass def __call__(self, *args, **kwargs): ''' Golang style function that safely calls main routine ''' exit_result = None exit_error = None try: exit_result = self.run(*args, **kwargs) self.success('Done without error.') except KeyboardInterrupt: self.error('Received SIGINT signal, aborting.') except Exception as error: self.error('!!!!!! CRASH !!!!!!') if self.mode == 'development': raise exit_error = ['{}: {}'.format(type(error).__name__, str(error))] if hasattr(error, 'kwargs'): for k, v in error.kwargs.iteritems(): exit_error.append('{}: {}'.format(k, v)) exit_error = '\n'.join(exit_error) return exit_result, exit_error
apache-2.0
2,027,917,352,932,338,700
27.27381
78
0.548632
false
3.818328
false
false
false
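An illustrative aside on the record above: the Cli class is meant to be subclassed with a run() implementation and then called, so that __call__ traps SIGINT and exceptions and hands back a (result, error) pair. The sketch below is not part of the record; the dna.cli import path is inferred from the record's path field, and the Greeter app is made up.

from dna.cli import Cli

class Greeter(Cli):
    def run(self, name="world"):
        self.msg("saying hello to {}".format(name))
        return "hello {}".format(name)

if __name__ == "__main__":
    app = Greeter("Greeter")
    result, error = app(name="dna")   # __call__ forwards kwargs to run()
    print(result, error)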
boegel/easybuild-easyblocks
easybuild/easyblocks/p/psi.py
1
11600
## # Copyright 2013-2020 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), # with support of Ghent University (http://ugent.be/hpc), # the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), # Flemish Research Foundation (FWO) (http://www.fwo.be/en) # and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). # # https://github.com/easybuilders/easybuild # # EasyBuild is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation v2. # # EasyBuild is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. ## """ EasyBuild support for building and installing PSI, implemented as an easyblock @author: Kenneth Hoste (Ghent University) @author: Ward Poelmans (Ghent University) """ from distutils.version import LooseVersion import glob import os import shutil import tempfile import easybuild.tools.environment as env from easybuild.easyblocks.generic.cmakemake import CMakeMake from easybuild.easyblocks.generic.configuremake import ConfigureMake from easybuild.framework.easyconfig import BUILD from easybuild.tools.build_log import EasyBuildError from easybuild.tools.modules import get_software_root from easybuild.tools.run import run_cmd class EB_PSI(CMakeMake): """ Support for building and installing PSI """ def __init__(self, *args, **kwargs): """Initialize class variables custom to PSI.""" super(EB_PSI, self).__init__(*args, **kwargs) self.psi_srcdir = None self.install_psi_objdir = None self.install_psi_srcdir = None @staticmethod def extra_options(): """Extra easyconfig parameters specific to PSI.""" extra_vars = CMakeMake.extra_options() extra_vars.update({ # always include running PSI unit tests (takes about 2h or less) 'runtest': ["tests TESTFLAGS='-u -q'", "Run tests included with PSI, without interruption.", BUILD], }) # Doesn't work with out-of-source build extra_vars['separate_build_dir'][0] = False return extra_vars def configure_step(self): """ Configure build outside of source directory. """ try: objdir = os.path.join(self.builddir, 'obj') os.makedirs(objdir) os.chdir(objdir) except OSError as err: raise EasyBuildError("Failed to prepare for configuration of PSI build: %s", err) env.setvar('F77FLAGS', os.getenv('F90FLAGS')) # In order to create new plugins with PSI, it needs to know the location of the source # and the obj dir after install. These env vars give that information to the configure script. 
self.psi_srcdir = os.path.basename(self.cfg['start_dir'].rstrip(os.sep)) self.install_psi_objdir = os.path.join(self.installdir, 'obj') self.install_psi_srcdir = os.path.join(self.installdir, self.psi_srcdir) env.setvar('PSI_OBJ_INSTALL_DIR', self.install_psi_objdir) env.setvar('PSI_SRC_INSTALL_DIR', self.install_psi_srcdir) # explicitely specify Python binary to use pythonroot = get_software_root('Python') if not pythonroot: raise EasyBuildError("Python module not loaded.") # pre 4.0b5, they were using autotools, on newer it's CMake if LooseVersion(self.version) <= LooseVersion("4.0b5") and self.name == "PSI": # Use EB Boost boostroot = get_software_root('Boost') if not boostroot: raise EasyBuildError("Boost module not loaded.") self.log.info("Using configure based build") env.setvar('PYTHON', os.path.join(pythonroot, 'bin', 'python')) env.setvar('USE_SYSTEM_BOOST', 'TRUE') if self.toolchain.options.get('usempi', None): # PSI doesn't require a Fortran compiler itself, but may require it to link to BLAS/LAPACK correctly # we should always specify the sequential Fortran compiler, # to avoid problems with -lmpi vs -lmpi_mt during linking fcompvar = 'F77_SEQ' else: fcompvar = 'F77' # update configure options # using multi-threaded BLAS/LAPACK is important for performance, # cfr. http://sirius.chem.vt.edu/psi4manual/latest/installfile.html#sec-install-iii opt_vars = [ ('cc', 'CC'), ('cxx', 'CXX'), ('fc', fcompvar), ('libdirs', 'LDFLAGS'), ('blas', 'LIBBLAS_MT'), ('lapack', 'LIBLAPACK_MT'), ] for (opt, var) in opt_vars: self.cfg.update('configopts', "--with-%s='%s'" % (opt, os.getenv(var))) # -DMPICH_IGNORE_CXX_SEEK dances around problem with order of stdio.h and mpi.h headers # both define SEEK_SET, this makes the one for MPI be ignored self.cfg.update('configopts', "--with-opt='%s -DMPICH_IGNORE_CXX_SEEK'" % os.getenv('CFLAGS')) # specify location of Boost self.cfg.update('configopts', "--with-boost=%s" % boostroot) # enable support for plugins self.cfg.update('configopts', "--with-plugins") ConfigureMake.configure_step(self, cmd_prefix=self.cfg['start_dir']) else: self.log.info("Using CMake based build") self.cfg.update('configopts', ' -DPYTHON_EXECUTABLE=%s' % os.path.join(pythonroot, 'bin', 'python')) if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"): self.log.info("Remove the CMAKE_BUILD_TYPE test in PSI4 source and the downloaded dependencies!") self.log.info("Use PATCH_COMMAND in the corresponding CMakeLists.txt") self.cfg['build_type'] = 'EasyBuildRelease' if self.toolchain.options.get('usempi', None): self.cfg.update('configopts', " -DENABLE_MPI=ON") if get_software_root('imkl'): self.cfg.update('configopts', " -DENABLE_CSR=ON -DBLAS_TYPE=MKL") if self.name == 'PSI4': pcmsolverroot = get_software_root('PCMSolver') if pcmsolverroot: if LooseVersion(self.version) >= LooseVersion("1.1"): pcmsolver = 'PCMSolver' else: pcmsolver = 'PCMSOLVER' self.cfg.update('configopts', " -DENABLE_%s=ON" % pcmsolver) if LooseVersion(self.version) < LooseVersion("1.2"): self.cfg.update('configopts', " -DPCMSOLVER_ROOT=%s" % pcmsolverroot) else: self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_PCMSolver=ON " "-DPCMSolver_DIR=%s/share/cmake/PCMSolver" % pcmsolverroot) chempsroot = get_software_root('CheMPS2') if chempsroot: if LooseVersion(self.version) >= LooseVersion("1.1"): chemps2 = 'CheMPS2' else: chemps2 = 'CHEMPS2' self.cfg.update('configopts', " -DENABLE_%s=ON" % chemps2) if LooseVersion(self.version) < LooseVersion("1.2"): self.cfg.update('configopts', " 
-DCHEMPS2_ROOT=%s" % chempsroot) else: self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_CheMPS2=ON " "-DCheMPS2_DIR=%s/share/cmake/CheMPS2" % chempsroot) # Be aware, PSI4 wants exact versions of the following deps! built with CMake!! # If you want to use non-CMake build versions, the you have to provide the # corresponding Find<library-name>.cmake scripts # In PSI4 version 1.2.1, you can check the corresponding CMakeLists.txt file # in external/upstream/<library-name>/ if LooseVersion(self.version) >= LooseVersion("1.2"): for dep in ['libxc', 'Libint', 'pybind11', 'gau2grid']: deproot = get_software_root(dep) if deproot: self.cfg.update('configopts', " -DCMAKE_INSIST_FIND_PACKAGE_%s=ON" % dep) dep_dir = os.path.join(deproot, 'share', 'cmake', dep) self.cfg.update('configopts', " -D%s_DIR=%s " % (dep, dep_dir)) CMakeMake.configure_step(self, srcdir=self.cfg['start_dir']) def install_step(self): """Custom install procedure for PSI.""" super(EB_PSI, self).install_step() # the obj and unpacked sources must remain available for working with plugins try: for subdir in ['obj', self.psi_srcdir]: # copy symlinks as symlinks to work around broken symlinks shutil.copytree(os.path.join(self.builddir, subdir), os.path.join(self.installdir, subdir), symlinks=True) except OSError as err: raise EasyBuildError("Failed to copy obj and unpacked sources to install dir: %s", err) def test_step(self): """ Run the testsuite of PSI4 """ testdir = tempfile.mkdtemp() env.setvar('PSI_SCRATCH', testdir) if self.name == 'PSI4' and LooseVersion(self.version) >= LooseVersion("1.2"): if self.cfg['runtest']: paracmd = '' # Run ctest parallel, but limit to maximum 4 jobs (in case of slow disks) if self.cfg['parallel']: if self.cfg['parallel'] > 4: paracmd = '-j 4' else: paracmd = "-j %s" % self.cfg['parallel'] cmd = "ctest %s %s" % (paracmd, self.cfg['runtest']) run_cmd(cmd, log_all=True, simple=False) else: super(EB_PSI, self).test_step() try: shutil.rmtree(testdir) except OSError as err: raise EasyBuildError("Failed to remove test directory %s: %s", testdir, err) def sanity_check_step(self): """Custom sanity check for PSI.""" custom_paths = { 'files': ['bin/psi4'], 'dirs': ['include', ('share/psi', 'share/psi4')], } super(EB_PSI, self).sanity_check_step(custom_paths=custom_paths) def make_module_extra(self): """Custom variables for PSI module.""" txt = super(EB_PSI, self).make_module_extra() share_dir = os.path.join(self.installdir, 'share') if os.path.exists(share_dir): psi4datadir = glob.glob(os.path.join(share_dir, 'psi*')) if len(psi4datadir) == 1: txt += self.module_generator.set_environment('PSI4DATADIR', psi4datadir[0]) else: raise EasyBuildError("Failed to find exactly one PSI4 data dir: %s", psi4datadir) return txt
gpl-2.0
-9,018,316,180,705,056,000
44.669291
116
0.585259
false
3.903096
true
false
false
diplomacy/research
diplomacy_research/models/policy/order_based/dataset/base.py
1
19541
# ============================================================================== # Copyright 2019 - Philip Paquette # # NOTICE: Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # ============================================================================== """ Order Based Base Dataset Builder - Base class responsible for generating the protocol buffers to be used by the model """ import logging import numpy as np from diplomacy import Map from diplomacy_research.models.datasets.base_builder import FixedProtoField, VarProtoField from diplomacy_research.models.policy.base_policy_builder import BasePolicyBuilder from diplomacy_research.models.self_play.reward_functions import DefaultRewardFunction, DEFAULT_GAMMA from diplomacy_research.models.state_space import get_order_tokens, get_order_based_mask, \ get_possible_orders_for_powers, get_issued_orders_for_powers, proto_to_board_state, GO_ID, NB_NODES, \ NB_SUPPLY_CENTERS, POWER_VOCABULARY_KEY_TO_IX, order_to_ix, MAX_CANDIDATES, NB_FEATURES, NB_ORDERS_FEATURES, \ NB_PREV_ORDERS, NB_PREV_ORDERS_HISTORY, get_board_alignments, get_orderable_locs_for_powers, get_current_season, \ proto_to_prev_orders_state # Constants LOGGER = logging.getLogger(__name__) class BaseDatasetBuilder(BasePolicyBuilder): """ This object is responsible for maintaining the data and feeding it into the model """ @staticmethod def get_proto_fields(): """ Returns the proto fields used by this dataset builder """ # Creating proto fields proto_fields = { 'request_id': FixedProtoField([], None), 'player_seed': FixedProtoField([], np.int32), 'board_state': FixedProtoField([NB_NODES, NB_FEATURES], np.uint8), 'board_alignments': VarProtoField([NB_NODES * NB_SUPPLY_CENTERS], np.uint8), 'prev_orders_state': FixedProtoField([NB_PREV_ORDERS, NB_NODES, NB_ORDERS_FEATURES], np.uint8), 'decoder_inputs': VarProtoField([1 + NB_SUPPLY_CENTERS], np.int32), 'decoder_lengths': FixedProtoField([], np.int32), 'candidates': VarProtoField([None, MAX_CANDIDATES], np.int32), 'noise': FixedProtoField([], np.float32), 'temperature': FixedProtoField([], np.float32), 'dropout_rate': FixedProtoField([], np.float32), 'current_power': FixedProtoField([], np.int32), 'current_season': FixedProtoField([], np.int32), 'draw_target': FixedProtoField([], np.float32), 'value_target': FixedProtoField([], np.float32) } return proto_fields @staticmethod def get_feedable_item(locs, state_proto, power_name, phase_history_proto, possible_orders_proto, **kwargs): """ Computes and return a feedable item (to be fed into the feedable queue) :param locs: A list of locations for which we want orders :param state_proto: A `.proto.game.State` representation of the state of the game. :param power_name: The power name for which we want the orders and the state values :param phase_history_proto: A list of `.proto.game.PhaseHistory`. This represents prev phases. :param possible_orders_proto: A `proto.game.PossibleOrders` object representing possible order for each loc. 
:param kwargs: Additional optional kwargs: - player_seed: The seed to apply to the player to compute a deterministic mask. - noise: The sigma of the additional noise to apply to the intermediate layers (i.e. sigma * epsilon) - temperature: The temperature to apply to the logits. (Default to 0. for deterministic/greedy) - dropout_rate: The amount of dropout to apply to the inputs/outputs of the decoder. :return: A feedable item, with feature names as key and numpy arrays as values """ # pylint: disable=too-many-branches # Converting to state space map_object = Map(state_proto.map) board_state = proto_to_board_state(state_proto, map_object) # Building the decoder length # For adjustment phase, we restrict the number of builds/disbands to what is allowed by the game engine in_adjustment_phase = state_proto.name[-1] == 'A' nb_builds = state_proto.builds[power_name].count nb_homes = len(state_proto.builds[power_name].homes) # If we are in adjustment phase, making sure the locs are the orderable locs (and not the policy locs) if in_adjustment_phase: orderable_locs, _ = get_orderable_locs_for_powers(state_proto, [power_name]) if sorted(locs) != sorted(orderable_locs): if locs: LOGGER.warning('Adj. phase requires orderable locs. Got %s. Expected %s.', locs, orderable_locs) locs = orderable_locs # WxxxA - We can build units # WxxxA - We can disband units # Other phase if in_adjustment_phase and nb_builds >= 0: decoder_length = min(nb_builds, nb_homes) elif in_adjustment_phase and nb_builds < 0: decoder_length = abs(nb_builds) else: decoder_length = len(locs) # Computing the candidates for the policy if possible_orders_proto: # Adjustment Phase - Use all possible orders for each location. if in_adjustment_phase: # Building a list of all orders for all locations adj_orders = [] for loc in locs: adj_orders += possible_orders_proto[loc].value # Computing the candidates candidates = [get_order_based_mask(adj_orders)] * decoder_length # Regular phase - Compute candidates for each location else: candidates = [] for loc in locs: candidates += [get_order_based_mask(possible_orders_proto[loc].value)] # We don't have possible orders, so we cannot compute candidates # This might be normal if we are only getting the state value or the next message to send else: candidates = [] for _ in range(decoder_length): candidates.append([]) # Prev orders state prev_orders_state = [] for phase_proto in reversed(phase_history_proto): if len(prev_orders_state) == NB_PREV_ORDERS: break if phase_proto.name[-1] == 'M': prev_orders_state = [proto_to_prev_orders_state(phase_proto, map_object)] + prev_orders_state for _ in range(NB_PREV_ORDERS - len(prev_orders_state)): prev_orders_state = [np.zeros((NB_NODES, NB_ORDERS_FEATURES), dtype=np.uint8)] + prev_orders_state prev_orders_state = np.array(prev_orders_state) # Building (order) decoder inputs [GO_ID] decoder_inputs = [GO_ID] # kwargs player_seed = kwargs.get('player_seed', 0) noise = kwargs.get('noise', 0.) temperature = kwargs.get('temperature', 0.) dropout_rate = kwargs.get('dropout_rate', 0.) 
# Building feedable data item = { 'player_seed': player_seed, 'board_state': board_state, 'board_alignments': get_board_alignments(locs, in_adjustment_phase=in_adjustment_phase, tokens_per_loc=1, decoder_length=decoder_length), 'prev_orders_state': prev_orders_state, 'decoder_inputs': decoder_inputs, 'decoder_lengths': decoder_length, 'candidates': candidates, 'noise': noise, 'temperature': temperature, 'dropout_rate': dropout_rate, 'current_power': POWER_VOCABULARY_KEY_TO_IX[power_name], 'current_season': get_current_season(state_proto) } # Return return item @property def proto_generation_callable(self): """ Returns a callable required for proto files generation. e.g. return generate_proto(saved_game_bytes, is_validation_set) Note: Callable args are - saved_game_bytes: A `.proto.game.SavedGame` object from the dataset - phase_ix: The index of the phase we want to process - is_validation_set: Boolean that indicates if we are generating the validation set Note: Used bytes_to_proto from diplomacy_research.utils.proto to convert bytes to proto The callable must return a list of tf.train.Example to put in the protocol buffer file """ raise NotImplementedError() # ---------- Multiprocessing methods to generate proto buffer ---------------- def get_policy_data(saved_game_proto, power_names, top_victors): """ Computes the proto to save in tf.train.Example as a training example for the policy network :param saved_game_proto: A `.proto.game.SavedGame` object from the dataset. :param power_names: The list of powers for which we want the policy data :param top_victors: The list of powers that ended with more than 25% of the supply centers :return: A dictionary with key: the phase_ix with value: A dict with the power_name as key and a dict with the example fields as value """ nb_phases = len(saved_game_proto.phases) policy_data = {phase_ix: {} for phase_ix in range(nb_phases - 1)} game_id = saved_game_proto.id map_object = Map(saved_game_proto.map) # Determining if we have a draw nb_sc_to_win = len(map_object.scs) // 2 + 1 has_solo_winner = max([len(saved_game_proto.phases[-1].state.centers[power_name].value) for power_name in saved_game_proto.phases[-1].state.centers]) >= nb_sc_to_win survivors = [power_name for power_name in saved_game_proto.phases[-1].state.centers if saved_game_proto.phases[-1].state.centers[power_name].value] has_draw = not has_solo_winner and len(survivors) >= 2 # Processing all phases (except the last one) current_year = 0 for phase_ix in range(nb_phases - 1): # Building a list of orders of previous phases previous_orders_states = [np.zeros((NB_NODES, NB_ORDERS_FEATURES), dtype=np.uint8)] * NB_PREV_ORDERS for phase_proto in saved_game_proto.phases[max(0, phase_ix - NB_PREV_ORDERS_HISTORY):phase_ix]: if phase_proto.name[-1] == 'M': previous_orders_states += [proto_to_prev_orders_state(phase_proto, map_object)] previous_orders_states = previous_orders_states[-NB_PREV_ORDERS:] prev_orders_state = np.array(previous_orders_states) # Parsing each requested power in the specified phase phase_proto = saved_game_proto.phases[phase_ix] phase_name = phase_proto.name state_proto = phase_proto.state phase_board_state = proto_to_board_state(state_proto, map_object) # Increasing year for every spring or when the game is completed if phase_proto.name == 'COMPLETED' or (phase_proto.name[0] == 'S' and phase_proto.name[-1] == 'M'): current_year += 1 for power_name in power_names: phase_issued_orders = get_issued_orders_for_powers(phase_proto, [power_name]) phase_possible_orders = 
get_possible_orders_for_powers(phase_proto, [power_name]) phase_draw_target = 1. if has_draw and phase_ix == (nb_phases - 2) and power_name in survivors else 0. # Data to use when not learning a policy blank_policy_data = {'board_state': phase_board_state, 'prev_orders_state': prev_orders_state, 'draw_target': phase_draw_target} # Power is not a top victor - We don't want to learn a policy from him if power_name not in top_victors: policy_data[phase_ix][power_name] = blank_policy_data continue # Finding the orderable locs orderable_locations = list(phase_issued_orders[power_name].keys()) # Skipping power for this phase if we are only issuing Hold for order_loc, order in phase_issued_orders[power_name].items(): order_tokens = get_order_tokens(order) if len(order_tokens) >= 2 and order_tokens[1] != 'H': break else: policy_data[phase_ix][power_name] = blank_policy_data continue # Removing orderable locs where orders are not possible (i.e. NO_CHECK games) for order_loc, order in phase_issued_orders[power_name].items(): if order not in phase_possible_orders[order_loc] and order_loc in orderable_locations: if 'NO_CHECK' not in saved_game_proto.rules: LOGGER.warning('%s not in all possible orders. Phase %s - Game %s.', order, phase_name, game_id) orderable_locations.remove(order_loc) # Remove orderable locs where the order is either invalid or not frequent if order_to_ix(order) is None and order_loc in orderable_locations: orderable_locations.remove(order_loc) # Determining if we are in an adjustment phase in_adjustment_phase = state_proto.name[-1] == 'A' nb_builds = state_proto.builds[power_name].count nb_homes = len(state_proto.builds[power_name].homes) # WxxxA - We can build units # WxxxA - We can disband units # Other phase if in_adjustment_phase and nb_builds >= 0: decoder_length = min(nb_builds, nb_homes) elif in_adjustment_phase and nb_builds < 0: decoder_length = abs(nb_builds) else: decoder_length = len(orderable_locations) # Not all units were disbanded - Skipping this power as we can't learn the orders properly if in_adjustment_phase and nb_builds < 0 and len(orderable_locations) < abs(nb_builds): policy_data[phase_ix][power_name] = blank_policy_data continue # Not enough orderable locations for this power, skipping if not orderable_locations or not decoder_length: policy_data[phase_ix][power_name] = blank_policy_data continue # decoder_inputs [GO, order1, order2, order3] decoder_inputs = [GO_ID] decoder_inputs += [order_to_ix(phase_issued_orders[power_name][loc]) for loc in orderable_locations] if in_adjustment_phase and nb_builds > 0: decoder_inputs += [order_to_ix('WAIVE')] * (min(nb_builds, nb_homes) - len(orderable_locations)) decoder_length = min(decoder_length, NB_SUPPLY_CENTERS) # Adjustment Phase - Use all possible orders for each location. 
if in_adjustment_phase: build_disband_locs = list(get_possible_orders_for_powers(phase_proto, [power_name]).keys()) phase_board_alignments = get_board_alignments(build_disband_locs, in_adjustment_phase=in_adjustment_phase, tokens_per_loc=1, decoder_length=decoder_length) # Building a list of all orders for all locations adj_orders = [] for loc in build_disband_locs: adj_orders += phase_possible_orders[loc] # Not learning builds for BUILD_ANY if nb_builds > 0 and 'BUILD_ANY' in state_proto.rules: adj_orders = [] # No orders found - Skipping if not adj_orders: policy_data[phase_ix][power_name] = blank_policy_data continue # Computing the candidates candidates = [get_order_based_mask(adj_orders)] * decoder_length # Regular phase - Compute candidates for each location else: phase_board_alignments = get_board_alignments(orderable_locations, in_adjustment_phase=in_adjustment_phase, tokens_per_loc=1, decoder_length=decoder_length) candidates = [] for loc in orderable_locations: candidates += [get_order_based_mask(phase_possible_orders[loc])] # Saving results # No need to return temperature, current_power, current_season policy_data[phase_ix][power_name] = {'board_state': phase_board_state, 'board_alignments': phase_board_alignments, 'prev_orders_state': prev_orders_state, 'decoder_inputs': decoder_inputs, 'decoder_lengths': decoder_length, 'candidates': candidates, 'draw_target': phase_draw_target} # Returning return policy_data def get_value_data(saved_game_proto, power_names): """ Computes the proto to save in tf.train.Example as a training example for the value network :param saved_game_proto: A `.proto.game.SavedGame` object from the dataset. :param power_names: The list of powers for which we want the policy data :return: A dictionary with key: the phase_ix with value: A dict with the power_name as key and a dict with the example fields as value """ nb_phases = len(saved_game_proto.phases) value_data = {phase_ix: {} for phase_ix in range(nb_phases - 1)} # Computing the value of each phase for power_name in power_names: value_targets = [] current_value = 0. rewards = DefaultRewardFunction().get_episode_rewards(saved_game_proto, power_name) for reward in reversed(rewards): current_value = reward + DEFAULT_GAMMA * current_value value_targets += [current_value] value_targets += [0] # Computing the value data for phase_ix in range(nb_phases - 1): value_data[phase_ix][power_name] = {'value_target': value_targets[phase_ix]} # Returning the value of the specified phase for each power return value_data
mit
-6,682,590,270,983,825,000
51.52957
120
0.592703
false
4.182577
false
false
false
MFry/pyAlgoDataStructures
Interview_Cake/p1_stock_price.py
1
1516
""" Problem 1 Greedy """ import unittest def brute_get_max_profits(yesterday_prices): """ Brute Force method :param yesterday_prices: :return: """ max_price = float('-inf') for i, buy_price in enumerate(yesterday_prices): best_price = float('-inf') for sell_price in yesterday_prices[i + 1:]: if best_price < sell_price - buy_price: best_price = sell_price - buy_price if best_price > max_price: max_price = best_price return max_price def get_max_profits(yesterday_prices): """ Greedy Algorithm O(n) :param yesterday_prices: :return: """ if len(yesterday_prices) < 2: raise IndexError('Calculating profit requires at least two values') min_buy = float('inf') max_price = float('-inf') for sell_price in yesterday_prices: if sell_price - min_buy > max_price: max_price = sell_price - min_buy if min_buy > sell_price: min_buy = sell_price return max_price class MyTestCase(unittest.TestCase): def test_get_max_profits(self): check_price_yesterday = [10, 7, 5, 8, 11, 9] self.assertEqual(brute_get_max_profits(check_price_yesterday), 6) self.assertEqual(get_max_profits(check_price_yesterday), 6) check_price_yesterday = [10, 11, 12, 50, 60, 100] ans = brute_get_max_profits(check_price_yesterday) self.assertEqual(get_max_profits(check_price_yesterday), ans)
mit
-4,202,641,034,049,540,000
29.32
75
0.60752
false
3.399103
false
false
false
nishant-jain-94/Autofill
src/lstm-3-1024-1024-batchsize-512-epochs-30-Sequence.py
1
4123
from __future__ import print_function import json import os import numpy as np import sys import h5py from gensim.models import Word2Vec from gensim.utils import simple_preprocess from keras.layers import Embedding from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.layers import LSTM from keras.preprocessing import sequence from intersect_embeddings import Embeddings from keras.callbacks import ModelCheckpoint from nltk.tokenize import word_tokenize import random from itertools import groupby # ## Instantiate Embeddings embeddings = Embeddings(300, 4, 1, 4) # ### Getting data from preprocessing word2vec_model = embeddings.get_intersected_model() word2index, index2word = embeddings.get_vocabulary() word2vec_weights = word2vec_model.wv.syn0 tokenized_indexed_sentences = embeddings.get_indexed_sentences() word2index = {word:index+1 for word, index in word2index.items()} index2word = {index:word for word, index in word2index.items()} word2index tokenized_indexed_sentences[0] tokenized_indexed_sentences = [np.array(sentence) + 1 for sentence in tokenized_indexed_sentences if len(sentence) > 0] new_weights = np.zeros((1, word2vec_weights.shape[1])) new_weights = np.append(new_weights, word2vec_weights, axis = 0) # ### generating training data window_size = 5 vocab_size = len(word2index) print(vocab_size) maxlen = max([len(sentence) for sentence in tokenized_indexed_sentences]) tokenized_indexed_sentences = sequence.pad_sequences(tokenized_indexed_sentences) seq_in = [] seq_out = [] # generating dataset tokenized_indexed_sentences = [sentence for sentence in tokenized_indexed_sentences if len(sentence) > 0] for sentence in tokenized_indexed_sentences: x = sentence y = np.append(sentence[1:], np.array(sentence[len(sentence)-1])) seq_in.append(x) seq_out.append([new_weights[index] for index in y]) # converting seq_in and seq_out into numpy array seq_in = np.array(seq_in) seq_out = np.array(seq_out) n_samples = len(seq_in) print ("Number of samples : ", n_samples) # ## Defining model # Changes to the model to be done here model = Sequential() model.add(Embedding(input_dim = new_weights.shape[0], output_dim = new_weights.shape[1], weights = [new_weights], mask_zero = True)) model.add(LSTM(1024, return_sequences = True)) model.add(LSTM(1024, return_sequences = True)) model.add(LSTM(300, return_sequences = True)) model.compile(loss='cosine_proximity', optimizer='adam',metrics=['accuracy']) model.summary() model_weights_path = "../weights/lstm-3-1024-1024-batchsize-512-epochs-30-Sequence" if not os.path.exists(model_weights_path): os.makedirs(model_weights_path) checkpoint_path = model_weights_path + '/weights.{epoch:02d}.hdf5' checkpoint = ModelCheckpoint(filepath=checkpoint_path, verbose=1, save_best_only=False, mode='max') # ## Train Model model.fit(seq_in, seq_out, epochs=30, verbose=1, batch_size=512, callbacks=[checkpoint]) # ### model predict start = 0 sentence_test = "In which regions in particular did" indexed_sentences = embeddings.get_indexed_query(sentence_test) print("indexed_sentences ",indexed_sentences) sent = np.array(indexed_sentences) pattern = list(sent) print("\"",' '.join(index2word[index] for index in pattern)) for i in range(10): prediction = model.predict(np.array([pattern])) pred_word = word2vec_model.similar_by_vector(prediction[0][prediction.shape[1] - 1])[0][0] sys.stdout.write(pred_word+" ") pattern.append(word2index[pred_word]) pattern = pattern[:len(pattern)] # ## Accuracy def accuracy(): count = 0 correct = 0 
    for sub_sample_in, sub_sample_out in zip(seq_in, seq_out):
        ypred = model.predict_on_batch(np.expand_dims(sub_sample_in, axis=0))[0]
        ytrue = sub_sample_out
        # similar_by_vector expects a single embedding vector, so compare the
        # predicted and true embeddings one timestep at a time
        for pred_vec, true_vec in zip(ypred, ytrue):
            pred_word = word2vec_model.similar_by_vector(pred_vec)[0][0]
            true_word = word2vec_model.similar_by_vector(true_vec)[0][0]
            similarity = word2vec_model.similarity(pred_word, true_word)
            if similarity == 1:
                correct += 1
            count += 1
    print("Accuracy {0}".format(correct / count))
gpl-3.0
1,490,675,374,157,375,700
33.949153
132
0.732961
false
3.311647
false
false
false
ARMmbed/greentea
test/gtea/gt_cli.py
1
4917
# # Copyright (c) 2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # import six import sys import unittest from greentea import greentea_cli from greentea.gtea.tests_spec import TestSpec test_spec_def = { "builds": { "K64F-ARM": { "platform": "K64F", "toolchain": "ARM", "base_path": "./.build/K64F/ARM", "baud_rate": 115200, "tests": { "mbed-drivers-test-generic_tests": { "binaries": [ { "binary_type": "bootable", "path": "./.build/K64F/ARM/mbed-drivers-test-generic_tests.bin", } ] }, "mbed-drivers-test-c_strings": { "binaries": [ { "binary_type": "bootable", "path": "./.build/K64F/ARM/mbed-drivers-test-c_strings.bin", } ] }, }, } } } class GreenteaCliFunctionality(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_get_greentea_version(self): version = greentea_cli.get_greentea_version() self.assertIs(type(version), str) version_list = version.split(".") self.assertEqual(version_list[0].isdigit(), True) self.assertEqual(version_list[1].isdigit(), True) self.assertEqual(version_list[2].isdigit(), True) def test_print_version(self): version = greentea_cli.get_greentea_version() old_stdout = sys.stdout sys.stdout = stdout_capture = six.StringIO() greentea_cli.print_version() sys.stdout = old_stdout printed_version = stdout_capture.getvalue().splitlines()[0] self.assertEqual(printed_version, version) def test_get_hello_string(self): version = greentea_cli.get_greentea_version() hello_string = greentea_cli.get_hello_string() self.assertIs(type(version), str) self.assertIs(type(hello_string), str) self.assertIn(version, hello_string) def test_get_local_host_tests_dir_invalid_path(self): test_path = greentea_cli.get_local_host_tests_dir("invalid-path") self.assertEqual(test_path, None) def test_get_local_host_tests_dir_valid_path(self): path = "." 
test_path = greentea_cli.get_local_host_tests_dir(path) self.assertEqual(test_path, path) def test_get_local_host_tests_dir_default_path(self): import os import shutil import tempfile curr_dir = os.getcwd() test1_dir = tempfile.mkdtemp() test2_dir = os.mkdir(os.path.join(test1_dir, "test")) test3_dir = os.mkdir(os.path.join(test1_dir, "test", "host_tests")) os.chdir(test1_dir) test_path = greentea_cli.get_local_host_tests_dir("") self.assertEqual(test_path, "./test/host_tests") os.chdir(curr_dir) shutil.rmtree(test1_dir) def test_create_filtered_test_list(self): test_spec = TestSpec() test_spec.parse(test_spec_def) test_build = test_spec.get_test_builds()[0] test_list = greentea_cli.create_filtered_test_list( test_build.get_tests(), "mbed-drivers-test-generic_*", None, test_spec=test_spec, ) self.assertEqual( set(test_list.keys()), set(["mbed-drivers-test-generic_tests"]) ) test_list = greentea_cli.create_filtered_test_list( test_build.get_tests(), "*_strings", None, test_spec=test_spec ) self.assertEqual(set(test_list.keys()), set(["mbed-drivers-test-c_strings"])) test_list = greentea_cli.create_filtered_test_list( test_build.get_tests(), "mbed*s", None, test_spec=test_spec ) expected = set( ["mbed-drivers-test-c_strings", "mbed-drivers-test-generic_tests"] ) self.assertEqual(set(test_list.keys()), expected) test_list = greentea_cli.create_filtered_test_list( test_build.get_tests(), "*-drivers-*", None, test_spec=test_spec ) expected = set( ["mbed-drivers-test-c_strings", "mbed-drivers-test-generic_tests"] ) self.assertEqual(set(test_list.keys()), expected) # Should be case insensitive test_list = greentea_cli.create_filtered_test_list( test_build.get_tests(), "*-DRIVERS-*", None, test_spec=test_spec ) expected = set( ["mbed-drivers-test-c_strings", "mbed-drivers-test-generic_tests"] ) self.assertEqual(set(test_list.keys()), expected) if __name__ == "__main__": unittest.main()
apache-2.0
-5,142,583,259,424,977,000
30.928571
92
0.55725
false
3.650334
true
false
false
b-cube/thredds_catalog_crawler
thredds_catalog_crawler/crawl.py
1
11950
from thredds_crawler.etree import etree import urllib import urlparse import requests import os import sys import re from thredds_crawler.utils import construct_url INV_NS = "http://www.unidata.ucar.edu/namespaces/thredds/InvCatalog/v1.0" XLINK_NS = "http://www.w3.org/1999/xlink" import logging try: # Python >= 2.7 from logging import NullHandler except ImportError: # Python < 2.7 class NullHandler(logging.Handler): def emit(self, record): pass logger = logging.getLogger("thredds_crawler") logger.addHandler(NullHandler()) class Crawl(object): # TODO: this is super specific SKIPS = [ # ".*files.*", # ".*Individual Files.*", # ".*File_Access.*", # ".*Forecast Model Run.*", # ".*Constant Forecast Offset.*", # ".*Constant Forecast Date.*" ] def __init__(self, catalog_url, select=None, skip=None, debug=None): """ select: a list of dataset IDs. Python regex supported. skip: list of dataset names and/or a catalogRef titles. Python regex supported. """ if debug is True: logger.setLevel(logging.DEBUG) ch = logging.StreamHandler(sys.stdout) ch.setLevel(logging.DEBUG) formatter = logging.Formatter('%(asctime)s - [%(levelname)s] %(message)s') ch.setFormatter(formatter) logger.addHandler(ch) self.catalog_url = catalog_url # Only process these dataset IDs if select is not None: select = map(lambda x: re.compile(x), select) self.select = select # Skip these dataset links, such as a list of files # ie. "files/" if skip is None: skip = Crawl.SKIPS self.skip = map(lambda x: re.compile(x), skip) self.visited = [] # datasets = [LeafDataset(url) for url in self._run(url=catalog_url) if url is not None] # self.datasets = filter(lambda x: x.id is not None, datasets) def _find_root_url(self): ''' before parsing the larger tree, check that the catalog_url is the root node - return the shortest url that's good ''' parts = urlparse.urlparse(self.catalog_url) route_parts = parts.path.split('/') route_parts = [r for r in route_parts if r and r != 'catalog.xml'] founds = [] for i in xrange(len(route_parts) + 1): route = urlparse.urlunparse( (parts.scheme, parts.netloc, '/'.join(route_parts[:len(route_parts) - i] + ['catalog.xml']), parts.params, parts.query, parts.fragment) ) req = requests.head(route) status_code = req.status_code if status_code in [200, 304]: founds.append(route) return self.catalog_url if not founds else min(founds) def _run(self, url): if url in self.visited: logger.debug("Skipping %s (already crawled)" % url) return self.visited.append(url) logger.info("Crawling: %s" % url) u = urlparse.urlsplit(url) name, ext = os.path.splitext(u.path) if ext == ".html": u = urlparse.urlsplit(url.replace(".html", ".xml")) url = u.geturl() # Get an etree object try: r = requests.get(url) tree = etree.XML(str(r.text)) except BaseException: logger.error("Skipping %s (error parsing getting XML)" % url) return # Crawl the catalogRefs: for ref in tree.findall('.//{%s}catalogRef' % INV_NS): # Check skips title = ref.get("{%s}title" % XLINK_NS) if not any([x.match(title) for x in self.skip]): for ds in self._run(url=construct_url(url, ref.get("{%s}href" % XLINK_NS))): yield ds else: logger.info("Skipping catalogRef based on 'skips'. Title: %s" % title) continue # Get the leaf datasets ds = [] for leaf in tree.findall('.//{%s}dataset[@urlPath]' % INV_NS): # Subset by the skips name = leaf.get("name") if any([x.match(name) for x in self.skip]): logger.info("Skipping dataset based on 'skips'. 
Name: %s" % name) continue # Subset by the Selects defined gid = leaf.get('ID') if self.select is not None: if gid is not None and any([x.match(gid) for x in self.select]): logger.debug("Processing %s" % gid) yield "%s?dataset=%s" % (url, gid) else: logger.info("Ignoring dataset based on 'selects'. ID: %s" % gid) continue else: logger.debug("Processing %s" % gid) yield "%s?dataset=%s" % (url, gid) class CatalogRef(object): def __init__(self, source_url, element): self.id = None self.name = None self.parent_url = source_url self.elem = element @property def href(self): return def _parse(self): # extract everything from the node name = self.element.attrib.get('name', '') cat_id = self.element.attrib.get('ID', '') title = self.element.attrib.get('title', '') href = self.element.attrib.get('{http://www.w3.org/1999/xlink}href', '') tag = extract_element_tag(self.element.tag) # get the parent parent = self.element.getparent() parent_tag = extract_element_tag(parent.tag) parent_id = parent.attrib.get('ID', '') if parent_tag != 'catalog' else '' class Dataset(object): def __init_(self, parent_url, elem): self.id = None self.name = None self.parent_url = parent_url self.elem = elem def __repr__(self): return "<Dataset id: %s, name: %s>" % (self.id, self.name) def _parse_element(self): ''' first, is it a bucket or a leaf? if bucket, get children and carry on if leaf, get endpoint and handle supported service list get children and carry on (related catalogs, etc) ''' self.is_leaf = self.elem.xpath('*[local-name()="access"]/@urlPath or @urlPath') # if it has children, get them and add to follows # do not add the access url(s) to follows. this is the terminus class CatalogRef(object): def __init_(self, parent_url, elem): self.id = None self.name = None self.parent_url = parent_url self.elem = elem # self.href_path = href_path def __repr__(self): return "<CatalogRef id: %s, name: %s>" % (self.id, self.name) # TODO: url generation = parent path urljoin with href @property def href(self): parts = urlparse.urlparse(self.href_path) if parts.scheme and parts.netloc: # it's a valid url, do nothing return self.href_path parts = urlparse.urlparse(self.parent_url) # just a basic urljoin if self.parent_type == 'dataset': return urlparse.urljoin(self.parent_url.replace('catalog.xml', ''), self.href_path) else: pass def follow(self): req = requests.get(self.href) # TODO: parse the xml and generate catalogRefs, Datasets x 2 class ParentDataset(object): ''' a collection object, tagged as dataset, that can contain catalogRefs, children datasets (likely terminal nodes) or a metadata blob this object won't have its own url (should be tied to the catalogRef URL parent) ''' def __init__(self, parent_url): self.id = None self.name = None self.parent_url = parent_url self.children = [] def __repr__(self): return "<ParentDataset id: %s, name: %s>" % (self.id, self.name) class LeafDataset(object): def __init__(self, dataset_url, estimate_size=False): self.services = [] self.id = None self.name = None self.metadata = None self.catalog_url = None self.data_size = None self.estimate_size = estimate_size # Get an etree object r = requests.get(dataset_url) try: tree = etree.XML(str(r.text)) except etree.XMLSyntaxError: logger.error("Error procesing %s, invalid XML" % dataset_url) else: dataset = tree.find("{%s}dataset" % INV_NS) self.id = dataset.get("ID") self.name = dataset.get("name") self.metadata = dataset.find("{%s}metadata" % INV_NS) self.catalog_url = dataset_url.split("?")[0] # Data Size - 
http://www.unidata.ucar.edu/software/thredds/current/tds/ # catalog/InvCatalogSpec.html#dataSize data_size = dataset.find("{%s}dataSize" % INV_NS) if data_size is not None: self.data_size = float(data_size.text) data_units = data_size.get('units') # Convert to MB if data_units == "bytes": self.data_size *= 1e-6 elif data_units == "Kbytes": self.data_size *= 0.001 elif data_units == "Gbytes": self.data_size /= 0.001 elif data_units == "Tbytes": self.data_size /= 1e-6 # Services service_tag = dataset.find("{%s}serviceName" % INV_NS) if service_tag is None: service_tag = self.metadata.find("{%s}serviceName" % INV_NS) service_name = service_tag.text for service in tree.findall(".//{%s}service[@name='%s']" % (INV_NS, service_name)): if service.get("serviceType") == "Compound": for s in service.findall("{%s}service" % INV_NS): url = '' else: url = '' def follow(self): # TODO: run the head requests for the service + urlPath # hrefs to make sure they are valid requests pass @property def href(self): return urlparse.urljoin( urlparse.urlunparse( ( parts.scheme, parts.netloc, '/'.join(url_paths[0:match_index + 1]), parts.params, parts.query, parts.fragment ) ), path ) @property def size(self): if self.data_size is not None: return self.data_size if self.estimate_size: try: dap_endpoint = next(s.get("url") for s in self.services if s.get("service").lower() in ["opendap", "dap"]) # Get sizes from DDS try: import netCDF4 nc = netCDF4.Dataset(dap_endpoint) bites = 0 for vname in nc.variables: var = nc.variables.get(vname) bites += var.dtype.itemsize * var.size return bites * 1e-6 # Megabytes except ImportError: logger.error("The python-netcdf4 library is required for computing the size of this dataset.") return None except StopIteration: return None # We can't calculate return None def __repr__(self): return "<LeafDataset id: %s, name: %s, services: %s>" % ( self.id, self.name, str([s.get("service") for s in self.services]) )
gpl-3.0
-8,937,101,400,914,662,000
32.194444
114
0.529456
false
4.028995
false
false
false
lichuan261/wuand
XX-Net/python27/1.0/lib/noarch/dnslib/ranges.py
1
2427
# -*- coding: utf-8 -*- """ Wrapper around property builtin to restrict attribute to defined integer value range (throws ValueError). Intended to ensure that values packed with struct are in the correct range >>> class T(object): ... a = range_property('a',-100,100) ... b = B('b') ... c = H('c') ... d = I('d') >>> t = T() >>> for i in [0,100,-100]: ... t.a = i ... assert t.a == i >>> t.a = 101 Traceback (most recent call last): ... ValueError: Attribute 'a' must be between -100-100 [101] >>> t.a = -101 Traceback (most recent call last): ... ValueError: Attribute 'a' must be between -100-100 [-101] >>> t.a = 'blah' Traceback (most recent call last): ... ValueError: Attribute 'a' must be between -100-100 [blah] """ import sys if sys.version < '3': int_types = (int, long,) else: int_types = (int,) def range_property(attr,min,max): def getter(obj): return getattr(obj,"_%s" % attr) def setter(obj,val): if isinstance(val,int_types) and min <= val <= max: setattr(obj,"_%s" % attr,val) else: raise ValueError("Attribute '%s' must be between %d-%d [%s]" % (attr,min,max,val)) return property(getter,setter) def B(attr): """ Unsigned Byte """ return range_property(attr,0,255) def H(attr): """ Unsigned Short """ return range_property(attr,0,65535) def I(attr): """ Unsigned Long """ return range_property(attr,0,4294967295) def ntuple_range(attr,n,min,max): f = lambda x : isinstance(x,int_types) and min <= x <= max def getter(obj): return getattr(obj,"_%s" % attr) def setter(obj,val): if len(val) != n: raise ValueError("Attribute '%s' must be tuple with %d elements [%s]" % (attr,n,val)) if all(map(f,val)): setattr(obj,"_%s" % attr,val) else: raise ValueError("Attribute '%s' elements must be between %d-%d [%s]" % (attr,min,max,val)) return property(getter,setter) def IP4(attr): return ntuple_range(attr,4,0,255) def IP6(attr): return ntuple_range(attr,16,0,255) if __name__ == '__main__': import doctest doctest.testmod()
gpl-2.0
5,592,855,449,354,411,000
25.096774
84
0.526164
false
3.502165
false
false
false
arefenayat/pysend
asli.py
1
3605
import sys import urllib.request from urllib.parse import urlparse class check(object): def __init__(self):pass def address(self): add=input("Enter URL Address With HTTP to Send Data: \n") if add: o = urlparse(add) if not o.scheme: self.address() else: self.address=add else: self.address() def method(self): method=input("Enter Method Name (GET OR POST) \n") if method: if method=="POST" or method=="post" or method=="get" or method=="GET": self.method=method else: self.method() else: self.method() def getkey(self): keys=input("Enter Key's To Send exam: name,family,number \n") if not len(keys): self.getkey() else: keys=keys.split(',') self.keys=keys def getval(self): values=input("Enter values's To Send exam saeid,ahmadi,2 \n") if not len(values): self.getval() else: values=values.split(',') self.values=values def post_(self,address,**datas): data = urllib.parse.urlencode(datas) data = data.encode('utf-8') request = urllib.request.Request(address) request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8") request.add_header("User-Agent","Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11") try : f = urllib.request.urlopen(request, data) print("Response Recived From "+address+" : \n") print(f.read().decode('utf-8')) again=input("Do you want to test again ? yes or no") if again=='yes': main() else: sys.exit(0) except urllib.error.URLError as err0: print(err0) except urllib.error.HTTPError as err1: print(err1) def get_(self,address): request = urllib.request.Request(address) request.add_header("Content-Type","application/x-www-form-urlencoded;charset=utf-8") request.add_header("User-Agent","Mozilla/5.0 (X11; U; Linux i686) Gecko/20071127 Firefox/2.0.0.11") request.add_header('Referer', 'http://www.python.org/') try : f = urllib.request.urlopen(request) print("Response Recived From "+address+" : \n") print(f.read().decode('utf-8')) again=input("Do you want to test again ? yes or no") if again=='yes': main() else: sys.exit(0) except urllib.error.URLError as err0: print(err0) except urllib.error.HTTPError as err1: print(err1) def main(): barname=check() barname.address() barname.method() barname.getkey() barname.getval() address=barname.address method=barname.method key=barname.keys val=barname.values if method=="GET" or method=="get" : c=0 datas={} for i in key: datas[i]=val[c] c=c+1 datass=str(datas) a=datass.replace('}','') a=a.replace('{','') a=a.replace("'",'') a=a.replace(":",'=') a=a.replace(",",'&') a=a.replace(" ",'') j=address+'?'+a barname.get_(j) else: c=0 datas={} for i in key: datas[i]=val[c] c=c+1 barname.post_(address,**datas) if __name__=="__main__":main()
cc0-1.0
1,671,651,297,372,823,600
31.772727
107
0.521221
false
3.782791
false
false
false
iniverno/RnR-LLC
simics-3.0-install/simics-3.0.31/amd64-linux/lib/leon2_components.py
1
10174
# MODULE: leon2-components # CLASS: leon2-simple from sim_core import * from components import * def _make_leon_cfg(dsu, sdram, wpts, mac, nwin, icsz, ilsz, dcsz, dlsz, div, mul, wdog, mst, fpu, pci, wp): leoncfg = 0 leoncfg = leoncfg | dsu << 30 # Debug Support Unit leoncfg = leoncfg | sdram << 29 # SDRAM Controller leoncfg = leoncfg | wpts << 26 # N Watchpoints leoncfg = leoncfg | mac << 25 # MAC Instr Available leoncfg = leoncfg | (nwin-1) << 20 # Number of Register Windows leoncfg = leoncfg | icsz << 17 # I-Cache Size leoncfg = leoncfg | ilsz << 15 # I-Cache Line Size leoncfg = leoncfg | dcsz << 12 # D-Cache Size leoncfg = leoncfg | dlsz << 10 # D-Cache Line Size leoncfg = leoncfg | div << 9 # Integer Divide Instructions Enabled leoncfg = leoncfg | mul << 8 # Integer Multiply Instructions Enabled leoncfg = leoncfg | wdog << 7 # Watchdog Present leoncfg = leoncfg | mst << 6 # Mem Stat and Fail Addr Regs Available leoncfg = leoncfg | fpu << 4 # FPU Type (00 = None, 01 = Meiko) leoncfg = leoncfg | pci << 2 # PCI Core (00 = None, 01 = InSilicon, # 10 = ESA, 11 = Other) leoncfg = leoncfg | wp << 0 # Write Protection Type (00 = None, # 01 = Standard) return leoncfg class leon2_simple(component_object): classname = 'leon2-simple' basename = 'system' description = ('A simple LEON2 based component including a CPU and some memory.') connectors = { 'uart1' : {'type' : 'serial', 'direction' : 'down', 'empty_ok' : True, 'hotplug' : True, 'multi' : False}, 'uart2' : {'type' : 'serial', 'direction' : 'down', 'empty_ok' : True, 'hotplug' : True, 'multi' : False}, } def __init__(self, parse_obj): component_object.__init__(self, parse_obj) def get_cpu_frequency(self, idx): return self.freq_mhz def set_cpu_frequency(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.freq_mhz = val return Sim_Set_Ok def get_prom_size(self, idx): return self.prom_size def set_prom_size(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.prom_size = val return Sim_Set_Ok def get_sram_size(self, idx): return self.sram_size def set_sram_size(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.sram_size = val return Sim_Set_Ok def set_has_sram(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.has_sram = val return Sim_Set_Ok def get_has_sram(self, idx): return self.has_sram def get_sdram_size(self, idx): return self.sdram_size def set_sdram_size(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.sdram_size = val return Sim_Set_Ok def set_num_windows(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value # must be a power of two and in the range [2, 32] if ((val & (val - 1)) != 0) and (val < 2) and (val > 32): return Sim_Set_Illegal_Value self.num_windows = val return Sim_Set_Ok def set_has_v8e_mac(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.has_v8e_mac = val return Sim_Set_Ok def set_has_v8_mul(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.has_v8_mul = val return Sim_Set_Ok def set_has_v8_div(self, val, idx): if self.obj.configured: return Sim_Set_Illegal_Value self.has_v8_div = val return Sim_Set_Ok def get_num_windows(self, idx): return self.num_windows def get_has_v8e_mac(self, idx): return self.has_v8e_mac def get_has_v8_mul(self, idx): return self.has_v8_mul def get_has_v8_div(self, idx): return self.has_v8_div def add_objects(self): self.o.amba = pre_obj('amba$', 'memory-space') self.o.cpu = pre_obj('cpu$', 'leon2') 
self.o.cpu.processor_number = get_next_cpu_number() self.o.cpu.freq_mhz = self.freq_mhz self.o.cpu.physical_memory = self.o.amba self.o.cpu.num_windows = self.num_windows self.o.cpu.has_v8e_mac = self.has_v8e_mac self.o.cpu.has_v8_mul = self.has_v8_mul self.o.cpu.has_v8_div = self.has_v8_div # Interrupt controller self.o.irq1 = pre_obj("irq$", "leon2_irq") self.o.irq1.cpu = self.o.cpu self.o.irq1.queue = self.o.cpu self.o.cpu.interrupt_controller = self.o.irq1 # Onchip prom self.o.prom_image = pre_obj('prom$_image', 'image') self.o.prom_image.size = self.prom_size self.o.prom_memory = pre_obj('prom$_memory', 'rom') self.o.prom_memory.image = self.o.prom_image # Onchip sram / sdram self.o.sram_image = pre_obj('sram$_image', 'image') self.o.sram_image.size = self.sram_size self.o.sram_memory = pre_obj('sram$_memory', 'ram') self.o.sram_memory.image = self.o.sram_image self.o.sdram_image = pre_obj('sdram$_image', 'image') self.o.sdram_image.size = self.sdram_size self.o.sdram_memory = pre_obj('sdram$_memory', 'ram') self.o.sdram_memory.image = self.o.sdram_image # UARTS self.o.uart1 = pre_obj('uart$', 'leon2_uart') self.o.uart1.irq = self.o.irq1 self.o.uart1.interrupt = 3 self.o.uart1.queue = self.o.cpu self.o.uart2 = pre_obj('uart$', 'leon2_uart') self.o.uart2.irq = self.o.irq1 self.o.uart2.interrupt = 2 self.o.uart2.queue = self.o.cpu # Timer self.o.timer = pre_obj("timer$", "leon2_timer") self.o.timer.irq = self.o.irq1 self.o.timer.queue = self.o.cpu # Configuration registers (power down reg, memcfg, ccr, etc) self.o.cfg = pre_obj("cfg$", "leon2_cfg") self.o.cfg.cpu = self.o.cpu # Set the LEON2 configuration register sdram = 1 if self.sdram_size == 0: sdram = 0 self.o.cfg.b_leonconfig = _make_leon_cfg(0, sdram, 0, self.has_v8e_mac, self.num_windows, 0, 0, 0, 0, self.has_v8_div, self.has_v8_mul, 0, 0, 1, 0, 0) # Parallel IO self.o.ioport = pre_obj("ioport$", "leon2_ioport") # Ethernet self.o.eth = pre_obj("eth$", "opencores_eth") self.o.eth.irq_ctrl = self.o.irq1 self.o.amba.map = [ [0x00000000, self.o.prom_memory, 0, 0, self.prom_size], [0x80000000, self.o.cfg, 0, 0, 0x28], [0x80000040, self.o.timer, 0, 0, 0x28], [0x80000070, self.o.uart1, 0, 0, 16], [0x80000080, self.o.uart2, 0, 0, 16], [0x80000090, self.o.irq1, 0, 0, 16], [0x800000a0, self.o.ioport, 0, 0, 12], [0xb0000000, self.o.eth, 0, 0, 0x0001ffff]] # physical memory map if self.has_sram == 1: self.o.amba.map = self.o.amba.map + [ [0x00000000, self.o.prom_memory, 0, 0, self.prom_size], [0x40000000, self.o.sram_memory, 0, 0, self.sram_size], [0x60000000, self.o.sdram_memory, 0, 0, self.sdram_size]] else: self.o.amba.map = self.o.amba.map + [ [0x00000000, self.o.prom_memory, 0, 0, self.prom_size], [0x40000000, self.o.sdram_memory, 0, 0, self.sdram_size]] def add_connector_info(self): self.connector_info['uart1'] = [None, self.o.uart1, self.o.uart1.name] self.connector_info['uart2'] = [None, self.o.uart2, self.o.uart2.name] def connect_serial(self, connector, link, console): if connector == 'uart1': if link: self.o.uart1.link = link else: self.o.uart1.console = console elif connector == 'uart2': if link: self.o.uart2.link = link else: self.o.uart2.console = console def disconnect_serial(self, connector): if connector == 'uart1': self.o.uart1.console = None elif connector == 'uart2': self.o.uart2.console = None def instantiation_done(self): component_object.instantiation_done(self) conf.sim.handle_outside_memory = 1 def get_clock(self): return self.o.cpu def get_processors(self): return [self.o.cpu] leon2_simple_attributes = [ 
['cpu_frequency', Sim_Attr_Required, 'f', 'Processor frequency in MHz.'], ['prom_size', Sim_Attr_Required, 'i', 'Size of PROM in bytes'], ['has_sram', Sim_Attr_Required, 'i', 'True if SRAM is available (if so, SDRAM starts at 0x60000000)'], ['sram_size', Sim_Attr_Required, 'i', 'Size of SRAM in bytes'], ['sdram_size', Sim_Attr_Required, 'i', 'Size of SDRAM in bytes'], ['num_windows', Sim_Attr_Required, 'i', 'Number of register windows, (must be a power of 2)'], ['has_v8e_mac', Sim_Attr_Required, 'b', 'TRUE if the V8E UMAC / SMAC instructions are to be allowed'], ['has_v8_mul', Sim_Attr_Required, 'b', 'TRUE if the V8 IMUL instructions are to be allowed'], ['has_v8_div', Sim_Attr_Required, 'b', 'TRUE if the V8 IDIV instructions are to be allowed']] register_component_class( leon2_simple, leon2_simple_attributes, top_level = True)
gpl-2.0
7,219,553,931,094,871,000
35.597122
85
0.546098
false
3.144005
true
false
false
leoc/home-assistant
homeassistant/components/climate/nest.py
1
6636
""" Support for Nest thermostats. For more details about this platform, please refer to the documentation at https://home-assistant.io/components/climate.nest/ """ import logging import voluptuous as vol import homeassistant.components.nest as nest from homeassistant.components.climate import ( STATE_AUTO, STATE_COOL, STATE_HEAT, ClimateDevice, PLATFORM_SCHEMA, ATTR_TARGET_TEMP_HIGH, ATTR_TARGET_TEMP_LOW, ATTR_TEMPERATURE) from homeassistant.const import ( TEMP_CELSIUS, CONF_SCAN_INTERVAL, STATE_ON, STATE_OFF, STATE_UNKNOWN) from homeassistant.util.temperature import convert as convert_temperature DEPENDENCIES = ['nest'] _LOGGER = logging.getLogger(__name__) PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Optional(CONF_SCAN_INTERVAL): vol.All(vol.Coerce(int), vol.Range(min=1)), }) def setup_platform(hass, config, add_devices, discovery_info=None): """Setup the Nest thermostat.""" temp_unit = hass.config.units.temperature_unit add_devices([NestThermostat(structure, device, temp_unit) for structure, device in nest.devices()]) # pylint: disable=abstract-method,too-many-public-methods class NestThermostat(ClimateDevice): """Representation of a Nest thermostat.""" def __init__(self, structure, device, temp_unit): """Initialize the thermostat.""" self._unit = temp_unit self.structure = structure self.device = device self._fan_list = [STATE_ON, STATE_AUTO] self._operation_list = [STATE_HEAT, STATE_COOL, STATE_AUTO, STATE_OFF] @property def name(self): """Return the name of the nest, if any.""" location = self.device.where name = self.device.name if location is None: return name else: if name == '': return location.capitalize() else: return location.capitalize() + '(' + name + ')' @property def unit_of_measurement(self): """Return the unit of measurement.""" return TEMP_CELSIUS @property def device_state_attributes(self): """Return the device specific state attributes.""" # Move these to Thermostat Device and make them global return { "humidity": self.device.humidity, "target_humidity": self.device.target_humidity, } @property def current_temperature(self): """Return the current temperature.""" return self.device.temperature @property def current_operation(self): """Return current operation ie. 
heat, cool, idle.""" if self.device.mode == 'cool': return STATE_COOL elif self.device.mode == 'heat': return STATE_HEAT elif self.device.mode == 'range': return STATE_AUTO elif self.device.mode == 'off': return STATE_OFF else: return STATE_UNKNOWN @property def target_temperature(self): """Return the temperature we try to reach.""" if self.device.mode != 'range' and not self.is_away_mode_on: return self.device.target else: return None @property def target_temperature_low(self): """Return the lower bound temperature we try to reach.""" if self.is_away_mode_on and self.device.away_temperature[0]: # away_temperature is always a low, high tuple return self.device.away_temperature[0] if self.device.mode == 'range': return self.device.target[0] else: return None @property def target_temperature_high(self): """Return the upper bound temperature we try to reach.""" if self.is_away_mode_on and self.device.away_temperature[1]: # away_temperature is always a low, high tuple return self.device.away_temperature[1] if self.device.mode == 'range': return self.device.target[1] else: return None @property def is_away_mode_on(self): """Return if away mode is on.""" return self.structure.away def set_temperature(self, **kwargs): """Set new target temperature.""" if kwargs.get(ATTR_TARGET_TEMP_LOW) is not None and \ kwargs.get(ATTR_TARGET_TEMP_HIGH) is not None: target_temp_high = convert_temperature(kwargs.get( ATTR_TARGET_TEMP_HIGH), self._unit, TEMP_CELSIUS) target_temp_low = convert_temperature(kwargs.get( ATTR_TARGET_TEMP_LOW), self._unit, TEMP_CELSIUS) if self.device.mode == 'range': temp = (target_temp_low, target_temp_high) else: temp = kwargs.get(ATTR_TEMPERATURE) _LOGGER.debug("Nest set_temperature-output-value=%s", temp) self.device.target = temp def set_operation_mode(self, operation_mode): """Set operation mode.""" if operation_mode == STATE_HEAT: self.device.mode = 'heat' elif operation_mode == STATE_COOL: self.device.mode = 'cool' elif operation_mode == STATE_AUTO: self.device.mode = 'range' elif operation_mode == STATE_OFF: self.device.mode = 'off' @property def operation_list(self): """List of available operation modes.""" return self._operation_list def turn_away_mode_on(self): """Turn away on.""" self.structure.away = True def turn_away_mode_off(self): """Turn away off.""" self.structure.away = False @property def current_fan_mode(self): """Return whether the fan is on.""" return STATE_ON if self.device.fan else STATE_AUTO @property def fan_list(self): """List of available fan modes.""" return self._fan_list def set_fan_mode(self, fan): """Turn fan on/off.""" self.device.fan = fan.lower() @property def min_temp(self): """Identify min_temp in Nest API or defaults if not available.""" temp = self.device.away_temperature.low if temp is None: return super().min_temp else: return temp @property def max_temp(self): """Identify max_temp in Nest API or defaults if not available.""" temp = self.device.away_temperature.high if temp is None: return super().max_temp else: return temp def update(self): """Python-nest has its own mechanism for staying up to date.""" pass
mit
3,391,957,943,504,084,000
31.851485
74
0.602471
false
4.137157
false
false
false
bazwilliams/openhomedevice
tests/DidlLiteTest.py
1
4675
import unittest from openhomedevice.didl_lite import generate_string, parse, parse_duration, parse_int class DidlLiteTests(unittest.TestCase): def test_int_parsing(self): self.assertEqual(parse_duration("42"), 42) self.assertEqual(parse_duration("42.5"), 42) self.assertIsNone(parse_int("forty")) self.assertIsNone(parse_int(None)) def test_duration_parsing(self): self.assertEqual(parse_duration("0:07:40.000"), 460) self.assertEqual(parse_duration("1:00.000"), 60) self.assertEqual(parse_duration("42.000"), 42) self.assertEqual(parse_duration("2:0.5"), 120) self.assertIsNone(parse_duration("forty")) self.assertIsNone(parse_duration(None)) def test_parse_empty_didlite(self): result = parse(None) self.assertEqual(result, {}) def test_parse_corrupt_didlite(self): result = parse( '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></itemX></DIDL-Lite>' ) self.assertEqual(result, {}) def test_parse_didlite_missing_item(self): result = parse( '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"></DIDL-Lite>' ) self.assertEqual(result, {}) def test_empty_track_details(self): track_details = {} result = generate_string(track_details) self.assertEqual( result, '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>', ) def test_track_details_title_is_none(self): track_details = {} track_details["title"] = None result = generate_string(track_details) self.assertEqual( result, '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>', ) def test_track_details_uri_is_none(self): track_details = {} track_details["uri"] = None result = generate_string(track_details) self.assertEqual( result, '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>', ) def test_track_details_albumArtwork_is_none(self): track_details = {} track_details["albumArtwork"] = None result = generate_string(track_details) self.assertEqual( result, '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title></dc:title><res protocolInfo="*:*:*:*"></res><upnp:albumArtURI></upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>', ) def test_track_details(self): 
track_details = {} track_details["albumArtwork"] = "ALBUMARTWORK" track_details["title"] = "TITLE" track_details["uri"] = "URI" result = generate_string(track_details) self.assertEqual( result, '<DIDL-Lite xmlns:dc="http://purl.org/dc/elements/1.1/" xmlns:upnp="urn:schemas-upnp-org:metadata-1-0/upnp/" xmlns="urn:schemas-upnp-org:metadata-1-0/DIDL-Lite/"><item id="" parentID="" restricted="True"><dc:title>TITLE</dc:title><res protocolInfo="*:*:*:*">URI</res><upnp:albumArtURI>ALBUMARTWORK</upnp:albumArtURI><upnp:class>object.item.audioItem</upnp:class></item></DIDL-Lite>', )
mit
-3,629,042,253,428,072,000
56.716049
395
0.643422
false
3.158784
true
false
false
Barrog/C4-Datapack
data/jscript/quests/8_AnAdventureBegins/__init__.py
1
2997
# Created by CubicVirtuoso # Any problems feel free to drop by #l2j-datapack on irc.freenode.net import sys from net.sf.l2j.gameserver.model.quest import State from net.sf.l2j.gameserver.model.quest import QuestState from net.sf.l2j.gameserver.model.quest.jython import QuestJython as JQuest #NPCs JASMINE = 7134 ROSELYN = 7355 HARNE = 7144 #ITEM ROSELYNS_NOTE = 7573 #REWARDS ADENA = 57 SCROLL_OF_ESCAPE_GIRAN = 7559 MARK_OF_TRAVELER = 7570 class Quest (JQuest) : def __init__(self,id,name,descr): JQuest.__init__(self,id,name,descr) def onEvent (self,event,st) : htmltext = event if event == "7134-03.htm" : st.set("cond","1") st.setState(STARTED) st.playSound("ItemSound.quest_accept") elif event == "7355-02.htm" : st.giveItems(ROSELYNS_NOTE,1) st.set("cond","2") st.set("id","2") st.playSound("ItemSound.quest_middle") elif event == "7144-02.htm" : st.takeItems(ROSELYNS_NOTE,-1) st.set("cond","3") st.set("id","3") st.playSound("ItemSound.quest_middle") elif event == "7134-06.htm" : st.giveItems(SCROLL_OF_ESCAPE_GIRAN,1) st.giveItems(MARK_OF_TRAVELER, 1) st.set("cond","0") st.setState(COMPLETED) st.playSound("ItemSound.quest_finish") return htmltext def onTalk (Self,npc,st): htmltext = "<html><head><body>I have nothing to say you</body></html>" npcId = npc.getNpcId() cond = st.getInt("cond") id = st.getState() if id == CREATED : st.set("cond","0") if st.getPlayer().getRace().ordinal() == 2 : if st.getPlayer().getLevel() >= 3 : htmltext = "7134-02.htm" else : htmltext = "<html><head><body>Quest for characters level 3 and above.</body></html>" st.exitQuest(1) else : htmltext = "7134-01.htm" st.exitQuest(1) elif npcId == JASMINE and id == COMPLETED : htmltext = "<html><head><body>I can't supply you with another Giran Scroll of Escape. Sorry traveller.</body></html>" elif npcId == JASMINE and cond == 1 : htmltext = "7134-04.htm" elif npcId == ROSELYN and cond : if st.getQuestItemsCount(ROSELYNS_NOTE) == 0 : htmltext = "7355-01.htm" else : htmltext = "7355-03.htm" elif npcId == HARNE and cond == 2 and st.getQuestItemsCount(ROSELYNS_NOTE) > 0 : htmltext = "7144-01.htm" elif npcId == JASMINE and cond == 3 : htmltext = "7134-05.htm" return htmltext QUEST = Quest(8,"8_AnAdventureBegins","An Adventure Begins") CREATED = State('Start', QUEST) STARTED = State('Started', QUEST) COMPLETED = State('Completed', QUEST) QUEST.setInitialState(CREATED) QUEST.addStartNpc(JASMINE) CREATED.addTalkId(JASMINE) COMPLETED.addTalkId(JASMINE) STARTED.addTalkId(JASMINE) STARTED.addTalkId(ROSELYN) STARTED.addTalkId(HARNE) STARTED.addQuestDrop(JASMINE,ROSELYNS_NOTE,1) print "importing quests: 8: An Adventure Begins"
gpl-2.0
-565,340,082,806,344,960
29.272727
123
0.634968
false
2.659272
false
false
false
USGSDenverPychron/pychron
pychron/processing/analyses/view/error_components_view.py
1
4244
# =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= enthought library imports ======================= from enable.component_editor import ComponentEditor from traits.api import HasTraits, List, Str, Float, Bool from traitsui.api import View, UItem, VGroup, VSplit from traitsui.editors import TableEditor from traitsui.table_column import ObjectColumn # ============= standard library imports ======================== # ============= local library imports ========================== from pychron.core.helpers.formatting import floatfmt from pychron.processing.analyses.view.magnitude_editor import MagnitudeColumn from pychron.pychron_constants import INTERFERENCE_KEYS # class ErrorComponentAdapter(TabularAdapter): # columns=[('Component', 'name'), ('Value', 'value')] # value_text = Property # # def _get_value_text(self): # return floatfmt(self.item.value, n=2) class ErrorComponent(HasTraits): name = Str value = Float class ErrorComponentsView(HasTraits): name = 'Error Components' error_components = List # pie_canvas = Instance(PieChartCanvas, ()) pie_enabled = Bool(False) def __init__(self, an, *args, **kw): super(ErrorComponentsView, self).__init__(*args, **kw) self._load(an) def _load(self, an): es = [] for k in an.isotope_keys: iso = an.isotopes[k] es.append(ErrorComponent(name=k, value=iso.age_error_component)) for k in an.isotope_keys: d = '{} D'.format(k) es.append(ErrorComponent(name=d, value=an.get_error_component(d))) for k in an.isotope_keys: d = '{} bk'.format(k) es.append(ErrorComponent(name=d, value=an.get_error_component(d))) for k in INTERFERENCE_KEYS + ('J',): v = an.get_error_component(k) es.append(ErrorComponent(name=k, value=v)) # for var, error in an.uage.error_components().items(): # print var.tag # print sum([e.value for e in es]) self.error_components = es # self.pie_canvas.load_scene(es) def traits_view(self): cols = [ObjectColumn(name='name', label='Component'), MagnitudeColumn(name='value', label='', width=200), ObjectColumn(name='value', label='Value', format_func=lambda x: floatfmt(x, n=2))] editor = TableEditor(columns=cols, sortable=False, editable=False) v = View(VGroup( # Item('pie_enabled', label='Show Pie Chart', # visible_when='pie_enabled'), # HGroup(Item('pie_enabled', label='Show Pie Chart')), VGroup( UItem('error_components', editor=editor), visible_when='not pie_enabled'), VSplit( UItem('error_components', editor=editor), UItem('pie_canvas', editor=ComponentEditor()), visible_when='pie_enabled'))) return v # def traits_view(self): # v = View(UItem('error_components', # editor=TabularEditor(adapter=ErrorComponentAdapter(), # editable=False))) # return v # ============= EOF =============================================
apache-2.0
4,124,247,835,074,852,400
37.234234
82
0.536522
false
4.313008
false
false
false
windflyer/apport
test/test_backend_apt_dpkg.py
1
41266
import unittest, gzip, imp, subprocess, tempfile, shutil, os, os.path, time import glob, urllib from apt import apt_pkg if os.environ.get('APPORT_TEST_LOCAL'): impl = imp.load_source('', 'backends/packaging-apt-dpkg.py').impl else: from apport.packaging_impl import impl def _has_internet(): '''Return if there is sufficient network connection for the tests. This checks if http://ddebs.ubuntu.com/ can be downloaded from, to check if we can run the online tests. ''' if os.environ.get('SKIP_ONLINE_TESTS'): return False if _has_internet.cache is None: _has_internet.cache = False try: f = urllib.request.urlopen('http://ddebs.ubuntu.com/dbgsym-release-key.asc', timeout=30) if f.readline().startswith(b'-----BEGIN PGP'): _has_internet.cache = True except (IOError, urllib.error.URLError): pass return _has_internet.cache _has_internet.cache = None class T(unittest.TestCase): def setUp(self): # save and restore configuration file self.orig_conf = impl.configuration self.workdir = tempfile.mkdtemp() try: impl.get_available_version('coreutils-dbgsym') self.has_dbgsym = True except ValueError: self.has_dbgsym = False def tearDown(self): impl.configuration = self.orig_conf shutil.rmtree(self.workdir) def test_check_files_md5(self): '''_check_files_md5().''' td = tempfile.mkdtemp() try: f1 = os.path.join(td, 'test 1.txt') f2 = os.path.join(td, 'test:2.txt') sumfile = os.path.join(td, 'sums.txt') with open(f1, 'w') as fd: fd.write('Some stuff') with open(f2, 'w') as fd: fd.write('More stuff') # use one relative and one absolute path in checksums file with open(sumfile, 'wb') as fd: fd.write(b'2e41290da2fa3f68bd3313174467e3b5 ' + f1[1:].encode() + b'\n') fd.write(b'f6423dfbc4faf022e58b4d3f5ff71a70 ' + f2.encode() + b'\n') fd.write(b'deadbeef000001111110000011110000 /bin/\xc3\xa4') self.assertEqual(impl._check_files_md5(sumfile), [], 'correct md5sums') with open(f1, 'w') as fd: fd.write('Some stuff!') self.assertEqual(impl._check_files_md5(sumfile), [f1[1:]], 'file 1 wrong') with open(f2, 'w') as fd: fd.write('More stuff!') self.assertEqual(impl._check_files_md5(sumfile), [f1[1:], f2], 'files 1 and 2 wrong') with open(f1, 'w') as fd: fd.write('Some stuff') self.assertEqual(impl._check_files_md5(sumfile), [f2], 'file 2 wrong') # check using a direct md5 list as argument with open(sumfile, 'rb') as fd: self.assertEqual(impl._check_files_md5(fd.read()), [f2], 'file 2 wrong') finally: shutil.rmtree(td) def test_get_version(self): '''get_version().''' self.assertTrue(impl.get_version('libc6').startswith('2')) self.assertRaises(ValueError, impl.get_version, 'nonexisting') self.assertRaises(ValueError, impl.get_version, 'wukrainian') def test_get_available_version(self): '''get_available_version().''' self.assertTrue(impl.get_available_version('libc6').startswith('2')) self.assertRaises(ValueError, impl.get_available_version, 'nonexisting') def test_get_dependencies(self): '''get_dependencies().''' # package with both Depends: and Pre-Depends: d = impl.get_dependencies('bash') self.assertTrue(len(d) > 2) self.assertTrue('libc6' in d) for dep in d: self.assertTrue(impl.get_version(dep)) # Pre-Depends: only d = impl.get_dependencies('coreutils') self.assertTrue(len(d) >= 1) self.assertTrue('libc6' in d) for dep in d: self.assertTrue(impl.get_version(dep)) # Depends: only d = impl.get_dependencies('libc6') self.assertTrue(len(d) >= 1) for dep in d: self.assertTrue(impl.get_version(dep)) def test_get_source(self): '''get_source().''' self.assertRaises(ValueError, impl.get_source, 'nonexisting') 
self.assertEqual(impl.get_source('bash'), 'bash') self.assertTrue('glibc' in impl.get_source('libc6')) def test_get_package_origin(self): '''get_package_origin().''' # determine distro name distro = impl.get_os_version()[0] self.assertRaises(ValueError, impl.get_package_origin, 'nonexisting') # this assumes that this package is not installed self.assertRaises(ValueError, impl.get_package_origin, 'robocode-doc') # this assumes that bash is native self.assertEqual(impl.get_package_origin('bash'), distro) # no non-native test here, hard to come up with a generic one def test_is_distro_package(self): '''is_distro_package().''' self.assertRaises(ValueError, impl.is_distro_package, 'nonexisting') self.assertTrue(impl.is_distro_package('bash')) # no False test here, hard to come up with a generic one def test_get_architecture(self): '''get_architecture().''' self.assertRaises(ValueError, impl.get_architecture, 'nonexisting') # just assume that bash uses the native architecture d = subprocess.Popen(['dpkg', '--print-architecture'], stdout=subprocess.PIPE) system_arch = d.communicate()[0].decode().strip() assert d.returncode == 0 self.assertEqual(impl.get_architecture('bash'), system_arch) def test_get_files(self): '''get_files().''' self.assertRaises(ValueError, impl.get_files, 'nonexisting') self.assertTrue('/bin/bash' in impl.get_files('bash')) def test_get_file_package(self): '''get_file_package() on installed files.''' self.assertEqual(impl.get_file_package('/bin/bash'), 'bash') self.assertEqual(impl.get_file_package('/bin/cat'), 'coreutils') self.assertEqual(impl.get_file_package('/etc/pam.conf'), 'libpam-runtime') self.assertEqual(impl.get_file_package('/nonexisting'), None) def test_get_file_package_uninstalled(self): '''get_file_package() on uninstalled packages.''' # generate a test Contents.gz basedir = tempfile.mkdtemp() try: # test Contents.gz for release pocket mapdir = os.path.join(basedir, 'dists', impl.get_distro_codename()) os.makedirs(mapdir) with gzip.open(os.path.join(mapdir, 'Contents-%s.gz' % impl.get_system_architecture()), 'w') as f: f.write(b''' foo header FILE LOCATION usr/bin/frobnicate foo/frob usr/bin/frob foo/frob-utils bo/gu/s na/mypackage bin/true admin/superutils ''') # test Contents.gz for -updates pocket mapdir = os.path.join(basedir, 'dists', impl.get_distro_codename() + '-updates') os.makedirs(mapdir) with gzip.open(os.path.join(mapdir, 'Contents-%s.gz' % impl.get_system_architecture()), 'w') as f: f.write(b''' foo header FILE LOCATION lib/libnew.so.5 universe/libs/libnew5 ''') # use this as a mirror impl.set_mirror('file://' + basedir) self.assertEqual(impl.get_file_package('usr/bin/frob', False), None) # must not match frob (same file name prefix) self.assertEqual(impl.get_file_package('usr/bin/frob', True), 'frob-utils') self.assertEqual(impl.get_file_package('/usr/bin/frob', True), 'frob-utils') # find files from -updates pocket self.assertEqual(impl.get_file_package('/lib/libnew.so.5', False), None) self.assertEqual(impl.get_file_package('/lib/libnew.so.5', True), 'libnew5') # invalid mirror impl.set_mirror('file:///foo/nonexisting') self.assertRaises(IOError, impl.get_file_package, 'usr/bin/frob', True) # valid mirror, test cache directory impl.set_mirror('file://' + basedir) cache_dir = os.path.join(basedir, 'cache') os.mkdir(cache_dir) self.assertEqual(impl.get_file_package('usr/bin/frob', True, cache_dir), 'frob-utils') cache_dir_files = os.listdir(cache_dir) self.assertEqual(len(cache_dir_files), 2) self.assertEqual(impl.get_file_package('/bo/gu/s', 
True, cache_dir), 'mypackage') # valid cache, should not need to access the mirror impl.set_mirror('file:///foo/nonexisting') self.assertEqual(impl.get_file_package('/bin/true', True, cache_dir), 'superutils') self.assertEqual(impl.get_file_package('/bo/gu/s', True, cache_dir), 'mypackage') self.assertEqual(impl.get_file_package('/lib/libnew.so.5', True, cache_dir), 'libnew5') # outdated cache, must refresh the cache and hit the invalid # mirror if 'updates' in cache_dir_files[0]: cache_file = cache_dir_files[1] else: cache_file = cache_dir_files[0] now = int(time.time()) os.utime(os.path.join(cache_dir, cache_file), (now, now - 90000)) self.assertRaises(IOError, impl.get_file_package, '/bo/gu/s', True, cache_dir) finally: shutil.rmtree(basedir) def test_get_file_package_uninstalled_multiarch(self): '''get_file_package() on foreign arches and releases''' # map "Foonux 3.14" to "mocky" orig_distro_release_to_codename = impl._distro_release_to_codename impl._distro_release_to_codename = lambda r: (r == 'Foonux 3.14') and 'mocky' or None # generate test Contents.gz for two fantasy architectures basedir = tempfile.mkdtemp() try: mapdir = os.path.join(basedir, 'dists', impl.get_distro_codename()) os.makedirs(mapdir) with gzip.open(os.path.join(mapdir, 'Contents-even.gz'), 'w') as f: f.write(b''' foo header FILE LOCATION usr/lib/even/libfrob.so.1 foo/libfrob1 usr/bin/frob foo/frob-utils ''') with gzip.open(os.path.join(mapdir, 'Contents-odd.gz'), 'w') as f: f.write(b''' foo header FILE LOCATION usr/lib/odd/libfrob.so.1 foo/libfrob1 usr/bin/frob foo/frob-utils ''') # and another one for fantasy release os.mkdir(os.path.join(basedir, 'dists', 'mocky')) with gzip.open(os.path.join(basedir, 'dists', 'mocky', 'Contents-even.gz'), 'w') as f: f.write(b''' foo header FILE LOCATION usr/lib/even/libfrob.so.0 foo/libfrob0 usr/bin/frob foo/frob ''') # use this as a mirror impl.set_mirror('file://' + basedir) # must not match system architecture self.assertEqual(impl.get_file_package('usr/bin/frob', False), None) # must match correct architecture self.assertEqual(impl.get_file_package('usr/bin/frob', True, arch='even'), 'frob-utils') self.assertEqual(impl.get_file_package('usr/bin/frob', True, arch='odd'), 'frob-utils') self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1', True, arch='even'), 'libfrob1') self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1', True, arch='odd'), None) self.assertEqual(impl.get_file_package('/usr/lib/odd/libfrob.so.1', True, arch='odd'), 'libfrob1') # for mocky release ("Foonux 3.14") self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1', True, release='Foonux 3.14', arch='even'), None) self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.0', True, release='Foonux 3.14', arch='even'), 'libfrob0') self.assertEqual(impl.get_file_package('/usr/bin/frob', True, release='Foonux 3.14', arch='even'), 'frob') # invalid mirror impl.set_mirror('file:///foo/nonexisting') self.assertRaises(IOError, impl.get_file_package, '/usr/lib/even/libfrob.so.1', True, arch='even') self.assertRaises(IOError, impl.get_file_package, '/usr/lib/even/libfrob.so.0', True, release='Foonux 3.14', arch='even') # valid mirror, test caching impl.set_mirror('file://' + basedir) cache_dir = os.path.join(basedir, 'cache') os.mkdir(cache_dir) self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.1', True, cache_dir, arch='even'), 'libfrob1') self.assertEqual(len(os.listdir(cache_dir)), 1) cache_file = os.listdir(cache_dir)[0] 
self.assertEqual(impl.get_file_package('/usr/lib/even/libfrob.so.0', True, cache_dir, release='Foonux 3.14', arch='even'), 'libfrob0') self.assertEqual(len(os.listdir(cache_dir)), 2) # valid cache, should not need to access the mirror impl.set_mirror('file:///foo/nonexisting') self.assertEqual(impl.get_file_package('usr/bin/frob', True, cache_dir, arch='even'), 'frob-utils') self.assertEqual(impl.get_file_package('usr/bin/frob', True, cache_dir, release='Foonux 3.14', arch='even'), 'frob') # but no cached file for the other arch self.assertRaises(IOError, impl.get_file_package, 'usr/bin/frob', True, cache_dir, arch='odd') # outdated cache, must refresh the cache and hit the invalid # mirror now = int(time.time()) os.utime(os.path.join(cache_dir, cache_file), (now, now - 90000)) self.assertRaises(IOError, impl.get_file_package, 'usr/bin/frob', True, cache_dir, arch='even') finally: shutil.rmtree(basedir) impl._distro_release_to_codename = orig_distro_release_to_codename def test_get_file_package_diversion(self): '''get_file_package() for a diverted file.''' # pick first diversion we have p = subprocess.Popen('LC_ALL=C dpkg-divert --list | head -n 1', shell=True, stdout=subprocess.PIPE) out = p.communicate()[0].decode('UTF-8') assert p.returncode == 0 assert out fields = out.split() file = fields[2] pkg = fields[-1] self.assertEqual(impl.get_file_package(file), pkg) def test_mirror_from_apt_sources(self): s = os.path.join(self.workdir, 'sources.list') # valid file, should grab the first mirror with open(s, 'w') as f: f.write('''# some comment deb-src http://source.mirror/foo tuxy main deb http://binary.mirror/tuxy tuxy main deb http://secondary.mirror tuxy extra ''') f.flush() self.assertEqual(impl._get_primary_mirror_from_apt_sources(s), 'http://binary.mirror/tuxy') # valid file with options with open(s, 'w') as f: f.write('''# some comment deb-src http://source.mirror/foo tuxy main deb [arch=flowerpc,leghf] http://binary.mirror/tuxy tuxy main deb http://secondary.mirror tuxy extra ''') f.flush() self.assertEqual(impl._get_primary_mirror_from_apt_sources(s), 'http://binary.mirror/tuxy') # empty file with open(s, 'w') as f: f.flush() self.assertRaises(SystemError, impl._get_primary_mirror_from_apt_sources, s) def test_get_modified_conffiles(self): '''get_modified_conffiles()''' # very shallow self.assertEqual(type(impl.get_modified_conffiles('bash')), type({})) self.assertEqual(type(impl.get_modified_conffiles('apport')), type({})) self.assertEqual(type(impl.get_modified_conffiles('nonexisting')), type({})) def test_get_system_architecture(self): '''get_system_architecture().''' arch = impl.get_system_architecture() # must be nonempty without line breaks self.assertNotEqual(arch, '') self.assertTrue('\n' not in arch) def test_get_library_paths(self): '''get_library_paths().''' paths = impl.get_library_paths() # must be nonempty without line breaks self.assertNotEqual(paths, '') self.assertTrue(':' in paths) self.assertTrue('/lib' in paths) self.assertTrue('\n' not in paths) def test_compare_versions(self): '''compare_versions.''' self.assertEqual(impl.compare_versions('1', '2'), -1) self.assertEqual(impl.compare_versions('1.0-1ubuntu1', '1.0-1ubuntu2'), -1) self.assertEqual(impl.compare_versions('1.0-1ubuntu1', '1.0-1ubuntu1'), 0) self.assertEqual(impl.compare_versions('1.0-1ubuntu2', '1.0-1ubuntu1'), 1) self.assertEqual(impl.compare_versions('1:1.0-1', '2007-2'), 1) self.assertEqual(impl.compare_versions('1:1.0-1~1', '1:1.0-1'), -1) def test_enabled(self): '''enabled.''' impl.configuration 
= '/nonexisting' self.assertEqual(impl.enabled(), True) f = tempfile.NamedTemporaryFile() impl.configuration = f.name f.write('# configuration file\nenabled = 1'.encode()) f.flush() self.assertEqual(impl.enabled(), True) f.close() f = tempfile.NamedTemporaryFile() impl.configuration = f.name f.write('# configuration file\n enabled =0 '.encode()) f.flush() self.assertEqual(impl.enabled(), False) f.close() f = tempfile.NamedTemporaryFile() impl.configuration = f.name f.write('# configuration file\nnothing here'.encode()) f.flush() self.assertEqual(impl.enabled(), True) f.close() def test_get_kernel_package(self): '''get_kernel_package().''' self.assertTrue('linux' in impl.get_kernel_package()) def test_package_name_glob(self): '''package_name_glob().''' self.assertTrue(len(impl.package_name_glob('a*')) > 5) self.assertTrue('bash' in impl.package_name_glob('ba*h')) self.assertEqual(impl.package_name_glob('bash'), ['bash']) self.assertEqual(impl.package_name_glob('xzywef*'), []) @unittest.skipUnless(_has_internet(), 'online test') def test_install_packages_versioned(self): '''install_packages() with versions and with cache''' self._setup_foonux_config(updates=True) obsolete = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', '8.21-1ubuntu5'), # should not come from updates ('libc6', '2.19-0ubuntu6'), ('tzdata', None), # should come from -updates, > 2014b-1 ], False, self.cachedir) def sandbox_ver(pkg): with gzip.open(os.path.join(self.rootdir, 'usr/share/doc', pkg, 'changelog.Debian.gz')) as f: return f.readline().decode().split()[1][1:-1] self.assertEqual(obsolete, '') # packages get installed self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/stat'))) self.assert_elf_arch(os.path.join(self.rootdir, 'usr/bin/stat'), impl.get_system_architecture()) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/lib/debug/usr/bin/stat'))) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/share/zoneinfo/zone.tab'))) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/share/doc/libc6/copyright'))) # their versions are as expected self.assertEqual(sandbox_ver('coreutils'), '8.21-1ubuntu5') self.assertEqual(sandbox_ver('libc6'), '2.19-0ubuntu6') self.assertEqual(sandbox_ver('libc6-dbg'), '2.19-0ubuntu6') self.assertGreater(sandbox_ver('tzdata'), '2015') with open(os.path.join(self.rootdir, 'packages.txt')) as f: pkglist = f.read().splitlines() self.assertIn('coreutils 8.21-1ubuntu5', pkglist) self.assertIn('coreutils-dbgsym 8.21-1ubuntu5', pkglist) self.assertIn('libc6 2.19-0ubuntu6', pkglist) self.assertIn('libc6-dbg 2.19-0ubuntu6', pkglist) self.assertIn('tzdata ' + sandbox_ver('tzdata'), pkglist) self.assertEqual(len(pkglist), 5, str(pkglist)) # does not clobber config dir self.assertEqual(os.listdir(self.configdir), ['Foonux 1.2']) self.assertEqual(sorted(os.listdir(os.path.join(self.configdir, 'Foonux 1.2'))), ['armhf', 'codename', 'sources.list']) self.assertEqual(os.listdir(os.path.join(self.configdir, 'Foonux 1.2', 'armhf')), ['sources.list']) # caches packages, and their versions are as expected cache = os.listdir(os.path.join(self.cachedir, 'Foonux 1.2', 'apt', 'var', 'cache', 'apt', 'archives')) cache_versions = {} for p in cache: try: (name, ver) = p.split('_')[:2] cache_versions[name] = ver except ValueError: pass # not a .deb, ignore self.assertEqual(cache_versions['coreutils'], '8.21-1ubuntu5') self.assertEqual(cache_versions['coreutils-dbgsym'], '8.21-1ubuntu5') self.assertIn('tzdata', cache_versions) 
self.assertEqual(cache_versions['libc6'], '2.19-0ubuntu6') self.assertEqual(cache_versions['libc6-dbg'], '2.19-0ubuntu6') # installs cached packages os.unlink(os.path.join(self.rootdir, 'usr/bin/stat')) os.unlink(os.path.join(self.rootdir, 'packages.txt')) obsolete = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', '8.21-1ubuntu5'), ], False, self.cachedir) self.assertEqual(obsolete, '') self.assertTrue(os.path.exists( os.path.join(self.rootdir, 'usr/bin/stat'))) # complains about obsolete packages result = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('gnome-common', '1.1')]) self.assertEqual(len(result.splitlines()), 1) self.assertTrue('gnome-common' in result) self.assertTrue('1.1' in result) # ... but installs the current version anyway self.assertTrue(os.path.exists( os.path.join(self.rootdir, 'usr/bin/gnome-autogen.sh'))) self.assertGreaterEqual(sandbox_ver('gnome-common'), '3.1.0-0ubuntu1') # does not crash on nonexisting packages result = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('buggerbogger', None)]) self.assertEqual(len(result.splitlines()), 1) self.assertTrue('buggerbogger' in result) self.assertTrue('not exist' in result) # can interleave with other operations dpkg = subprocess.Popen(['dpkg-query', '-Wf${Version}', 'dash'], stdout=subprocess.PIPE) dash_version = dpkg.communicate()[0].decode() self.assertEqual(dpkg.returncode, 0) self.assertEqual(impl.get_version('dash'), dash_version) self.assertRaises(ValueError, impl.get_available_version, 'buggerbogger') # still installs packages after above operations os.unlink(os.path.join(self.rootdir, 'usr/bin/stat')) os.unlink(os.path.join(self.rootdir, 'packages.txt')) impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', '8.21-1ubuntu5'), ('dpkg', None), ], False, self.cachedir) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/stat'))) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/dpkg'))) @unittest.skipUnless(_has_internet(), 'online test') def test_install_packages_unversioned(self): '''install_packages() without versions and no cache''' self._setup_foonux_config() obsolete = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', None), ('tzdata', None), ], False, None) self.assertEqual(obsolete, '') self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/stat'))) self.assert_elf_arch(os.path.join(self.rootdir, 'usr/bin/stat'), impl.get_system_architecture()) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/lib/debug/usr/bin/stat'))) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/share/zoneinfo/zone.tab'))) # does not clobber config dir self.assertEqual(os.listdir(self.configdir), ['Foonux 1.2']) self.assertEqual(sorted(os.listdir(os.path.join(self.configdir, 'Foonux 1.2'))), ['armhf', 'codename', 'sources.list']) self.assertEqual(os.listdir(os.path.join(self.configdir, 'Foonux 1.2', 'armhf')), ['sources.list']) # no cache self.assertEqual(os.listdir(self.cachedir), []) # keeps track of package versions with open(os.path.join(self.rootdir, 'packages.txt')) as f: pkglist = f.read().splitlines() self.assertIn('coreutils 8.21-1ubuntu5', pkglist) self.assertIn('coreutils-dbgsym 8.21-1ubuntu5', pkglist) self.assertIn('tzdata 2014b-1', pkglist) self.assertEqual(len(pkglist), 3, str(pkglist)) @unittest.skipUnless(_has_internet(), 'online test') def test_install_packages_system(self): '''install_packages() with system 
configuration''' # trigger an unrelated package query here to get the cache set up, # reproducing an install failure when the internal caches are not # reset properly impl.get_version('dash') self._setup_foonux_config() result = impl.install_packages(self.rootdir, None, None, [('coreutils', impl.get_version('coreutils')), ('tzdata', '1.1'), ], False, self.cachedir) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/stat'))) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/share/zoneinfo/zone.tab'))) # complains about obsolete packages self.assertGreaterEqual(len(result.splitlines()), 1) self.assertTrue('tzdata' in result) self.assertTrue('1.1' in result) # caches packages cache = os.listdir(os.path.join(self.cachedir, 'system', 'apt', 'var', 'cache', 'apt', 'archives')) cache_names = [p.split('_')[0] for p in cache] self.assertTrue('coreutils' in cache_names) self.assertEqual('coreutils-dbgsym' in cache_names, self.has_dbgsym) self.assertTrue('tzdata' in cache_names) # works with relative paths and existing cache os.unlink(os.path.join(self.rootdir, 'usr/bin/stat')) os.unlink(os.path.join(self.rootdir, 'packages.txt')) orig_cwd = os.getcwd() try: os.chdir(self.workdir) impl.install_packages('root', None, None, [('coreutils', None)], False, 'cache') finally: os.chdir(orig_cwd) self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/stat'))) @unittest.skipUnless(_has_internet(), 'online test') def test_install_packages_error(self): '''install_packages() with errors''' # sources.list with invalid format self._setup_foonux_config() with open(os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'), 'w') as f: f.write('bogus format') try: impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('tzdata', None)], False, self.cachedir) self.fail('install_packages() unexpectedly succeeded with broken sources.list') except SystemError as e: self.assertTrue('bogus' in str(e)) self.assertFalse('Exception' in str(e)) # sources.list with wrong server with open(os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'), 'w') as f: f.write('deb http://archive.ubuntu.com/nosuchdistro/ trusty main\n') try: impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('tzdata', None)], False, self.cachedir) self.fail('install_packages() unexpectedly succeeded with broken server URL') except SystemError as e: self.assertTrue('nosuchdistro' in str(e), str(e)) self.assertTrue('index files failed to download' in str(e)) @unittest.skipUnless(_has_internet(), 'online test') def test_install_packages_permanent_sandbox(self): '''install_packages() with a permanent sandbox''' self._setup_foonux_config() zonetab = os.path.join(self.rootdir, 'usr/share/zoneinfo/zone.tab') impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('tzdata', None)], False, self.cachedir, permanent_rootdir=True) # This will now be using a Cache with our rootdir. 
archives = apt_pkg.config.find_dir('Dir::Cache::archives') tzdata = glob.glob(os.path.join(archives, 'tzdata*.deb')) if not tzdata: self.fail('tzdata was not downloaded') tzdata_written = os.path.getctime(tzdata[0]) zonetab_written = os.path.getctime(zonetab) impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', None), ('tzdata', None)], False, self.cachedir, permanent_rootdir=True) if not glob.glob(os.path.join(archives, 'coreutils*.deb')): self.fail('coreutils was not downloaded.') self.assertEqual(os.path.getctime(tzdata[0]), tzdata_written, 'tzdata downloaded twice.') self.assertEqual(zonetab_written, os.path.getctime(zonetab), 'zonetab written twice.') self.assertTrue(os.path.exists( os.path.join(self.rootdir, 'usr/bin/stat'))) # Prevent packages from downloading. apt_pkg.config.set('Acquire::http::Proxy', 'http://nonexistent') orig_env = os.environ.copy() os.environ['http_proxy'] = 'http://nonexistent' try: del os.environ['no_proxy'] except KeyError: pass self.assertRaises(SystemExit, impl.install_packages, self.rootdir, self.configdir, 'Foonux 1.2', [('libc6', None)], False, self.cachedir, permanent_rootdir=True) os.environ = orig_env # These packages exist, so attempting to install them should not fail. impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', None), ('tzdata', None)], False, self.cachedir, permanent_rootdir=True) # even without cached debs, trying to install the same versions should # be a no-op and succeed for f in glob.glob('%s/Foonux 1.2/apt/var/cache/apt/archives/coreutils*' % self.cachedir): os.unlink(f) impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', None)], False, self.cachedir, permanent_rootdir=True) # trying to install another package should fail, though self.assertRaises(SystemExit, impl.install_packages, self.rootdir, self.configdir, 'Foonux 1.2', [('aspell-doc', None)], False, self.cachedir, permanent_rootdir=True) apt_pkg.config.set('Acquire::http::Proxy', '') @unittest.skipUnless(_has_internet(), 'online test') def test_install_packages_permanent_sandbox_repack(self): self._setup_foonux_config() include_path = os.path.join(self.rootdir, 'usr/include/krb5.h') impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('libkrb5-dev', None)], False, self.cachedir, permanent_rootdir=True) self.assertIn('mit-krb5/', os.readlink(include_path)) impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('heimdal-dev', None)], False, self.cachedir, permanent_rootdir=True) self.assertIn('heimdal/', os.readlink(include_path)) impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('libkrb5-dev', None)], False, self.cachedir, permanent_rootdir=True) self.assertIn('mit-krb5/', os.readlink(include_path)) @unittest.skipUnless(_has_internet(), 'online test') @unittest.skipIf(impl.get_system_architecture() == 'armhf', 'native armhf architecture') def test_install_packages_armhf(self): '''install_packages() for foreign architecture armhf''' self._setup_foonux_config() obsolete = impl.install_packages(self.rootdir, self.configdir, 'Foonux 1.2', [('coreutils', '8.21-1ubuntu5'), ('libc6', '2.19-0ubuntu5'), ], False, self.cachedir, architecture='armhf') self.assertEqual(obsolete, 'libc6 version 2.19-0ubuntu5 required, but 2.19-0ubuntu6 is available\n') self.assertTrue(os.path.exists(os.path.join(self.rootdir, 'usr/bin/stat'))) self.assert_elf_arch(os.path.join(self.rootdir, 'usr/bin/stat'), 'armhf') self.assertTrue(os.path.exists(os.path.join(self.rootdir, 
'usr/share/doc/libc6/copyright'))) # caches packages cache = os.listdir(os.path.join(self.cachedir, 'Foonux 1.2', 'apt', 'var', 'cache', 'apt', 'archives')) self.assertTrue('coreutils_8.21-1ubuntu5_armhf.deb' in cache, cache) self.assertTrue('libc6_2.19-0ubuntu6_armhf.deb' in cache, cache) @unittest.skipUnless(_has_internet(), 'online test') def test_get_source_tree_sandbox(self): self._setup_foonux_config() out_dir = os.path.join(self.workdir, 'out') os.mkdir(out_dir) impl._build_apt_sandbox(self.rootdir, os.path.join(self.configdir, 'Foonux 1.2', 'sources.list')) res = impl.get_source_tree('base-files', out_dir, sandbox=self.rootdir, apt_update=True) self.assertTrue(os.path.isdir(os.path.join(res, 'debian'))) # this needs to be updated when the release in _setup_foonux_config # changes self.assertTrue(res.endswith('/base-files-7.2ubuntu5'), 'unexpected version: ' + res.split('/')[-1]) def _setup_foonux_config(self, updates=False): '''Set up directories and configuration for install_packages()''' self.cachedir = os.path.join(self.workdir, 'cache') self.rootdir = os.path.join(self.workdir, 'root') self.configdir = os.path.join(self.workdir, 'config') os.mkdir(self.cachedir) os.mkdir(self.rootdir) os.mkdir(self.configdir) os.mkdir(os.path.join(self.configdir, 'Foonux 1.2')) with open(os.path.join(self.configdir, 'Foonux 1.2', 'sources.list'), 'w') as f: f.write('deb http://archive.ubuntu.com/ubuntu/ trusty main\n') f.write('deb-src http://archive.ubuntu.com/ubuntu/ trusty main\n') f.write('deb http://ddebs.ubuntu.com/ trusty main\n') if updates: f.write('deb http://archive.ubuntu.com/ubuntu/ trusty-updates main\n') f.write('deb-src http://archive.ubuntu.com/ubuntu/ trusty-updates main\n') f.write('deb http://ddebs.ubuntu.com/ trusty-updates main\n') os.mkdir(os.path.join(self.configdir, 'Foonux 1.2', 'armhf')) with open(os.path.join(self.configdir, 'Foonux 1.2', 'armhf', 'sources.list'), 'w') as f: f.write('deb http://ports.ubuntu.com/ trusty main\n') f.write('deb-src http://ports.ubuntu.com/ trusty main\n') f.write('deb http://ddebs.ubuntu.com/ trusty main\n') if updates: f.write('deb http://ports.ubuntu.com/ trusty-updates main\n') f.write('deb-src http://ports.ubuntu.com/ trusty-updates main\n') f.write('deb http://ddebs.ubuntu.com/ trusty-updates main\n') with open(os.path.join(self.configdir, 'Foonux 1.2', 'codename'), 'w') as f: f.write('trusty') def assert_elf_arch(self, path, expected): '''Assert that an ELF file is for an expected machine type. Expected is a Debian-style architecture (i386, amd64, armhf) ''' archmap = { 'i386': '80386', 'amd64': 'X86-64', 'armhf': 'ARM', } # get ELF machine type readelf = subprocess.Popen(['readelf', '-e', path], env={}, stdout=subprocess.PIPE, universal_newlines=True) out = readelf.communicate()[0] assert readelf.returncode == 0 for line in out.splitlines(): if line.startswith(' Machine:'): machine = line.split(maxsplit=1)[1] break else: self.fail('could not fine Machine: in readelf output') self.assertTrue(archmap[expected] in machine, '%s has unexpected machine type "%s" for architecture %s' % ( path, machine, expected)) # only execute if dpkg is available try: if subprocess.call(['dpkg', '--help'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) == 0: unittest.main() except OSError: pass
gpl-2.0
9,061,224,673,467,465,000
45.004459
108
0.551471
false
3.86603
true
false
false
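The test_check_files_md5 case in the apport packaging test above builds an md5sums file by hand. A minimal standalone sketch of that file format, using only hashlib; the helper names here are illustrative, and the real impl._check_files_md5 additionally accepts bytes input and handles escaped filenames:

# Standalone sketch (not part of the apport row above): how an md5sums file like the one
# test_check_files_md5 builds can be generated and re-checked with only the standard library.
import hashlib
import os
import shutil
import tempfile

def write_md5sums(paths, sumfile):
    # One "md5<space><space>path" pair per line, as dpkg's *.md5sums files use.
    with open(sumfile, 'w') as out:
        for p in paths:
            with open(p, 'rb') as f:
                digest = hashlib.md5(f.read()).hexdigest()
            out.write('%s  %s\n' % (digest, p))

def check_md5sums(sumfile):
    # Return the paths whose current content no longer matches the recorded sum.
    mismatches = []
    with open(sumfile) as f:
        for line in f:
            digest, path = line.rstrip('\n').split('  ', 1)
            with open(path, 'rb') as target:
                if hashlib.md5(target.read()).hexdigest() != digest:
                    mismatches.append(path)
    return mismatches

if __name__ == '__main__':
    d = tempfile.mkdtemp()
    try:
        p = os.path.join(d, 'demo.txt')
        with open(p, 'w') as f:
            f.write('Some stuff')
        sums = os.path.join(d, 'sums.txt')
        write_md5sums([p], sums)
        assert check_md5sums(sums) == []
        with open(p, 'w') as f:
            f.write('Some stuff!')
        assert check_md5sums(sums) == [p]
    finally:
        shutil.rmtree(d)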
sony/nnabla
build-tools/code_generator/update_function_types.py
1
1554
# Copyright 2018,2019,2020,2021 Sony Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from os.path import exists import code_generator_utils as utils from collections import OrderedDict def get_args(): import argparse p = argparse.ArgumentParser() p.add_argument('path_types', type=str) p.add_argument('--default-type', type=str, default=None) args = p.parse_args() return args def main(): args = get_args() func_info = utils.load_function_info(flatten=True) if exists(args.path_types): func_types = utils.load_yaml_ordered(open(args.path_types, 'r')) else: func_types = OrderedDict() for name, func in func_info.items(): if name in func_types: continue print("Processing %s..." % name) types = OrderedDict() if args.default_type is not None: types[args.default_type] = [args.default_type] func_types[name] = types utils.dump_yaml(func_types, open(args.path_types, 'w')) if __name__ == '__main__': main()
apache-2.0
1,218,484,060,768,999,700
31.375
74
0.676319
false
3.771845
false
false
false
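update_function_types.py above only adds entries for functions that are missing from the existing type map, seeding them with the default type and never touching existing entries. A minimal standalone sketch of that merge, assuming plain OrderedDicts in place of the repository's YAML helpers (code_generator_utils is internal to nnabla, so it is not imported here):

# Standalone sketch of the merge behaviour: existing entries are kept untouched, and only
# functions missing from the map get a new entry seeded with the default type.
from collections import OrderedDict

def update_types(known_functions, func_types, default_type=None):
    for name in known_functions:
        if name in func_types:
            continue  # never overwrite a hand-edited entry
        types = OrderedDict()
        if default_type is not None:
            types[default_type] = [default_type]
        func_types[name] = types
    return func_types

existing = OrderedDict([('Affine', OrderedDict([('float', ['float'])]))])
merged = update_types(['Affine', 'Convolution'], existing, default_type='float')
assert list(merged) == ['Affine', 'Convolution']
assert merged['Convolution'] == OrderedDict([('float', ['float'])])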
david2307/backend_159
activities/migrations/0001_initial.py
1
1103
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations class Migration(migrations.Migration): dependencies = [ ('persons', '0007_auto_20150711_2332'), ] operations = [ migrations.CreateModel( name='Activity', fields=[ ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)), ('name', models.CharField(max_length=100)), ('description', models.CharField(max_length=200)), ('creation_date', models.DateTimeField(auto_now_add=True)), ('begin_date', models.DateTimeField()), ('end_date', models.DateTimeField()), ('latitude', models.DecimalField(null=True, max_digits=23, decimal_places=20)), ('longitude', models.DecimalField(null=True, max_digits=23, decimal_places=20)), ('minimum_assitant', models.IntegerField()), ('town', models.ForeignKey(to='persons.Town')), ], ), ]
gpl-3.0
-1,431,205,959,632,305,700
37.034483
114
0.56573
false
4.359684
false
false
false
xiang12835/python_web
py2_web2py/web2py/gluon/restricted.py
1
10754
#!/usr/bin/env python # -*- coding: utf-8 -*- """ | This file is part of the web2py Web Framework | Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> | License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) Restricted environment to execute application's code ----------------------------------------------------- """ import sys from gluon._compat import pickle, ClassType, unicodeT, to_bytes import traceback import types import os import logging from gluon.storage import Storage from gluon.http import HTTP from gluon.html import BEAUTIFY, XML from gluon.settings import global_settings logger = logging.getLogger("web2py") __all__ = ['RestrictedError', 'restricted', 'TicketStorage', 'compile2'] class TicketStorage(Storage): """ Defines the ticket object and the default values of its members (None) """ def __init__( self, db=None, tablename='web2py_ticket' ): Storage.__init__(self) self.db = db self.tablename = tablename def store(self, request, ticket_id, ticket_data): """ Stores the ticket. It will figure out if this must be on disk or in db """ if self.db: self._store_in_db(request, ticket_id, ticket_data) else: self._store_on_disk(request, ticket_id, ticket_data) def _store_in_db(self, request, ticket_id, ticket_data): self.db._adapter.reconnect() try: table = self._get_table(self.db, self.tablename, request.application) table.insert(ticket_id=ticket_id, ticket_data=pickle.dumps(ticket_data, pickle.HIGHEST_PROTOCOL), created_datetime=request.now) self.db.commit() message = 'In FILE: %(layer)s\n\n%(traceback)s\n' except Exception: self.db.rollback() message =' Unable to store in FILE: %(layer)s\n\n%(traceback)s\n' self.db.close() logger.error(message % ticket_data) def _store_on_disk(self, request, ticket_id, ticket_data): ef = self._error_file(request, ticket_id, 'wb') try: pickle.dump(ticket_data, ef) finally: ef.close() def _error_file(self, request, ticket_id, mode, app=None): root = request.folder if app: root = os.path.join(os.path.join(root, '..'), app) errors_folder = os.path.abspath( os.path.join(root, 'errors')) # .replace('\\', '/') return open(os.path.join(errors_folder, ticket_id), mode) def _get_table(self, db, tablename, app): tablename = tablename + '_' + app table = db.get(tablename) if not table: table = db.define_table( tablename, db.Field('ticket_id', length=100), db.Field('ticket_data', 'text'), db.Field('created_datetime', 'datetime')) return table def load( self, request, app, ticket_id, ): if not self.db: try: ef = self._error_file(request, ticket_id, 'rb', app) except IOError: return {} try: return pickle.load(ef) finally: ef.close() else: table = self._get_table(self.db, self.tablename, app) rows = self.db(table.ticket_id == ticket_id).select() return pickle.loads(rows[0].ticket_data) if rows else {} class RestrictedError(Exception): """ Class used to wrap an exception that occurs in the restricted environment below. The traceback is used to log the exception and generate a ticket. """ def __init__( self, layer='', code='', output='', environment=None, ): """ Layer here is some description of where in the system the exception occurred. 
""" if environment is None: environment = {} self.layer = layer self.code = code self.output = output self.environment = environment if layer: try: try: self.traceback = traceback.format_exc() except: self.traceback = traceback.format_exc(limit=1) except: self.traceback = 'no traceback because template parsing error' try: self.snapshot = snapshot(context=10, code=code, environment=self.environment) except: self.snapshot = {} else: self.traceback = '(no error)' self.snapshot = {} def log(self, request): """ Logs the exception. """ try: d = { 'layer': str(self.layer), 'code': str(self.code), 'output': str(self.output), 'traceback': str(self.traceback), 'snapshot': self.snapshot, } ticket_storage = TicketStorage(db=request.tickets_db) ticket_storage.store(request, request.uuid.split('/', 1)[1], d) cmd_opts = global_settings.cmd_options if cmd_opts and cmd_opts.print_errors: logger.error(self.traceback) return request.uuid except: logger.error(self.traceback) return None def load(self, request, app, ticket_id): """ Loads a logged exception. """ ticket_storage = TicketStorage(db=request.tickets_db) d = ticket_storage.load(request, app, ticket_id) self.layer = d.get('layer') self.code = d.get('code') self.output = d.get('output') self.traceback = d.get('traceback') self.snapshot = d.get('snapshot') def __str__(self): # safely show an useful message to the user try: output = self.output if not isinstance(output, str, bytes, bytearray): output = str(output) if isinstance(output, unicodeT): output = to_bytes(output) except: output = "" return output def compile2(code, layer): return compile(code, layer, 'exec') def restricted(ccode, environment=None, layer='Unknown', scode=None): """ Runs code in environment and returns the output. If an exception occurs in code it raises a RestrictedError containing the traceback. Layer is passed to RestrictedError to identify where the error occurred. """ if environment is None: environment = {} environment['__file__'] = layer environment['__name__'] = '__restricted__' try: exec(ccode, environment) except HTTP: raise except RestrictedError: # do not encapsulate (obfuscate) the original RestrictedError raise except Exception as error: # extract the exception type and value (used as output message) etype, evalue, tb = sys.exc_info() # XXX Show exception in Wing IDE if running in debugger if __debug__ and 'WINGDB_ACTIVE' in os.environ: sys.excepthook(etype, evalue, tb) del tb output = "%s %s" % (etype, evalue) # Save source code in ticket when available scode = scode if scode else ccode raise RestrictedError(layer, scode, output, environment) def snapshot(info=None, context=5, code=None, environment=None): """Return a dict describing a given traceback (based on cgitb.text).""" import time import linecache import inspect import pydoc import cgitb # if no exception info given, get current: etype, evalue, etb = info or sys.exc_info() if isinstance(etype, ClassType): etype = etype.__name__ # create a snapshot dict with some basic information s = {} s['pyver'] = 'Python ' + sys.version.split()[0] + ': ' + sys.executable + ' (prefix: %s)' % sys.prefix s['date'] = time.ctime(time.time()) # start to process frames records = inspect.getinnerframes(etb, context) del etb # Prevent circular references that would cause memory leaks s['frames'] = [] for frame, file, lnum, func, lines, index in records: file = file and os.path.abspath(file) or '?' 
args, varargs, varkw, locals = inspect.getargvalues(frame) call = '' if func != '?': call = inspect.formatargvalues(args, varargs, varkw, locals, formatvalue=lambda value: '=' + pydoc.text.repr(value)) # basic frame information f = {'file': file, 'func': func, 'call': call, 'lines': {}, 'lnum': lnum} highlight = {} def reader(lnum=[lnum]): highlight[lnum[0]] = 1 try: return linecache.getline(file, lnum[0]) finally: lnum[0] += 1 vars = cgitb.scanvars(reader, frame, locals) # if it is a view, replace with generated code if file.endswith('html'): lmin = lnum > context and (lnum - context) or 0 lmax = lnum + context lines = code.split("\n")[lmin:lmax] index = min(context, lnum) - 1 if index is not None: i = lnum - index for line in lines: f['lines'][i] = line.rstrip() i += 1 # dump local variables (referenced in current line only) f['dump'] = {} for name, where, value in vars: if name in f['dump']: continue if value is not cgitb.__UNDEF__: if where == 'global': name = 'global ' + name elif where != 'local': name = where + name.split('.')[-1] f['dump'][name] = pydoc.text.repr(value) else: f['dump'][name] = 'undefined' s['frames'].append(f) # add exception type, value and attributes s['etype'] = str(etype) s['evalue'] = str(evalue) s['exception'] = {} if isinstance(evalue, BaseException): for name in dir(evalue): value = pydoc.text.repr(getattr(evalue, name)) s['exception'][name] = value # add all local values (of last frame) to the snapshot s['locals'] = {} for name, value in locals.items(): s['locals'][name] = pydoc.text.repr(value) # add web2py environment variables for k, v in environment.items(): if k in ('request', 'response', 'session'): s[k] = XML(str(BEAUTIFY(v))) return s
apache-2.0
-6,934,575,283,434,433,000
31.489426
106
0.548075
false
4.145721
false
false
false
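restricted() in gluon/restricted.py above compiles application code against a layer name, executes it in a supplied environment dict, and wraps any failure in a RestrictedError that carries the layer and the formatted traceback. A minimal standalone miniature of that pattern which does not import gluon; the class and function names below are illustrative only, not web2py API:

# Standalone miniature of the restricted()/RestrictedError pattern: compile with a layer
# name, exec in a controlled environment dict, and wrap any failure with layer + traceback.
import traceback

class MiniRestrictedError(Exception):
    def __init__(self, layer, output, tb):
        Exception.__init__(self, output)
        self.layer = layer
        self.output = output
        self.traceback = tb

def mini_restricted(code, environment=None, layer='Unknown'):
    environment = {} if environment is None else environment
    environment['__file__'] = layer
    ccode = compile(code, layer, 'exec')
    try:
        exec(ccode, environment)
    except Exception as error:
        raise MiniRestrictedError(layer, '%s %s' % (type(error), error), traceback.format_exc())
    return environment

env = mini_restricted("x = 6 * 7", layer='demo.py')
assert env['x'] == 42
try:
    mini_restricted("1 / 0", layer='broken.py')
except MiniRestrictedError as e:
    assert e.layer == 'broken.py' and 'ZeroDivisionError' in e.traceback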
ikreymer/pywb
tests/memento_fixture.py
1
1343
import re MEMENTO_DATETIME = 'Memento-Datetime' ACCEPT_DATETIME = 'Accept-Datetime' LINK = 'Link' VARY = 'Vary' LINK_FORMAT = 'application/link-format' class MementoMixin(object): def _timemap_get(self, url, fmod=True, **kwargs): app = self.testapp if fmod else self.testapp_non_frame return app.get(url, extra_environ={'REQUEST_URI': url}, **kwargs) def get_links(self, resp): return list(map(lambda x: x.strip(), re.split(', (?![0-9])', resp.headers[LINK]))) def make_timemap_link(self, url, coll='pywb'): format_ = '<http://localhost:80/{2}/timemap/link/{0}>; rel="timemap"; type="{1}"' return format_.format(url, LINK_FORMAT, coll) def make_original_link(self, url): format_ = '<{0}>; rel="original"' return format_.format(url) def make_timegate_link(self, url, fmod='', coll='pywb'): fmod_slash = fmod + '/' if fmod else '' format_ = '<http://localhost:80/{2}/{1}{0}>; rel="timegate"' return format_.format(url, fmod_slash, coll) def make_memento_link(self, url, ts, dt, fmod='', coll='pywb', include_coll=True): format_ = '<http://localhost:80/{4}/{1}{3}/{0}>; rel="memento"; datetime="{2}"' if include_coll: format_ += '; collection="{4}"' return format_.format(url, ts, dt, fmod, coll)
gpl-3.0
9,091,554,098,200,876,000
36.305556
90
0.597915
false
3.116009
false
false
false
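MementoMixin.get_links above splits a Link header with re.split(', (?![0-9])'); the negative lookahead keeps the comma inside an RFC 1123 datetime from splitting a single memento link in two. A small self-contained check of that behaviour (the header value below is made up for illustration and is not taken from the pywb tests):

# Standalone check of the Link-header splitting idea used by MementoMixin.get_links.
import re

link_header = ('<http://example.com/>; rel="original", '
               '<http://localhost:80/pywb/20140127171238/http://example.com/>; rel="memento"; '
               'datetime="Mon, 27 Jan 2014 17:12:38 GMT"')

# Split only on ", " that is not followed by a digit, so the datetime stays intact.
links = [part.strip() for part in re.split(', (?![0-9])', link_header)]
assert len(links) == 2
assert links[0].endswith('rel="original"')
assert 'datetime="Mon, 27 Jan 2014 17:12:38 GMT"' in links[1]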
jbfavre/exabgp
lib/exabgp/bgp/message/update/attribute/mpurnlri.py
1
2146
# encoding: utf-8 """ mprnlri.py Created by Thomas Mangin on 2009-11-05. Copyright (c) 2009-2013 Exa Networks. All rights reserved. """ from struct import unpack from exabgp.protocol.family import AFI from exabgp.protocol.family import SAFI from exabgp.protocol.ip.address import Address from exabgp.bgp.message import IN from exabgp.bgp.message.update.attribute.attribute import Attribute from exabgp.bgp.message.update.nlri.nlri import NLRI from exabgp.bgp.message.notification import Notify # ================================================================= MP NLRI (14) class MPURNLRI (Attribute,Address): FLAG = Attribute.Flag.OPTIONAL ID = Attribute.ID.MP_UNREACH_NLRI MULTIPLE = True __slots__ = ['nlris'] def __init__ (self,afi,safi,nlris): Address.__init__(self,afi,safi) self.nlris = nlris def packed_attributes (self,addpath): if not self.nlris: return mpurnlri = {} for nlri in self.nlris: mpurnlri.setdefault((nlri.afi.pack(),nlri.safi.pack()),[]).append(nlri.pack(addpath)) for (pafi,psafi),nlris in mpurnlri.iteritems(): yield self._attribute(pafi + psafi + ''.join(nlris)) def pack (self,addpath): return ''.join(self.packed_attributes(addpath)) def __len__ (self): return len(self.pack()) def __str__ (self): return "MP_UNREACH_NLRI for %s %s with %d NLRI(s)" % (self.afi,self.safi,len(self.nlris)) @classmethod def unpack (cls,data,negotiated): nlris = [] # -- Reading AFI/SAFI afi,safi = unpack('!HB',data[:3]) offset = 3 data = data[offset:] if (afi,safi) not in negotiated.families: raise Notify(3,0,'presented a non-negotiated family %s %s' % (AFI(afi),SAFI(safi))) # Is the peer going to send us some Path Information with the route (AddPath) addpath = negotiated.addpath.receive(afi,safi) while data: length,nlri = NLRI.unpack(afi,safi,data,addpath,None,IN.withdrawn) nlris.append(nlri) data = data[length:] #logger.parser(LazyFormat("parsed withdraw mp nlri %s payload " % nlri,od,data[:length])) return cls(afi,safi,nlris) MPURNLRI.register_attribute() EMPTY_MPURNLRI = MPURNLRI(AFI(AFI.undefined),SAFI(SAFI.undefined),[])
bsd-3-clause
2,751,306,918,672,450,000
26.164557
92
0.687325
false
2.823684
false
false
false
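MPURNLRI.unpack above starts by reading a 3-byte header, a 2-byte AFI and a 1-byte SAFI in network byte order, via unpack('!HB', data[:3]). A short standalone sketch of packing and re-reading that header; the numeric constants are the standard IANA values for IPv6/unicast and are assumptions of this sketch, not taken from the exabgp code above:

# Standalone sketch of the 3-byte MP_UNREACH_NLRI header: 2-byte AFI, 1-byte SAFI,
# network byte order, followed in real messages by the withdrawn NLRI payload.
from struct import pack, unpack

AFI_IPV6 = 2      # IANA: IPv6
SAFI_UNICAST = 1  # IANA: unicast

header = pack('!HB', AFI_IPV6, SAFI_UNICAST)
assert header == b'\x00\x02\x01'

afi, safi = unpack('!HB', header[:3])
assert (afi, safi) == (2, 1)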
kreatorkodi/repository.torrentbr
plugin.video.youtube/resources/lib/youtube_plugin/youtube/helper/tv.py
1
5888
__author__ = 'bromix' from six import PY2 from ... import kodion from ...youtube.helper import utils from ...kodion.items.video_item import VideoItem def my_subscriptions_to_items(provider, context, json_data, do_filter=False): result = [] video_id_dict = {} incognito = str(context.get_param('incognito', False)).lower() == 'true' filter_list = [] black_list = False if do_filter: black_list = context.get_settings().get_bool('youtube.filter.my_subscriptions_filtered.blacklist', False) filter_list = context.get_settings().get_string('youtube.filter.my_subscriptions_filtered.list', '') filter_list = filter_list.replace(', ', ',') filter_list = filter_list.split(',') filter_list = [x.lower() for x in filter_list] items = json_data.get('items', []) for item in items: channel = item['channel'].lower() channel = channel.replace(',', '') if PY2: channel = channel.encode('utf-8', 'ignore') if not do_filter or (do_filter and (not black_list) and (channel in filter_list)) or \ (do_filter and black_list and (channel not in filter_list)): video_id = item['id'] item_params = {'video_id': video_id} if incognito: item_params.update({'incognito': incognito}) item_uri = context.create_uri(['play'], item_params) video_item = VideoItem(item['title'], uri=item_uri) if incognito: video_item.set_play_count(0) result.append(video_item) video_id_dict[video_id] = video_item use_play_data = not incognito and context.get_settings().use_playback_history() channel_item_dict = {} utils.update_video_infos(provider, context, video_id_dict, channel_items_dict=channel_item_dict, use_play_data=use_play_data) utils.update_fanarts(provider, context, channel_item_dict) # next page next_page_token = json_data.get('next_page_token', '') if next_page_token or json_data.get('continue', False): new_params = {} new_params.update(context.get_params()) new_params['next_page_token'] = next_page_token new_params['offset'] = int(json_data.get('offset', 0)) new_context = context.clone(new_params=new_params) current_page = int(new_context.get_param('page', 1)) next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context)) result.append(next_page_item) return result def tv_videos_to_items(provider, context, json_data): result = [] video_id_dict = {} incognito = str(context.get_param('incognito', False)).lower() == 'true' items = json_data.get('items', []) for item in items: video_id = item['id'] item_params = {'video_id': video_id} if incognito: item_params.update({'incognito': incognito}) item_uri = context.create_uri(['play'], item_params) video_item = VideoItem(item['title'], uri=item_uri) if incognito: video_item.set_play_count(0) result.append(video_item) video_id_dict[video_id] = video_item use_play_data = not incognito and context.get_settings().use_playback_history() channel_item_dict = {} utils.update_video_infos(provider, context, video_id_dict, channel_items_dict=channel_item_dict, use_play_data=use_play_data) utils.update_fanarts(provider, context, channel_item_dict) # next page next_page_token = json_data.get('next_page_token', '') if next_page_token or json_data.get('continue', False): new_params = {} new_params.update(context.get_params()) new_params['next_page_token'] = next_page_token new_params['offset'] = int(json_data.get('offset', 0)) new_context = context.clone(new_params=new_params) current_page = int(new_context.get_param('page', 1)) next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context)) result.append(next_page_item) 
return result def saved_playlists_to_items(provider, context, json_data): result = [] playlist_id_dict = {} incognito = str(context.get_param('incognito', False)).lower() == 'true' thumb_size = context.get_settings().use_thumbnail_size() items = json_data.get('items', []) for item in items: title = item['title'] channel_id = item['channel_id'] playlist_id = item['id'] image = utils.get_thumbnail(thumb_size, item.get('thumbnails', {})) item_params = {} if incognito: item_params.update({'incognito': incognito}) item_uri = context.create_uri(['channel', channel_id, 'playlist', playlist_id], item_params) playlist_item = kodion.items.DirectoryItem(title, item_uri, image=image) playlist_item.set_fanart(provider.get_fanart(context)) result.append(playlist_item) playlist_id_dict[playlist_id] = playlist_item channel_items_dict = {} utils.update_playlist_infos(provider, context, playlist_id_dict, channel_items_dict) utils.update_fanarts(provider, context, channel_items_dict) # next page next_page_token = json_data.get('next_page_token', '') if next_page_token or json_data.get('continue', False): new_params = {} new_params.update(context.get_params()) new_params['next_page_token'] = next_page_token new_params['offset'] = int(json_data.get('offset', 0)) new_context = context.clone(new_params=new_params) current_page = int(new_context.get_param('page', 1)) next_page_item = kodion.items.NextPageItem(new_context, current_page, fanart=provider.get_fanart(new_context)) result.append(next_page_item) return result
gpl-2.0
8,029,044,386,622,593,000
37.48366
129
0.631284
false
3.467609
false
false
false
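my_subscriptions_to_items above folds its whitelist/blacklist channel filtering into one long boolean expression. A simplified standalone restatement of that condition as a predicate; it drops the Python 2 encode step from the original, and the function name is illustrative:

# Standalone restatement of the channel filter condition, pulled out into a small
# predicate so the whitelist and blacklist branches are easier to read.
def keep_channel(channel, do_filter, black_list, filter_list):
    if not do_filter:
        return True
    channel = channel.lower().replace(',', '')
    if black_list:
        return channel not in filter_list   # blacklist: drop listed channels
    return channel in filter_list           # whitelist: keep only listed channels

filter_list = ['some channel']
assert keep_channel('Other Channel', do_filter=False, black_list=False, filter_list=filter_list)
assert keep_channel('Some Channel', do_filter=True, black_list=False, filter_list=filter_list)
assert not keep_channel('Other Channel', do_filter=True, black_list=False, filter_list=filter_list)
assert not keep_channel('Some Channel', do_filter=True, black_list=True, filter_list=filter_list)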
lwerdna/chess
PgnParser.py
1
9869
#!/usr/bin/python # Copyright 2012-2016 Andrew Lamoureux # # This file is a part of FunChess # # FunChess is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. import os import re import sys import copy import Common import ChessMove import PgnTokenizer from ChessState import ChessState ############################################################################### # Match # - contains tags, comments, moves, and states of a bughouse chess match # - is able to load itself from bpgn match text ############################################################################### class PgnChessMatch: def __init__(self): self.initState = ChessState(Common.initChessFEN) self.moves = [] self.tags = {} self.comments = [] self.states = [self.initState] self.result = None def copy(self): return copy.deepcopy(self) # - parses, populates the tags member # - parses, populates the moves member # - parses, populates the comments member # - calculates the states member # def parsePgn(self, text): tokens = PgnTokenizer.tokenize(text) currMoveNum = 0 player = 'W' while tokens: token = tokens.pop(0) #print "on token: -%s-" % token # tag tokens eg: [Event "May 2013 Tourney"] m = re.match(r'\[(.*?) "(.*?)"\]', token) if m: self.tags[m.group(1)] = m.group(2) continue # comment tokens eg: { good move! also consider Rxe8 } m = re.match('^{(.*)}$', token) if m: # if we're in the moves section, comment applies to a move if self.moves: self.moves[-1].addComment(m.group(1)) # else it applies to the match comments else: self.comments.append(m.group(1)) continue # result tokens eg: 0-1 m = re.match(Common.regexResults, token) if m: self.result = token if tokens: raise Exception("result token was not the final token! next is: " + tokens[0]) continue # move number token eg: 34. m = re.match(r'(\d+)\.', token) if m: if currMoveNum + 1 != int(m.group(1)): raise Exception("out of order move number: " + token) player = 'w' currMoveNum += 1 # normal move (SAN) m = re.match(Common.regexSanChess, token) if m: move = ChessMove.ChessMove() move.moveNum = currMoveNum move.player = player move.san = token self.moves.append(move) player = {'w':'b', 'b':'w'}[player] # calculate all board states # # initial state? or special state? (Fischer960, etc.) if 'SetUp' in self.tags and self.tags['SetUp'] == '1': if 'FEN' in self.tags: self.initState = ChessState(self.tags['FEN']) self.states = [self.initState] # loop over all moves... for move in self.moves: # exceptions (repeated moves due to time forfeiture, etc.) just carry state along... 
if 'TIME_FORFEIT' in move.flags: self.states.append(self.states[-1]) continue currState = self.states[-1] nextState = currState.transition(move) self.states.append(nextState) def __str__(self): answer = '' #answer = '%s[%s],%s[%s] vs %s[%s],%s[%s]\n' % ( \ # self.tags['WhiteA'], self.tags['WhiteAElo'], self.tags['BlackA'], self.tags['BlackAElo'], \ # self.tags['BlackB'], self.tags['BlackBElo'], self.tags['WhiteA'], self.tags['WhiteAElo'] \ #) for tag,value in self.tags.iteritems(): answer += "[%s \"%s\"]\n" % (tag, value) #answer += "COMMENTS:\n" #for c in self.comments: # answer += c + "\n" #answer += "MOVES (%d total):\n" % len(self.moves) for m in self.moves: answer += str(m) + ' ' # blah answer += self.result # done return answer ############################################################################### # PgnChessMatchIteratorFile # - return matches from file containing multiple matches # - basically, split the text around '[Event "..."]' tags ############################################################################### class PgnChessMatchIteratorFile: def __init__(self, path): self.path = path self.fp = open(path, 'r') self.lineNum = -1 def __iter__(self): self.fp.seek(0, 0) self.lineNum = -1 return self def peekLine(self, doStrip=1): line = self.fp.readline() self.fp.seek(-1*len(line), 1) if doStrip: line = line.rstrip() return line def readLine(self): self.lineNum += 1 temp = self.fp.readline() #print "read: %s" % temp return temp def consumeNewLines(self): while 1: line = self.peekLine(False) if not line: return False if not re.match(r'^\s+$', line): break self.readLine() return True # strategy here is simple: consume lines until an Event tag is found # in other words, Event tags delimit the matches def next(self): if not self.consumeNewLines(): raise StopIteration matchText = self.readLine() if not re.match(r'^\[Event', matchText): raise Exception(("expected Event tag at %s:%d\n" + \ "(instead got: %s)") % (self.path, self.lineNum, matchText)) # so long as the next line is not an Event tag, add to current match while 1: line = self.peekLine() if re.match(r'^\[Event ', line): break matchText += '\n' + line # consume the peek'd line, breaking if error if not self.readLine(): break # return a match match = PgnChessMatch() match.path = self.path match.parsePgn(matchText) return match def __del__(self): if self.fp: self.fp.close() self.fp = None ############################################################################### # MatchIteratorDir # - return matches from a directory containing files # - basically, loop over MatchIteratorFile for every file in a directory ############################################################################### class PgnChessMatchIteratorDir: def __init__(self, path): self.walkObj = os.walk(path) self.matchIterFileObj = None self.filesList = [] def __iter__(self): return self def next(self): while 1: # first level: does the file iterator still have something left? if self.matchIterFileObj: try: return self.matchIterFileObj.next() except StopIteration: self.matchIterFileObj = None # second level, is current list of files exhausted? can we create a new # file iterator? if self.filesList: self.matchIterFileObj = PgnChessMatchIteratorFile(self.filesList.pop()) continue # third level: no file iterator, no files list, descend! 
# purposely don't trap exception: StopIterations should bubble up and tell # caller that we're done (root, subFolder, files) = self.walkObj.next() for f in files: (dummy, ext) = os.path.splitext(f) if ext == '.bpgn': self.filesList.append(os.path.join(root, f)) ############################################################################### # main() ############################################################################### if __name__ == '__main__': gamesCount = 0 goodGamesCount = 0 path = sys.argv[1] it = None if os.path.isfile(path): it = PgnChessMatchIteratorFile(path) elif os.path.isdir(path): it = PgnChessMatchIteratorDir(path) else: raise Exception("WTF?") for m in it: gamesCount += 1 try: m.sanityCheck() #except MatchMovesOOOException as e: # print "%s: skipping match due to out of order (or missing) moves\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e)) # continue #except MatchZeroMovesException as e: # print "%s: skipping match due to it being empty (no moves whatsoever)\n%s\n%s" % (m.path, '\n'.join(m.comments), str(e)) # continue except Exception as e: print e for s in m.states: print s goodGamesCount += 1 #raw_input("hit enter for next game") print "%d/%d games are good (%02.2f%%)" % (goodGamesCount, gamesCount, 100.0*goodGamesCount/gamesCount)
gpl-3.0
1,263,938,477,565,290,800
31.251634
133
0.510791
false
4.183552
false
false
false
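PgnChessMatch.parsePgn above recognises tag pairs such as [Event "May 2013 Tourney"] with the regex \[(.*?) "(.*?)"\]. A small self-contained sketch applying the same pattern to a couple of made-up tag tokens, outside the tokenizer and match classes of the repository:

# Standalone sketch of the PGN tag-pair parsing step.
import re

TAG_RE = re.compile(r'\[(.*?) "(.*?)"\]')

tags = {}
for token in ['[Event "May 2013 Tourney"]', '[Result "0-1"]']:
    m = TAG_RE.match(token)
    if m:
        tags[m.group(1)] = m.group(2)

assert tags == {'Event': 'May 2013 Tourney', 'Result': '0-1'}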
poppogbr/genropy
gnrpy/gnr/pdf/test/testsuite.py
1
1391
from gnr.pdf.gnrpdf import GnrPdf from reportlab.lib.units import inch def testPage(root): for x in range(2): page = root.page(x=1 * inch, y=1 * inch) page.setFont("Helvetica", 9) pane1 = page.pane(x_=1, y_=15) pane2 = page.pane(x_=9, y_=9) pane2.setFont("Helvetica", 12) pane1.rect(x=0, y=0, width_='10', height_='5') pane3 = pane1.pane(x_=2, y_=2) pane3.rect(x=0, y=0, width_='7', height_='2') pane1.setFillGray(gray=0.4) pane1.drawString(x_=1, y_=4, text="Hello World") pane2.drawString(x_=1, y_=4, text="Hello World") # #textobject = pane2.textObject(x_=1, y_=3) ##textobject = canvas.beginText() ##textobject.setTextOrigin(inch, 2.5*inch) #textobject.setFont("Helvetica-Oblique", 14) #for line in lyrics: # textobject.textLine(line) #textobject.setFillGray(0.4) #textobject.textLines(''' #With many apologies to the Beach Boys #and anyone else who finds this objectionable #''') ##canvas.drawText(textobject) # if __name__ == '__main__': pdf = GnrPdf('/testsuite.pdf', unit='cm') root = pdf.root testPage(root) pdf.draw() pdf.save() pdf.toXml('/testsuite.xml') f = open('/testsuite.txt', 'w') f.write('\n'.join(pdf._log)) f.close()
lgpl-2.1
-5,206,272,216,680,700,000
26.294118
56
0.558591
false
2.947034
false
false
false
ecbtln/1411wrightfisher
wright_fisher.py
1
24312
__author__ = 'eblubin@mit.edu, nanaya@mit.edu' import math import numpy as np from random import choice from inspect import isfunction from matplotlib import pyplot as plt from UserDict import IterableUserDict import random import operator import heapq from progressbar import AnimatedProgressBar # This is used to help debug the code in case of unexpected output. This will start the simulation at a particular # state (a tuple of the signals_sent, and the receiver strategies), where each is a list of dictionaries of the # appropriate length. DEBUG_STATE = None # The precision of the decimal comparison operations this should not need any changing DECIMAL_PRECISION = 5 # Colors used to plot the senders and receivers GRAPH_COLORS = 'mcrgbyk' class SparseDictionary(IterableUserDict): """ A helper dictionary that helps minimize the overhead of storing continuous actions. Instead of storing keys for every possible strategy, we make use of the fact that keys will be queried in order and that this dictionary will only be used to store cumulative frequencies. """ def __init__(self, asc=True, default=0.0, *args, **kwargs): """ Initialize the sparse SparseDictionary :param asc: whether the dictionary will be queried in ascending or descending order. Ascending corresponds to sender payoffs where we accumulate upwards, and descending corresponds to receiver payoffs where we are accumulating downwards :param default: The default value to return if the key does not have a value associated with it """ IterableUserDict.__init__(self, *args, **kwargs) self.default = default if asc: self.cmp = operator.lt else: self.cmp = operator.gt self.history = [] self.last_key, self.last_value = None, None def __getitem__(self, item): try: out = IterableUserDict.__getitem__(self, item) self.last_key = item self.last_value = out return out except KeyError as e: if self.last_key is None or self.cmp(item, self.last_key): return self.default else: return self.last_value class WrightFisher(object): """ A robust Wright-Fisher simulator of the costly signaling model, that allows for a variety of sender/receiver modifications and combinations and options for parameters. """ def __init__(self, wages=(5,), sender_dist=(2.0/3.0, 1.0/3.0), w=0.15, u=0.02, receiver_prop=1.0/2.0, cost_fns = (lambda x: x * 3, lambda x: x), signals=(0, 1, 2, 3), receiver_dist = (1.0,), receiver_payoffs=((0, 10),), pop_size=100, fitness_func = lambda p, w: math.e**(p*w), animated_progress=True): """ Construct a WrightFisher simulator with the desired parameters to be simulated one or more times. :param wages: a list of wages that receiver i needs to pay any sender whom it accepts. :param sender_dist: a probability distribution identifying how the senders will be divided by sender type. The sum of this must be 1, and this will also specify the number of types of senders there are :param w: the selection strength associated with the simulation :param u: the mutation rate, the probability that a given individual does not keep the same strategy but instead randomly chooses a new strategy :param receiver_prop: the proportion of the pop_size that wll be devoted to receivers, (1 - receiver_prop) will be devoted to senders. :param cost_fns: The cost functions for each type of sender, which can be passed in as callables or dictionaries mapping a signal to its cost :param signals: a list of all possible signals that can be sent :param receiver_dist: the distribute of proportion of receivers to each possible receiver type. 
:param receiver_payoffs: a list of payoffs that the receiver of type i receives for accepting a sender of type j :param pop_size: the population size used for the simulations, note this this should be sufficiently large relative to the number of possible signals :param fitness_func: a function that takes as arguments a payoff and selection strength and outputs fitness :param animated_progress: whether or not to display an animated progress bar while performing the simulation """ # Verify the correctness and compatibility of the parameters assert math.fsum(sender_dist) == 1.0, "the sender distribution must be a valid probability distribution" assert math.fsum(receiver_dist) == 1.0, "the receiver distribution must be a valid probability distribution" assert len(sender_dist) == len(cost_fns), "the number of elements in the sender distribution must be equal to the number of elements in the cost functions" for x in receiver_payoffs: assert len(x) == len(sender_dist), "the number of elements in each of the receiver payoffs must be equal to the number of senders" assert len(receiver_dist) == len(receiver_payoffs) == len(wages), "the number of of elements in the receiver distribution, the receiver's payoffs, and the number of wages must all equal the number of total receiver types" assert len(sender_dist) > 1, "this model only makes sense with more than one type of sender" assert len(receiver_dist) > 0, "this model only makes sense with a nonzero number of senders" assert isinstance(pop_size, int), "the population size must be an integer, not something else" assert len(signals) == len(set(signals)), "the list of signals should not have any repeated elements" self.animated_progress = animated_progress self.wages = wages # benefit for being accepted by a given receiver self.sender_dist = sender_dist self.receiver_dist = receiver_dist self.n_types_of_senders = len(sender_dist) self.n_types_of_receivers = len(receiver_dist) self.w = w self.u = u self.num_signals = len(signals) self.signals = signals cost_functions_by_index = [] # cost_fns can be inputted as either arrays (corresponding to the signals), or functions (mapping signal to cost) # we want to map them to arrays before we begin for f in cost_fns: if isinstance(f, (tuple, list)): assert len(f) == self.num_signals, "the list of payoffs for a given sender must be equal to the number of signals" cost_functions_by_index.append(f) else: assert isfunction(f) x = [f(s) for s in self.signals] cost_functions_by_index.append(x) self.cost_fns_by_signal_index = cost_functions_by_index # for each sender, a lookup table mapping the signal's index (in the signals array) to its cost # for convenience, we also want to make a direct mapping of all signals to their costs self.cost_fns = [{signals[i]:x[i] for i, s in enumerate(signals)} for x in cost_functions_by_index] self.signals = signals self.receiver_payoffs = receiver_payoffs self.n_types_of_receivers = len(receiver_dist) self.fitness_func = lambda p: fitness_func(p, w) assert pop_size is not None self.num_senders = [pop_size * x * (1 - receiver_prop) for x in sender_dist] total_receivers = pop_size * receiver_prop self.num_receivers = [total_receivers * x for x in receiver_dist] self.pop_size = pop_size self.num_senders = self._round_individuals(self.num_senders) self.num_receivers = self._round_individuals(self.num_receivers) self.index_of_signal = {s:i for i, s in enumerate(self._possible_receiver_strategies())} def _round_given_type(self, unrounded_dict, desired_total): """ Converts a given sender or 
receiver's distribution, given as a dictionary, and scales it proportionally to add to the desired_total :param unrounded_dict: a weighted distribution of the number of senders and receivers sending each signal :param desired_total: the total to which the aggregate sum should be scaled """ unrounded_total = sum(unrounded_dict[k] for k in unrounded_dict) total = int(round(unrounded_total, DECIMAL_PRECISION)) assert total == desired_total int_nums = {k:int(unrounded_dict[k]) for k in unrounded_dict} diff = total - sum(int_nums[k] for k in int_nums) if diff > 0: thresh = [((int_nums[k] - unrounded_dict[k]), k) for k in int_nums] heapq.heapify(thresh) while diff > 0: v, i = heapq.heappop(thresh) int_nums[i] += 1 diff -= 1 assert sum(int_nums[k] for k in int_nums) == total return int_nums def _round_individuals(self, unrounded_frequencies): """ Due to integer cutoffs, the number of senders and receivers might not be consistent. This take the integer part of each of the inputs and then assign the remaining few leftovers (so that the sum is the sum of the original floats) in a way such that the numbers with higher decimal parts will get the extra int before those with lower. """ unrounded_total = math.fsum(unrounded_frequencies) total = int(round(unrounded_total, DECIMAL_PRECISION)) int_num_senders = [int(x) for x in unrounded_frequencies] diff = total - sum(int_num_senders) if diff > 0: # note the difference needs to be negative, because heapq's only implement a minimum priority queue but we want max priority queue thresh = [((x - y), i) for i, (x, y) in enumerate(zip(int_num_senders, unrounded_frequencies))] heapq.heapify(thresh) while diff > 0: v, i = heapq.heappop(thresh) int_num_senders[i] += 1 diff -= 1 assert sum(int_num_senders) == total, "the total number of individuals after rounding must be the same as before rounding" return int_num_senders def _normalize_to_pop_size(self, senders, receivers): """ Takes in a list of distributions of senders and receivers and rounds each distribution of each type such that each type is scaled back to the appropriate total (since each type's population remains constant :param senders: the list of sender proportions :param receivers: the list of receiver proportions :return sender, receivers: a tuple of the scaled versions of the inputs """ # to normalize, the sum at index i of senders should correspond to self.sender_dist at index i total_senders = [sum(d[k] for k in d) for d in senders] total_receivers = [sum(d[k] for k in d) for d in receivers] signals_sent = [{k:y[k] * N / total for k in y} for y, N, total in zip(senders, self.num_senders, total_senders)] receiver_strats = [{k:y[k] * N / total for k in y} for y, N, total in zip(receivers, self.num_receivers, total_receivers)] for i in xrange(self.n_types_of_senders): signals = signals_sent[i] signals_sent[i] = self._round_given_type(signals, self.num_senders[i]) assert sum(sum(x[k] for k in x) for x in signals_sent) == sum(self.num_senders) for i in xrange(self.n_types_of_receivers): signals = receiver_strats[i] receiver_strats[i] = self._round_given_type(signals, self.num_receivers[i]) assert sum(sum(x[k] for k in x) for x in receiver_strats) == sum(self.num_receivers) return signals_sent, receiver_strats def _compute_avg_cost(self, signals_by_sender_type): """ :param signals_by_sender_type: an array of senders, and each sender has a dictionary mapping a signal sent and the proportion of the population sending that signal. 
:Returns: the average signal sent by each sender type, as an array """ out = [] for f, signals in zip(self.cost_fns, signals_by_sender_type): sum_n = 0 sum_v = 0 for k in signals: sum_n += signals[k] sum_v += signals[k] * f[k] out.append(float(sum_v) / sum_n) return out def _compute_acceptance_frequencies(self, receiver_strategies): """ :returns: an array of dictionaries mapping a key (the signal sent) to a value (the proportion of receivers accepting that signal) for every type of receiver """ overall_out = [] for z in receiver_strategies: out = {} def increment(k, v): out[k] = out.get(k, 0) + v for k in z: increment(k, z[k]) signals = sorted(list(out.keys())) # make the frequency distribution into cumulative sums for i in xrange(len(signals) - 1): out[signals[i+1]] += out[signals[i]] frequency_accepted = SparseDictionary() for x in signals: frequency_accepted[x] = float(out[x])/out[signals[-1]] overall_out.append(frequency_accepted) return overall_out def _compute_type_frequencies(self, signals_sent_by_sender): """ :returns: a dictionary mapping a key (the signal accepted), to an array, where each value at index i is the likelihood of having accepted a sender with that type """ out = {} sums = {} def increment(x, s_index, val): sums[x] = sums.get(x, 0) + val likelihood = out.get(x, None) if likelihood is None: out[x] = np.zeros(self.n_types_of_senders) likelihood = out[x] likelihood[s_index] += val for s_index, sender in enumerate(signals_sent_by_sender): for x in sender: increment(x, s_index, sender[x]) signals = sorted(list(out.keys())) # we go in opposite order as above because we are now change the receiver signal chosen, so lower means more, not # less, will be accepted for i in reversed(xrange(1, len(signals))): out[signals[i-1]] += out[signals[i]] # numpy element-wise addition total = sum(out[signals[0]]) retvalue = SparseDictionary(asc=False, default=[0]*self.n_types_of_senders) for s in signals: retvalue[s] = out[s] return retvalue def _mean_of_frequency_table(self, freq_table): """ Compute the mean of a frequency table, which is a dictionary mapping values to their frequencies """ s = 0 tv = 0 for k in freq_table: num = freq_table[k] s += num tv += k * num return float(tv)/s def _possible_receiver_strategies(self): return self.signals def simulate(self, num_gens=1000, show_signals_graph=True): """ Performs a simulation on the given WrightFisher simulation object to a desired number of generations and defaulting to showing both the average cost of each sender type as well as the average signals of each sender and receiver type :param num_gens: the number of iterations to run the simulation for :param show_signals_graph: whether or not to show the supplemental graph """ # if the DEBUG flag is turned on if DEBUG_STATE is not None: signals_sent, receiver_strats = DEBUG_STATE else: # initialize the state of the world to same random state, given the restrictions on the counts for the number of each player population # for each type of sender, randomly initialize a signal for each sender and store them as a frequency table signals_sent = [] for x in self.num_senders: sender_freqs = {} for i in xrange(x): c = choice(self.signals) sender_freqs[c] = sender_freqs.get(c, 0) + 1 signals_sent.append(sender_freqs) # for each receiver, randomly initialize a strategy based on the existing signals (plus a reject all) possible_receiver_strats = self._possible_receiver_strategies() receiver_strats = [] for x in self.num_receivers: receiver_freqs = {} for i in xrange(x): c = 
choice(possible_receiver_strats) receiver_freqs[c] = receiver_freqs.get(c, 0) + 1 receiver_strats.append(receiver_freqs) avg_cost_signals_sent = np.zeros((num_gens, self.n_types_of_senders)) avg_signals_sent = np.zeros((num_gens, self.n_types_of_senders)) avg_signals_sent[0, :] = [self._mean_of_frequency_table(x) for x in signals_sent] avg_cost_signals_sent[0, :] = self._compute_avg_cost(signals_sent) avg_signals_accepted = np.zeros((num_gens, self.n_types_of_receivers)) avg_signals_accepted[0, :] = [self._mean_of_frequency_table(x) for x in receiver_strats] if self.animated_progress: # start the animated progress bar, if the bool is enabled progress_bar = AnimatedProgressBar(end=num_gens, width=80) print progress_bar, # Iterate through all the generations for t in xrange(num_gens - 1): # determine payoffs of each player # 1. for each type of receiver and for each strategy, determine proportion of receivers # accepting that strategy acceptance_ratios = self._compute_acceptance_frequencies(receiver_strats) # 2. for each type of sender, compute payoff for each possible signal sender_payoffs = [[sum(acceptance_ratios[r_i][s]*w - f[s] for r_i, w in enumerate(self.wages)) for s in self.signals] for f in self.cost_fns] # 3. compute payoffs for each possible receiver strategy for each possible receiver sender_likelihoods = self._compute_type_frequencies(signals_sent) receiver_payoffs = [[sum(sender_likelihoods[x][i]* (r_payoffs[i] - w) for i in reversed(xrange(self.n_types_of_senders))) / (self.pop_size / 5) for x in self._possible_receiver_strategies()] for w, r_payoffs in zip(self.wages, self.receiver_payoffs)] # compute fitnesses # this is a lookup table, where for each type of sender, we have the function for each possible strategy f_senders = [[self.fitness_func(p) for p in x] for x in sender_payoffs] f_receivers = [[self.fitness_func(p) for p in x] for x in receiver_payoffs] # generate frequencies for next generation, with some mutation rate ,u # we use a slightly different strategy than that included in the problem set. Instead of using a random # number generate to index into the cumulative distribution of fitnesses of individuals, we instead allocate # the exact fitness (as a decimal) for number of people in the ensuing population, and then normalize over # the sum of these fitnesses. This strategy seems to be more effective, as it reduces the random noise that # was present in the simulations for the problem set. 
new_signals_sent = [] for i, signals_sent_by_sender in enumerate(signals_sent): new_freqs = {} fitnesses = f_senders[i] for signal in signals_sent_by_sender: num = signals_sent_by_sender[signal] for j in xrange(num): if random.random() < self.u: cur_signal = choice(self.signals) else: cur_signal = signal idx = self.index_of_signal[cur_signal] assert cur_signal == self.signals[idx] # make sure the lookup table is correct f = fitnesses[idx] old = new_freqs.get(cur_signal, 0) new_freqs[cur_signal] = old + f assert new_freqs[cur_signal] > old # make sure no overflow new_signals_sent.append(new_freqs) signals_sent = new_signals_sent #needs to repeat for all types of senders new_signals_received = [] for i, signals_sent_by_receiver in enumerate(receiver_strats): new_freqs = {} fitnesses = f_receivers[i] for signal in signals_sent_by_receiver: num = signals_sent_by_receiver[signal] for j in xrange(num): if random.random() < self.u: cur_signal = choice(self.signals) else: cur_signal = signal idx = self.index_of_signal[cur_signal] assert cur_signal == self.signals[idx] f = fitnesses[idx] old = new_freqs.get(cur_signal, 0) new_freqs[cur_signal] = old + f assert new_freqs[cur_signal] > old # make sure no overflow new_signals_received.append(new_freqs) receiver_strats = new_signals_received # now we need to normalize new_signals and receiver_strats back down to their original population sizes signals_sent, receiver_strats = self._normalize_to_pop_size(signals_sent, receiver_strats) # We now want to update our running totals avg_signals_sent[t + 1, :] = [self._mean_of_frequency_table(x) for x in signals_sent] avg_cost_signals_sent[t + 1, :] = self._compute_avg_cost(signals_sent) avg_signals_accepted[t + 1, :] = [self._mean_of_frequency_table(x) for x in receiver_strats] if self.animated_progress: # print the progress bar, if it is enabled print '\r', print progress_bar + 1, # plot the results self._plot_results(avg_signals_sent, avg_cost_signals_sent, avg_signals_accepted, num_gens, show_signals_graph=show_signals_graph) def _plot_results(self, avg_signals_sent, avg_costs, avg_accepted, t, show_signals_graph=False): colors = GRAPH_COLORS x_axis = range(t) plt.figure(1, figsize=(12, 8), dpi=80, facecolor='w', edgecolor='k') if show_signals_graph: plt.subplot(211) for sender_type_idx in xrange(self.n_types_of_senders): plt.plot(x_axis, avg_costs[: t, sender_type_idx], colors[sender_type_idx], label='S_%d' % sender_type_idx) if not show_signals_graph: plt.legend(borderaxespad=0, bbox_to_anchor=(1.01, 1), loc=2) plt.xlabel('Generation') plt.title('Costly Signaling in Wright Fisher') plt.ylabel('Average cost of signal') plt.ylim(self.signals[0], np.max(avg_costs)) # show supplemental graph to help interpret results, this one will just show the signal sent and received by # all parties over time if show_signals_graph: plt.subplot(212) for sender_type_idx in xrange(self.n_types_of_senders): plt.plot(x_axis, avg_signals_sent[: t, sender_type_idx], colors[sender_type_idx], label='S_%d' % sender_type_idx) for receiver_type_idx in xrange(self.n_types_of_receivers): plt.plot(x_axis, avg_accepted[: t, receiver_type_idx], colors[self.n_types_of_senders + receiver_type_idx], label='R_%d' % receiver_type_idx) plt.legend(loc=3, borderaxespad=0, ncol=self.n_types_of_senders + self.n_types_of_receivers, mode="expand", bbox_to_anchor=(0., -.22, 1., .102)) plt.ylabel('Average signal') plt.ylim(self.signals[0], self.signals[-1]) plt.show() if __name__ == '__main__': w = WrightFisher(pop_size=100, signals=(0, 1, 2, 4)) 
w.simulate(num_gens=10000)
mit
6,413,463,132,501,530,000
46.116279
305
0.622861
false
3.996712
false
false
false
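The WrightFisher simulator in the record above is driven exactly as in its own `__main__` block; the following is a minimal standalone sketch, assuming the record's content is saved as wright_fisher.py (the module name is an assumption, not part of the record) and that matplotlib and the progressbar dependency resolve.

# Minimal driver sketch; wright_fisher.py is a hypothetical module name for the
# content of the record above. animated_progress=False skips the in-loop progress
# bar (the progressbar import at the top of that file must still resolve).
from wright_fisher import WrightFisher

sim = WrightFisher(pop_size=100,             # same parameters as the record's __main__ block
                   signals=(0, 1, 2, 4),
                   animated_progress=False)
sim.simulate(num_gens=10000, show_signals_graph=True)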
google-research/tf-slim
tf_slim/losses/__init__.py
1
1814
# coding=utf-8
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops for building neural network losses.

See [Contrib Losses](https://tensorflow.org/api_guides/python/contrib.losses).
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tf_slim.losses import metric_learning
# pylint: disable=wildcard-import
from tf_slim.losses.loss_ops import *
from tf_slim.losses.metric_learning import *
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.util.all_util import remove_undocumented

_allowed_symbols = [
    'absolute_difference',
    'add_loss',
    'cluster_loss',
    'compute_weighted_loss',
    'contrastive_loss',
    'cosine_distance',
    'get_losses',
    'get_regularization_losses',
    'get_total_loss',
    'hinge_loss',
    'lifted_struct_loss',
    'log_loss',
    'mean_pairwise_squared_error',
    'mean_squared_error',
    'metric_learning',
    'npairs_loss',
    'npairs_loss_multilabel',
    'sigmoid_cross_entropy',
    'softmax_cross_entropy',
    'sparse_softmax_cross_entropy',
    'triplet_semihard_loss',
]

remove_undocumented(__name__, _allowed_symbols)
apache-2.0
8,135,164,806,222,323,000
30.275862
80
0.688534
false
3.787056
false
false
false
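The tf_slim losses package above only aggregates and re-exports symbols from loss_ops and metric_learning; the sketch below shows how those re-exports are typically consumed in TF1-style graph code. Shapes and values are placeholders, not taken from the record.

# Illustrative sketch only; assumes graph-mode TensorFlow 1.x semantics,
# which is what tf_slim targets.
import tensorflow.compat.v1 as tf
from tf_slim import losses as slim_losses

logits = tf.random.normal([8, 10])                                   # placeholder values
onehot_labels = tf.one_hot(tf.zeros([8], dtype=tf.int32), depth=10)

xent = slim_losses.softmax_cross_entropy(logits, onehot_labels)      # added to the losses collection
total = slim_losses.get_total_loss()   # cross entropy plus any registered regularization losses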
us-ignite/us_ignite
us_ignite/apps/importer.py
1
4966
import logging import pytz import requests from StringIO import StringIO from django.contrib.auth.models import User from django.core import files from django.core.files.storage import default_storage from django.core.files.base import ContentFile from django.utils import timezone from django.utils.dateparse import parse_datetime from us_ignite.apps.models import Application, ApplicationURL, Domain from us_ignite.profiles.models import Profile logger = logging.getLogger('us_ignite.apps.importer') TIMEZONE = 'America/New_York' def parse_date(date_str): naive = parse_datetime(date_str) return pytz.timezone(TIMEZONE).localize(naive, is_dst=None) def import_author(data): email = data['email'] try: user = User.objects.get(email=email) except User.DoesNotExist: user = User.objects.create_user( data['username'], email, first_name=data['name'][:30]) profile, is_new = Profile.objects.get_or_create(user=user) if is_new: profile.website = data['website'] profile.save() return user def get_domain(data): categories = { 'education': 'education-workforce', 'advanced-manufacturing': 'advanced-manufacturing', 'health-it': 'healthcare', 'public-safety': 'public-safety', 'clean-energy': 'energy', } old_slug = data['slug'] if old_slug in categories: return Domain.objects.get(slug=categories[old_slug]) assert False, data def get_stage(data): name = data['name'].lower() stages = { 'development': Application.ALPHA, 'ideation': Application.IDEA, } if name in stages: return stages[name] assert False, name def import_urls(application, blog, repo): if blog: blog_url, is_new = (ApplicationURL.objects .get_or_create(application=application, url=blog)) blog_url.name = 'Blog' blog_url.save() else: blog_url = None if repo: repo_url, is_new = (ApplicationURL.objects .get_or_create(application=application, url=repo)) repo_url.name = 'Repository' repo_url.save() else: repo_url = None return (blog_url, repo_url) def import_image(path, key): url = 'https://mozillaignite.org%s' % path if default_storage.exists(key): logger.debug('Ignoring existing file: %s', key) return key logger.debug('Downloading: %s', url) response = requests.get(url, verify=False) if response.status_code == 200: image_file = files.File(StringIO(response.content)) return default_storage.save(key, ContentFile(image_file.read())) return u'' def _get_key_from_url(url, prefix='apps'): suffix = url.split('/')[-1] return u'%s/%s' % (prefix, suffix) _title = lambda t: u'\n###%s\n' % t def import_app(data): author_data = data.get('created_by') author = import_author(author_data) if author_data else None slug = 'MI-%s' % data['slug'] application, is_new = Application.objects.get_or_create(slug=slug) application.name = data['name'] application.summary = data['brief_description'] application.team_description = data['collaborators'] application.impact_statement = data['life_improvements'] application.domain = get_domain(data['category']) application.owner = author application.stage = get_stage(data['phase']) application.website = data['blog_url'] or data['repository_url'] application.created = parse_date(data['created_on']) application.modified = parse_date(data['updated_on']) if data['is_draft']: application.status = Application.DRAFT else: application.status = Application.PUBLISHED description_list = [ data['description'], ] if data['take_advantage']: description_list += [ _title('How does your idea take advantage of ' 'next-generation networks?'), data['take_advantage']] if data['required_effort']: description_list += [ _title('How much effort do you expect this 
work to take?'), data['required_effort']] if data['interest_making']: description_list += [_title('Interest making'), data['interest_making']] application.description = '\n'.join(description_list) application.notes = ('Imported from the Mozilla Ignite site ' '(%s).' % timezone.now()) image_url = data.get('sketh_note') if image_url: application.image = import_image( image_url, _get_key_from_url(image_url)) application.save() application.tags.add('mozillaignite') import_urls(application, data['blog_url'], data['repository_url']) return application def digest_payload(payload): imported_apps = [] for app in payload: imported_apps.append(import_app(app)) return [a for a in imported_apps if a]
bsd-3-clause
1,271,934,513,240,245,200
30.833333
80
0.639952
false
3.76783
false
false
false
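digest_payload() in the record above expects a list of dictionaries shaped like the Mozilla Ignite export; the trimmed, made-up entry below exercises only the keys that import_app() reads. Running it requires a configured Django environment and database, which the sketch does not show.

# Hypothetical payload entry; every key below is one that import_app() accesses,
# and every value is invented for illustration.
payload = [{
    'slug': 'example-app',
    'name': 'Example App',
    'brief_description': 'One-line summary.',
    'collaborators': '',
    'life_improvements': '',
    'category': {'slug': 'health-it'},            # mapped to the 'healthcare' Domain
    'phase': {'name': 'Development'},             # mapped to Application.ALPHA
    'created_by': {'email': 'dev@example.com', 'username': 'dev',
                   'name': 'Dev Eloper', 'website': ''},
    'blog_url': '', 'repository_url': '',
    'created_on': '2013-01-01T00:00:00', 'updated_on': '2013-01-02T00:00:00',
    'is_draft': False,
    'description': 'Longer description.',
    'take_advantage': '', 'required_effort': '', 'interest_making': '',
    'sketh_note': None,                           # key name kept as spelled in the source
}]
imported = digest_payload(payload)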
i3visio/osrframework
osrframework/wrappers/goodreads.py
1
3899
################################################################################
#
#    Copyright 2015-2020 Félix Brezo and Yaiza Rubio
#
#    This program is part of OSRFramework. You can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as published by
#    the Free Software Foundation, either version 3 of the License, or
#    (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program. If not, see <http://www.gnu.org/licenses/>.
#
################################################################################

__author__ = "Felix Brezo, Yaiza Rubio <contacto@i3visio.com>"
__version__ = "2.0"


from osrframework.utils.platforms import Platform


class Goodreads(Platform):
    """A <Platform> object for Goodreads"""

    def __init__(self):
        self.platformName = "Goodreads"
        self.tags = ["social", "opinions"]

        ########################
        # Defining valid modes #
        ########################
        self.isValidMode = {}
        self.isValidMode["phonefy"] = False
        self.isValidMode["usufy"] = True
        self.isValidMode["searchfy"] = False

        ######################################
        # Search URL for the different modes #
        ######################################
        # Strings with the URL for each and every mode
        self.url = {}
        #self.url["phonefy"] = "http://anyurl.com//phone/" + "<phonefy>"
        self.url["usufy"] = "http://www.goodreads.com/" + "<usufy>"
        #self.url["searchfy"] = "http://anyurl.com/search/" + "<searchfy>"

        ######################################
        # Whether the user needs credentials #
        ######################################
        self.needsCredentials = {}
        #self.needsCredentials["phonefy"] = False
        self.needsCredentials["usufy"] = False
        #self.needsCredentials["searchfy"] = False

        #################
        # Valid queries #
        #################
        # Strings that will imply that the query number is not appearing
        self.validQuery = {}
        # The regular expression '.+' will match any query.
        #self.validQuery["phonefy"] = ".*"
        self.validQuery["usufy"] = ".+"
        #self.validQuery["searchfy"] = ".*"

        ###################
        # Not_found clues #
        ###################
        # Strings that will imply that the query number is not appearing
        self.notFoundText = {}
        #self.notFoundText["phonefy"] = []
        self.notFoundText["usufy"] = ["<title>Page not found</title>"]
        #self.notFoundText["searchfy"] = []

        #########################
        # Fields to be searched #
        #########################
        self.fieldsRegExp = {}

        # Definition of regular expressions to be searched in phonefy mode
        #self.fieldsRegExp["phonefy"] = {}
        # Example of fields:
        #self.fieldsRegExp["phonefy"]["i3visio.location"] = ""

        # Definition of regular expressions to be searched in usufy mode
        self.fieldsRegExp["usufy"] = {}
        # Example of fields:
        #self.fieldsRegExp["usufy"]["i3visio.location"] = ""

        # Definition of regular expressions to be searched in searchfy mode
        #self.fieldsRegExp["searchfy"] = {}
        # Example of fields:
        #self.fieldsRegExp["searchfy"]["i3visio.location"] = ""

        ################
        # Fields found #
        ################
        # This attribute will be feeded when running the program.
        self.foundFields = {}
agpl-3.0
6,122,434,766,374,014,000
37.98
80
0.524371
false
4.365062
false
false
false
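The Goodreads wrapper above is normally consumed by osrframework's usufy pipeline; outside that pipeline, the essential piece is the URL template, shown in the standalone sketch below with a made-up nickname.

# Standalone illustration of the wrapper's URL template; the real lookup is
# orchestrated by osrframework, not by this snippet.
platform = Goodreads()
nickname = "example_user"                                    # hypothetical query
profile_url = platform.url["usufy"].replace("<usufy>", nickname)
print(profile_url)   # http://www.goodreads.com/example_user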
Tesora-Release/tesora-trove
trove/db/sqlalchemy/migrate_repo/versions/038_instance_faults.py
1
2042
# Copyright 2016 Tesora, Inc.
# All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.
#

from sqlalchemy import ForeignKey
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData

from trove.db.sqlalchemy.migrate_repo.schema import Boolean
from trove.db.sqlalchemy.migrate_repo.schema import create_tables
from trove.db.sqlalchemy.migrate_repo.schema import DateTime
from trove.db.sqlalchemy.migrate_repo.schema import drop_tables
from trove.db.sqlalchemy.migrate_repo.schema import String
from trove.db.sqlalchemy.migrate_repo.schema import Table
from trove.db.sqlalchemy.migrate_repo.schema import Text


meta = MetaData()

instance_faults = Table(
    'instance_faults',
    meta,
    Column('id', String(length=64), primary_key=True, nullable=False),
    Column('instance_id', String(length=64),
           ForeignKey('instances.id', ondelete="CASCADE", onupdate="CASCADE"),
           nullable=False),
    Column('message', String(length=255), nullable=False),
    Column('details', Text(length=65535), nullable=False),
    Column('created', DateTime(), nullable=False),
    Column('updated', DateTime(), nullable=False),
    Column('deleted', Boolean(), default=0, nullable=False),
    Column('deleted_at', DateTime()),
)


def upgrade(migrate_engine):
    meta.bind = migrate_engine
    Table('instances', meta, autoload=True)
    create_tables([instance_faults])


def downgrade(migrate_engine):
    meta.bind = migrate_engine
    drop_tables([instance_faults])
apache-2.0
-6,658,379,695,200,043,000
35.464286
78
0.728208
false
3.942085
false
false
false
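The migration module above follows the sqlalchemy-migrate convention of module-level upgrade()/downgrade() hooks; in Trove they are driven through the migrate repository (db sync), but run by hand it would look roughly like the sketch below. The connection URL is a placeholder and an 'instances' table must already exist.

# Hand-run sketch only; not how Trove invokes migrations in practice.
from sqlalchemy import create_engine

engine = create_engine('mysql+pymysql://trove:secret@localhost/trove')  # placeholder URL
upgrade(engine)       # creates instance_faults with its FK onto instances.id
# downgrade(engine)   # would drop the table again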
neozhangthe1/coverage_model
groundhog/trainer/SGD.py
1
6790
""" Stochastic Gradient Descent with momentum. TODO: write more documentation """ __docformat__ = 'restructedtext en' __authors__ = ("KyungHyun Cho " "Razvan Pascanu " "Caglar Gulcehre ") __contact__ = "Razvan Pascanu <r.pascanu@gmail>" import numpy import time import theano import theano.tensor as TT # from theano.sandbox.scan import scan from theano.scan_module import scan from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams from groundhog.utils import print_time, print_mem, const class SGD(object): """ Stochastic gradient descent class """ def __init__(self, model, state, data): """ :type model: groundhog model class :param model: class depicting the model to be optimized :type state: dictionary or jobman DD object :param state: dictionary containing various hyper-parameters. The class will write into this dictionary updates like the current training error and so on :type data: groundhog dataset object :param data: data iterator over which training is done """ ##################################### # Step 0. Constructs shared variables ##################################### bs = state['bs'] self.model = model self.rng = numpy.random.RandomState(state['seed']) srng = RandomStreams(self.rng.randint(213)) self.gs = [theano.shared(numpy.zeros(p.get_value(borrow=True).shape, dtype=theano.config.floatX), name=p.name) for p in model.params] self.step = 0 self.bs = bs self.state = state self.data = data self.step_timer = time.time() self.gdata = [theano.shared(numpy.zeros( (2,)*x.ndim, dtype=x.dtype), name=x.name) for x in model.inputs] if 'profile' not in self.state: self.state['profile'] = 0 ################################### # Step 1. Compile training function ################################### print('Constructing grad function') loc_data = self.gdata lr = TT.scalar('lr') self.prop_exprs = [x[1] for x in model.properties] self.prop_names = [x[0] for x in model.properties] self.update_rules = [x[1] for x in model.updates] rval = theano.clone(model.param_grads + self.update_rules + \ self.prop_exprs + [model.train_cost], replace=list(zip(model.inputs, loc_data))) nparams = len(model.params) nouts = len(self.prop_exprs) nrules = len(self.update_rules) gs = rval[:nparams] rules = rval[nparams:nparams + nrules] outs = rval[nparams + nrules:] norm_gs = sum(TT.sum(x**2) for x,p in zip(gs, self.model.params) if p not in self.model.exclude_params_for_norm) if 'cutoff' in state and state['cutoff'] > 0: c = numpy.float32(state['cutoff']) if state['cutoff_rescale_length']: c = c * TT.cast(loc_data[0].shape[0], 'float32') notfinite = TT.or_(TT.isnan(norm_gs), TT.isinf(norm_gs)) _gs = [] for g,p in zip(gs,self.model.params): if p not in self.model.exclude_params_for_norm: tmpg = TT.switch(TT.ge(norm_gs, c), g*c/norm_gs, g) _gs.append( TT.switch(notfinite, numpy.float32(.1)*p, tmpg)) else: _gs.append(g) gs = _gs store_gs = [(s,g) for s,g in zip(self.gs, gs)] updates = store_gs + [(s[0], r) for s,r in zip(model.updates, rules)] print('Compiling grad function') st = time.time() self.train_fn = theano.function( [], outs, name='train_function', updates = updates, givens = list(zip(model.inputs, loc_data)), profile=self.state['profile']) print('took', time.time() - st) self.lr = numpy.float32(state['lr']) new_params = [p - s*lr*g for s, p, g in zip(model.params_grad_scale, model.params, self.gs)] self.update_fn = theano.function( [lr], [], name='update_function', allow_input_downcast=True, updates = list(zip(model.params, new_params)), profile=self.state['profile']) self.old_cost = 1e20 
self.schedules = model.get_schedules() self.return_names = self.prop_names + \ ['cost', 'time_step', 'whole_time', 'lr'] def __call__(self): batch = next(self.data) # Perturb the data (! and the model) if isinstance(batch, dict): batch = self.model.perturb(**batch) else: batch = self.model.perturb(*batch) # Load the dataset into GPU # Note: not the most efficient approach in general, as it involves # each batch is copied individually on gpu if isinstance(batch, dict): for gdata in self.gdata: gdata.set_value(batch[gdata.name], borrow=True) else: for gdata, data in zip(self.gdata, batch): gdata.set_value(data, borrow=True) # Run the trianing function g_st = time.time() rvals = self.train_fn() for schedule in self.schedules: schedule(self, rvals[-1]) self.update_fn(self.lr) g_ed = time.time() self.state['lr'] = float(self.lr) cost = rvals[-1] self.old_cost = cost whole_time = time.time() - self.step_timer if self.step % self.state['trainFreq'] == 0: msg = '.. iter %4d cost %.3f' vals = [self.step, cost] for dx, prop in enumerate(self.prop_names): msg += ' '+prop+' %.2e' vals += [float(numpy.array(rvals[dx]))] msg += ' step time %s whole time %s lr %.2e' vals += [print_time(g_ed - g_st), print_time(time.time() - self.step_timer), float(self.lr)] print(msg % tuple(vals)) self.step += 1 ret = dict([('cost', float(cost)), ('lr', float(self.lr)), ('time_step', float(g_ed - g_st)), ('whole_time', float(whole_time))]+list(zip(self.prop_names, rvals))) return ret
bsd-3-clause
-7,551,729,599,220,333,000
36.103825
100
0.508837
false
3.904543
false
false
false
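The SGD trainer above compiles its Theano functions once in __init__ and performs a single minibatch update per call; a driving-loop sketch follows, where model, state and data are GroundHog objects this record does not define and the iteration budget is an assumption (GroundHog's own main loop normally owns it).

# Illustrative loop only; 'model', 'state' and 'data' come from the surrounding
# GroundHog framework and are not constructed here.
trainer = SGD(model, state, data)

max_iters = 10000                       # hypothetical budget
for step in range(max_iters):
    stats = trainer()                   # one minibatch: forward pass, gradients, parameter update
    if step % state['trainFreq'] == 0:
        print(stats['cost'], stats['lr'], stats['time_step'])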
jamesiter/JimV-C
jimvc/api/snapshot.py
1
12999
#!/usr/bin/env python # -*- coding: utf-8 -*- from math import ceil import requests from flask import Blueprint, url_for from flask import request import json import jimit as ji import os from jimvc.api.base import Base from jimvc.models import Guest, Config, Disk from jimvc.models import Snapshot, SnapshotDiskMapping from jimvc.models import OSTemplateImage from jimvc.models import Utils from jimvc.models import Rules from jimvc.models import OSTemplateImageKind __author__ = 'James Iter' __date__ = '2018/4/10' __contact__ = 'james.iter.cn@gmail.com' __copyright__ = '(c) 2018 by James Iter.' blueprint = Blueprint( 'api_snapshot', __name__, url_prefix='/api/snapshot' ) blueprints = Blueprint( 'api_snapshots', __name__, url_prefix='/api/snapshots' ) snapshot_base = Base(the_class=Snapshot, the_blueprint=blueprint, the_blueprints=blueprints) @Utils.dumps2response def r_create(): args_rules = [ Rules.GUEST_UUID.value ] if 'label' in request.json: args_rules.append( Rules.LABEL.value, ) try: ret = dict() ret['state'] = ji.Common.exchange_state(20000) ji.Check.previewing(args_rules, request.json) snapshot = Snapshot() guest = Guest() guest.uuid = request.json.get('guest_uuid') guest.get_by('uuid') snapshot.label = request.json.get('label', '') snapshot.status = guest.status snapshot.guest_uuid = guest.uuid snapshot.snapshot_id = '_'.join(['tmp', ji.Common.generate_random_code(length=8)]) snapshot.parent_id = '-' snapshot.progress = 0 snapshot.create() snapshot.get_by('snapshot_id') message = { '_object': 'snapshot', 'action': 'create', 'uuid': guest.uuid, 'node_id': guest.node_id, 'passback_parameters': {'id': snapshot.id} } Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False)) ret['data'] = snapshot.__dict__ return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_update(snapshot_id): snapshot = Snapshot() args_rules = [ Rules.SNAPSHOT_ID.value ] if 'label' in request.json: args_rules.append( Rules.LABEL.value, ) if args_rules.__len__() < 2: ret = dict() ret['state'] = ji.Common.exchange_state(20000) return ret request.json['snapshot_id'] = snapshot_id try: ji.Check.previewing(args_rules, request.json) snapshot.snapshot_id = request.json.get('snapshot_id') snapshot.get_by('snapshot_id') snapshot.label = request.json.get('label', snapshot.label) snapshot.update() snapshot.get() ret = dict() ret['state'] = ji.Common.exchange_state(20000) ret['data'] = snapshot.__dict__ return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_get(snapshots_id): return snapshot_base.get(ids=snapshots_id, ids_rule=Rules.SNAPSHOTS_ID.value, by_field='snapshot_id') @Utils.dumps2response def r_get_by_filter(): return snapshot_base.get_by_filter() @Utils.dumps2response def r_content_search(): return snapshot_base.content_search() @Utils.dumps2response def r_delete(snapshots_id): args_rules = [ Rules.SNAPSHOTS_ID.value ] try: ji.Check.previewing(args_rules, {'snapshots_id': snapshots_id}) snapshot = Snapshot() guest = Guest() # 检测所指定的 快照 都存在 for snapshot_id in snapshots_id.split(','): snapshot.snapshot_id = snapshot_id snapshot.get_by('snapshot_id') guest.uuid = snapshot.guest_uuid guest.get_by('uuid') # 执行删除操作 for snapshot_id in snapshots_id.split(','): snapshot.snapshot_id = snapshot_id snapshot.get_by('snapshot_id') guest.uuid = snapshot.guest_uuid guest.get_by('uuid') message = { '_object': 'snapshot', 'action': 'delete', 'uuid': snapshot.guest_uuid, 'snapshot_id': snapshot.snapshot_id, 'node_id': guest.node_id, 
'passback_parameters': {'id': snapshot.id} } Utils.emit_instruction(message=json.dumps(message)) # 删除创建失败的 快照 if snapshot.progress == 255: SnapshotDiskMapping.delete_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot.snapshot_id])) snapshot.delete() else: snapshot.progress = 254 snapshot.update() ret = dict() ret['state'] = ji.Common.exchange_state(20000) return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_revert(snapshot_id): args_rules = [ Rules.SNAPSHOT_ID.value ] try: ret = dict() ret['state'] = ji.Common.exchange_state(20000) ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id}) snapshot = Snapshot() guest = Guest() snapshot.snapshot_id = snapshot_id snapshot.get_by('snapshot_id') snapshot.progress = 253 snapshot.update() snapshot.get() guest.uuid = snapshot.guest_uuid guest.get_by('uuid') message = { '_object': 'snapshot', 'action': 'revert', 'uuid': guest.uuid, 'snapshot_id': snapshot.snapshot_id, 'node_id': guest.node_id, 'passback_parameters': {'id': snapshot.id} } Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False)) ret['data'] = snapshot.__dict__ return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_get_disks(snapshot_id): args_rules = [ Rules.SNAPSHOT_ID.value ] try: ret = dict() ret['state'] = ji.Common.exchange_state(20000) ret['data'] = list() ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id}) rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot_id])) for row in rows: ret['data'].append(row['disk_uuid']) return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_get_snapshots_by_disks_uuid(disks_uuid): args_rules = [ Rules.UUIDS.value ] try: ret = dict() ret['state'] = ji.Common.exchange_state(20000) ret['data'] = list() ji.Check.previewing(args_rules, {'uuids': disks_uuid}) rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['disk_uuid', 'in', disks_uuid])) ret['data'] = rows return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_convert_to_os_template_image(snapshot_id, disk_uuid): args_rules = [ Rules.SNAPSHOT_ID.value, Rules.DISK_UUID.value, Rules.LABEL.value ] try: ret = dict() ret['state'] = ji.Common.exchange_state(20000) ji.Check.previewing(args_rules, {'snapshot_id': snapshot_id, 'disk_uuid': disk_uuid, 'label': request.json.get('label')}) rows, _ = SnapshotDiskMapping.get_by_filter(filter_str=':'.join(['snapshot_id', 'eq', snapshot_id])) disks_uuid = list() for row in rows: disks_uuid.append(row['disk_uuid']) if disk_uuid not in disks_uuid: ret['state'] = ji.Common.exchange_state(40401) ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], u': 未在快照: ', snapshot_id, u' 中找到磁盘:', disk_uuid]) return ret config = Config() config.id = 1 config.get() snapshot = Snapshot() os_template_image = OSTemplateImage() guest = Guest() disk = Disk() snapshot.snapshot_id = snapshot_id snapshot.get_by('snapshot_id') snapshot.progress = 252 guest.uuid = snapshot.guest_uuid guest.get_by('uuid') disk.uuid = disk_uuid disk.get_by('uuid') os_template_image.id = guest.os_template_image_id os_template_image.get() image_name = '_'.join([snapshot.snapshot_id, disk.uuid]) + '.' 
+ disk.format os_template_image.id = 0 os_template_image.label = request.json.get('label') os_template_image.path = '/'.join([os.path.dirname(os_template_image.path), image_name]) os_template_image.kind = OSTemplateImageKind.custom.value os_template_image.progress = 0 os_template_image.create_time = ji.Common.tus() if os_template_image.exist_by('path'): ret['state'] = ji.Common.exchange_state(40901) ret['state']['sub']['zh-cn'] = ''.join([ret['state']['sub']['zh-cn'], ': ', os_template_image.path]) return ret os_template_image.create() os_template_image.get_by('path') message = { '_object': 'snapshot', 'action': 'convert', 'uuid': disk.guest_uuid, 'snapshot_id': snapshot.snapshot_id, 'storage_mode': config.storage_mode, 'dfs_volume': config.dfs_volume, 'node_id': disk.node_id, 'snapshot_path': disk.path, 'template_path': os_template_image.path, 'os_template_image_id': os_template_image.id, 'passback_parameters': {'id': snapshot.snapshot_id, 'os_template_image_id': os_template_image.id} } Utils.emit_instruction(message=json.dumps(message, ensure_ascii=False)) snapshot.update() return ret except ji.PreviewingError, e: return json.loads(e.message) @Utils.dumps2response def r_show(): args = list() page = int(request.args.get('page', 1)) page_size = int(request.args.get('page_size', 10)) keyword = request.args.get('keyword', None) order_by = request.args.get('order_by', None) order = request.args.get('order', 'desc') if page is not None: args.append('page=' + page.__str__()) if page_size is not None: args.append('page_size=' + page_size.__str__()) if keyword is not None: args.append('keyword=' + keyword.__str__()) if order_by is not None: args.append('order_by=' + order_by) if order is not None: args.append('order=' + order) snapshots_url = url_for('api_snapshots.r_get_by_filter', _external=True) if keyword is not None: snapshots_url = url_for('api_snapshots.r_content_search', _external=True) if args.__len__() > 0: snapshots_url = snapshots_url + '?' + '&'.join(args) snapshots_ret = requests.get(url=snapshots_url, cookies=request.cookies) snapshots_ret = json.loads(snapshots_ret.content) guests_uuid = list() for snapshot in snapshots_ret['data']: guests_uuid.append(snapshot['guest_uuid']) guests, _ = Guest.get_by_filter(filter_str='uuid:in:' + ','.join(guests_uuid)) # Guest uuid 与 Guest 的映射 guests_mapping_by_uuid = dict() for guest in guests: guests_mapping_by_uuid[guest['uuid']] = guest for i, snapshot in enumerate(snapshots_ret['data']): if snapshot['guest_uuid'].__len__() == 36: snapshots_ret['data'][i]['guest'] = guests_mapping_by_uuid[snapshot['guest_uuid']] last_page = int(ceil(1 / float(page_size))) page_length = 5 pages = list() if page < int(ceil(page_length / 2.0)): for i in range(1, page_length + 1): pages.append(i) if i == last_page or last_page == 0: break elif last_page - page < page_length / 2: for i in range(last_page - page_length + 1, last_page + 1): if i < 1: continue pages.append(i) else: for i in range(page - page_length / 2, page + int(ceil(page_length / 2.0))): pages.append(i) if i == last_page or last_page == 0: break ret = dict() ret['state'] = ji.Common.exchange_state(20000) ret['data'] = { 'snapshots': snapshots_ret['data'], 'paging': snapshots_ret['paging'], 'guests_mapping_by_uuid': guests_mapping_by_uuid, 'page': page, 'page_size': page_size, 'keyword': keyword, 'pages': pages, 'last_page': last_page, 'order_by': order_by, 'order': order } return ret
gpl-3.0
4,323,306,669,651,697,000
25.970772
118
0.571639
false
3.573721
true
false
false
AlienCowEatCake/ImageViewer
src/ThirdParty/Exiv2/exiv2-0.27.3-Source/tests/bugfixes/redmine/test_issue_540.py
3
5145
# -*- coding: utf-8 -*- import system_tests class PrettyPrintXmp(metaclass=system_tests.CaseMeta): url = "http://dev.exiv2.org/issues/540" filename = "$data_path/exiv2-bug540.jpg" commands = ["$exiv2 -u -px $filename"] stdout = ["""Xmp.dc.creator XmpSeq 1 Ian Britton Xmp.dc.description LangAlt 1 lang="x-default" Communications Xmp.dc.rights LangAlt 1 lang="x-default" ian Britton - FreeFoto.com Xmp.dc.source XmpText 12 FreeFoto.com Xmp.dc.subject XmpBag 1 Communications Xmp.dc.title LangAlt 1 lang="x-default" Communications Xmp.exif.ApertureValue XmpText 3 F16 Xmp.exif.BrightnessValue XmpText 8 0.260156 Xmp.exif.ColorSpace XmpText 1 sRGB Xmp.exif.DateTimeOriginal XmpText 20 2002:07:13 15:58:28 Xmp.exif.ExifVersion XmpText 4 2.00 Xmp.exif.ExposureBiasValue XmpText 6 -13/20 EV Xmp.exif.ExposureProgram XmpText 1 Shutter priority Xmp.exif.FNumber XmpText 3 F0.6 Xmp.exif.FileSource XmpText 1 (0) Xmp.exif.FlashpixVersion XmpText 4 1.00 Xmp.exif.FocalLength XmpText 3 0.0 mm Xmp.exif.FocalPlaneResolutionUnit XmpText 1 inch Xmp.exif.FocalPlaneXResolution XmpText 8 12.0508 Xmp.exif.FocalPlaneYResolution XmpText 8 12.0508 Xmp.exif.GPSLatitude XmpText 13 54,59.380000N Xmp.exif.GPSLongitude XmpText 12 1,54.850000W Xmp.exif.GPSMapDatum XmpText 5 WGS84 Xmp.exif.GPSTimeStamp XmpText 20 2002:07:13 14:58:24 Xmp.exif.GPSVersionID XmpText 7 2.0.0.0 Xmp.exif.ISOSpeedRatings XmpSeq 1 0 Xmp.exif.MeteringMode XmpText 1 Multi-segment Xmp.exif.PixelXDimension XmpText 4 2400 Xmp.exif.PixelYDimension XmpText 4 1600 Xmp.exif.SceneType XmpText 1 (0) Xmp.exif.SensingMethod XmpText 1 One-chip color area Xmp.exif.ShutterSpeedValue XmpText 10 1/724 s Xmp.pdf.Keywords XmpText 14 Communications Xmp.photoshop.AuthorsPosition XmpText 12 Photographer Xmp.photoshop.CaptionWriter XmpText 11 Ian Britton Xmp.photoshop.Category XmpText 3 BUS Xmp.photoshop.City XmpText 1 Xmp.photoshop.Country XmpText 14 Ubited Kingdom Xmp.photoshop.Credit XmpText 11 Ian Britton Xmp.photoshop.DateCreated XmpText 10 2002-06-20 Xmp.photoshop.Headline XmpText 14 Communications Xmp.photoshop.State XmpText 1 Xmp.photoshop.SupplementalCategories XmpBag 1 Communications Xmp.photoshop.Urgency XmpText 1 5 Xmp.tiff.Artist XmpText 11 Ian Britton Xmp.tiff.BitsPerSample XmpSeq 1 8 Xmp.tiff.Compression XmpText 1 6 Xmp.tiff.Copyright LangAlt 1 lang="x-default" ian Britton - FreeFoto.com Xmp.tiff.ImageDescription LangAlt 1 lang="x-default" Communications Xmp.tiff.ImageLength XmpText 3 400 Xmp.tiff.ImageWidth XmpText 3 600 Xmp.tiff.Make XmpText 8 FUJIFILM Xmp.tiff.Model XmpText 12 FinePixS1Pro Xmp.tiff.Orientation XmpText 1 top, left Xmp.tiff.ResolutionUnit XmpText 1 inch Xmp.tiff.Software XmpText 19 Adobe Photoshop 7.0 Xmp.tiff.XResolution XmpText 5 300 Xmp.tiff.YCbCrPositioning XmpText 1 Co-sited Xmp.tiff.YResolution XmpText 5 300 Xmp.xmp.CreateDate XmpText 20 2002-07-13T15:58:28Z Xmp.xmp.ModifyDate XmpText 20 2002-07-19T13:28:10Z Xmp.xmpBJ.JobRef XmpText 0 type="Bag" Xmp.xmpBJ.JobRef[1] XmpText 0 type="Struct" Xmp.xmpBJ.JobRef[1]/stJob:name XmpText 12 Photographer Xmp.xmpMM.DocumentID XmpText 58 adobe:docid:photoshop:84d4dba8-9b11-11d6-895d-c4d063a70fb0 Xmp.xmpRights.Marked XmpText 4 True Xmp.xmpRights.WebStatement XmpText 16 www.freefoto.com """] stderr = [""] retval = [0]
gpl-3.0
4,314,905,811,117,422,000
62.518519
118
0.515063
false
3.398283
false
false
false
fabiking/plugin.video.Mfabiking
resources/tools/livesoccertv.py
1
9605
# -*- coding: utf-8 -*- #------------------------------------------------------------ # Parser de LiveSoccerTV para PalcoTV # Version 0.1 (05.05.2014) #------------------------------------------------------------ # License: GPL (http://www.gnu.org/licenses/gpl-3.0.html) #------------------------------------------------------------ import xbmc import xbmcgui import xbmcaddon import xbmcplugin import plugintools from __main__ import * addonName = xbmcaddon.Addon().getAddonInfo("name") addonVersion = xbmcaddon.Addon().getAddonInfo("version") addonId = xbmcaddon.Addon().getAddonInfo("id") addonPath = xbmcaddon.Addon().getAddonInfo("path") playlists = xbmc.translatePath(os.path.join('special://userdata/playlists', '')) temp = xbmc.translatePath(os.path.join('special://userdata/playlists/tmp', '')) def lstv0(params): plugintools.log("[%s %s] LiveSoccerTV " % (addonName, addonVersion)) thumbnail = params.get("thumbnail") fanart = params.get("fanart") url = params.get("url") data = gethttp_referer_headers(url,url) today0 = plugintools.find_single_match(data, '<a class="open-calendar">(.*?)</a>') today1 = plugintools.find_single_match(data, '<a class="open-calendar navbar_cal_current-data">(.*?)</a>') today0 = diasem(today0) plugintools.add_item(action="", title='[COLOR lightyellow][B]LiveSoccerTV[/B] / [COLOR lightgreen][I]'+today0+' '+today1+'[/I][/COLOR]', url = "", thumbnail = thumbnail , fanart = fanart, folder = False, isPlayable = False) ligas = plugintools.find_multiple_matches(data, '<div class="clearfix b_trim">(.*?)<div class="b_league -low -blue-bg -accordion -white-border-bottom">') liga_logo = plugintools.find_multiple_matches(data, 'class="fll b_league_logo"><img src="([^"]+)') print 'liga_logo',liga_logo i=0 for entry in ligas: cabecera = plugintools.find_single_match(entry, '<span class="fll b_league_name b_trim_inner">(.*?)</span>') try: ligalogo = liga_logo[i] except: ligalogo = thumbnail #plugintools.log("cabecera= "+cabecera) cabecera=cabecera.replace("&#039;", "'") plugintools.add_item(action="", title='[COLOR orange][B]'+cabecera+'[/B][/COLOR]', fanart=fanart, thumbnail=ligalogo, url="", folder=False, isPlayable=False) matches = plugintools.find_multiple_matches(entry, '<div class="b_match_info-elem-wrapper">(.*?)class="b_match_all-link"></a></div>') i = i + 1 for entry in matches: url = 'http'+plugintools.find_single_match(entry, 'href="http([^"]+)') teams = plugintools.find_multiple_matches(entry, '<span>(.*?)</span>') goals = plugintools.find_multiple_matches(entry, '<div class="b_match_count">(.*?)</div>') chs = plugintools.find_single_match(entry, '<div class="b_match_channel_links">(.*?)</div>').strip() chs = chs.split(",") bcasters = "" for item in chs: if bcasters == "": bcasters = item else: bcasters = bcasters + ", " + item if chs[0] == "": bcasters = 'Sin emisión en España' print bcasters bcasters = bcasters.replace("\t", "") if len(goals) == 2: match_title = '[COLOR white]'+teams[0] + '[COLOR lightyellow][B] '+goals[0]+'[/COLOR][/B][COLOR white] vs ' + teams[1]+' [/COLOR][COLOR lightyellow][B]'+goals[1]+'[/COLOR][/B]' else: match_title = '[COLOR white]'+teams[0] + ' vs ' + teams[1]+'[/COLOR]' match_title=match_title.replace("&#039;", "'") plugintools.add_item(action="lstv1", title=match_title, url=url, thumbnail=ligalogo, extra=bcasters, fanart=fanart, folder=False, isPlayable=False) def lstv1(params): menu_selec = ['[COLOR cyan]'+params.get("extra")+'[/COLOR]', "Ver cobertura internacional", "Estadísticas en vivo"] dia_lstv = plugintools.selector(menu_selec, 
params.get("title")) if dia_lstv == 1: lstv2() if dia_lstv == 2: lstv3() def lstv2(): params = plugintools.get_params() url = params.get("url") data = gethttp_referer_headers(url,url) match_coverage = plugintools.find_single_match(data, 'International Coverage(.*?)<div id="match-lineups" class="match-info hidden">') country_match = plugintools.find_multiple_matches(match_coverage, '<div class="row">(.*?)<div class="b_channel col-xs-12 -low b_trim -international">') for entry in country_match: plugintools.log("entry= "+entry) country = plugintools.find_single_match(entry, '<div class="fll b_channel_name -broadcast -country b_trim_inner">(.*?)</div>').replace("&nbsp;", "").strip() if country != "": channels = "" channel = plugintools.find_multiple_matches(entry, '<div class="fll b_channel_name -broadcast b_trim_inner">(.*?)</div>') for item in channel: if channels == "": channels = item else: channels = channels + ', '+item lstv_file = open(temp + "lstv.tmp", "a") lstv_file.write('[COLOR gold][B]'+country+'[/B][/COLOR][COLOR white]: '+channels+'[/COLOR]\n') lstv_file.close() params["url"] = temp + 'lstv.tmp' txt_reader(params) def lstv3(): params=plugintools.get_params() title = params.get("title").replace("[COLOR white]", "[COLOR lightgreen]") team_a = title.split(" vs ")[0] team_b = title.split(" vs ")[1] url = 'http://m.livesoccertv.com/match/1709586/olympiakos-piraeus-vs-bayern-m-nchen/' data = gethttp_referer_headers(url,url) lstv_file = open(temp + "lstv_stats.tmp", "wb") lstv_file.write("\n[COLOR red]"+title+"[/COLOR]\n") lstv_file.write("\n[COLOR gold]TITULARES[/COLOR]\n") stats = plugintools.find_single_match(data, '<span>Stats</span>(.*?)Substitutes</h3>') players_a = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -right">(.*?)</div>') players_b = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -left">(.*?)</div>') i = 0 while i < len(players_a): players_a[i]=players_a[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip() players_b[i]=players_b[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip() spaces = 80 - len(players_b[i]) plugintools.log("longitud_texto= "+str(len(players_a[i]))) plugintools.log("espacios que faltan= "+str(spaces)) tabulador = "" j = spaces k = 0 while k <= j: tabulador = tabulador + "..." k = k + 1 line_player = players_b[i]+tabulador+players_a[i]+'\n' lstv_file.write(line_player) print line_player i = i + 1 lstv_file.write("\n\n[COLOR gold]SUPLENTES[/COLOR]\n") stats = plugintools.find_single_match(data, 'Substitutes</h3>(.*?)<div id="match-stats"') players_a = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -right">(.*?)</div>') players_b = plugintools.find_multiple_matches(stats, '<div class="fll b_lineup_players b_trim_inner -left">(.*?)</div>') i = 0 while i < len(players_a): players_a[i]=players_a[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip() players_b[i]=players_b[i].replace("</span>", "[/COLOR] ").replace('<span class="b_lineup_number">', '[COLOR lightyellow]').rstrip() spaces = 80 - len(players_b[i]) tabulador = "" j = spaces k = 0 while k <= j: tabulador = tabulador + "..." 
k = k + 1 line_player = players_b[i]+tabulador+players_a[i]+'\n' lstv_file.write(line_player) print line_player i = i + 1 lstv_file.close() params["url"] = temp + 'lstv_stats.tmp' txt_reader(params) def gethttp_referer_headers(url,referer): plugintools.modo_vista("tvshows");request_headers=[] request_headers.append(["User-Agent","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.65 Safari/537.31"]) request_headers.append(["Referer", referer]) body,response_headers = plugintools.read_body_and_headers(url, headers=request_headers); try: r='\'set-cookie\',\s\'([^;]+.)';jar=plugintools.find_single_match(str(response_headers),r);jar=getjad(jar); except: pass try: r='\'location\',\s\'([^\']+)';loc=plugintools.find_single_match(str(response_headers),r); except: pass if loc: request_headers.append(["Referer",url]); if jar: request_headers.append(["Cookie",jar]);#print jar body,response_headers=plugintools.read_body_and_headers(loc,headers=request_headers); try: r='\'set-cookie\',\s\'([^;]+.)';jar=plugintools.find_single_match(str(response_headers),r);jar=getjad(jar); except: pass plugintools.modo_vista("tvshows") return body def diasem(dia): if dia == "Monday": dia = "Lun" elif dia == "Tuesday": dia = "Mar" elif dia == "Wednesday": dia = "Mié" elif dia == "Thursday": dia = "Jue" elif dia == "Friday": dia = "Vie" elif dia == "Saturday": dia = "Sáb" elif dia == "Sunday": dia = "Dom" return dia
gpl-2.0
-7,714,623,710,669,975,000
46.058824
227
0.587083
false
3.276451
false
false
false
mar29th/ring
ring/connection.py
1
8091
# Copyright 2016 Douban Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import socket import os import threading import cPickle from ring.connection_impl import Again from ring.constants import ( TYPE_ACTIVATE_SEND, TYPE_ACTIVATE_RECV, BACKLOG, TYPE_ERROR, TYPE_CLOSED, TYPE_FINALIZE, TYPE_CONNECT_SUCCESS, ERR_CONNRESET ) from ring.events import Mailbox from ring.poller import READ from ring.puller import PullerConnectionImpl from ring.pusher import PusherConnectionImpl from ring.replier import ReplierConnectionImpl from ring.requester import RequesterConnectionImpl from ring.utils import RingError, raise_exc_info _idle = 1 _open = 1 << 1 _closing = 1 << 2 _closed = 1 << 3 REPLIER = 1 REQUESTER = 2 PULLER = 3 PUSHER = 5 NONBLOCK = 1 POLLIN = 1 POLLOUT = 1 << 1 class ConnectionError(RingError): def __init__(self, errno): super(ConnectionError, self).__init__(self, os.strerror(errno)) self.errno = errno class ConnectionInUse(RingError): def __init__(self): super(ConnectionInUse, self).__init__('Connection in use') class ConnectionClosedError(RingError): def __init__(self): super(ConnectionClosedError, self).__init__('Socket closed') class Connection(object): def __init__(self, type, ctx): self._socket = None self._type = type self._state = _idle self._context = ctx self._target_addr = None self._target_port = None self._bound_addr = None self._bound_port = None self._mailbox = Mailbox() self._lock = threading.RLock() def bind(self, target): if self._state & (_closing | _closed): raise ConnectionClosedError if self._state != _idle: raise ConnectionInUse if not self._type & (REPLIER | PULLER): raise NotImplementedError('Bind is not applicable to such type of socket') self._bound_addr = target[0] self._bound_port = target[1] self._initialize_socket() self._state = _open self._socket.bind(target) self._socket.listen(BACKLOG) self._initialize_impl() def connect(self, target): if self._state & (_closing | _closed): raise ConnectionClosedError if self._state != _idle: raise ConnectionInUse if not self._type & (REQUESTER | PUSHER): raise NotImplementedError('Connect is not applicable to such type of socket') self._target_addr = target[0] self._target_port = target[1] self._state = _open self._initialize_socket() self._initialize_impl() self._impl.connect(target) self._process_commands(None) def close(self): if self._state != _open: raise ConnectionClosedError self._impl.close() self._context.reaper.register( self._mailbox.waker_fd, READ, lambda fd, events: self._process_commands(0)) self._state = _closing def _initialize_socket(self): self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self._socket.setblocking(0) if self._type & (REPLIER | PULLER): self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) def _initialize_impl(self): if self._type == REPLIER: self._impl = ReplierConnectionImpl(self._socket, self._context, self._mailbox) elif self._type == REQUESTER: self._impl = RequesterConnectionImpl(self._socket, self._context, self._mailbox) elif self._type == PULLER: self._impl = 
PullerConnectionImpl(self._socket, self._context, self._mailbox) elif self._type == PUSHER: self._impl = PusherConnectionImpl(self._socket, self._context, self._mailbox) else: raise RuntimeError('Type not implemented') def _process_commands(self, timeout): while 1: try: result = self._mailbox.recv(timeout) except Again: return else: cmd = result.command if cmd == TYPE_ACTIVATE_SEND: self._impl.activate_send(*result.args) elif cmd == TYPE_ACTIVATE_RECV: self._impl.activate_recv(*result.args) elif cmd == TYPE_CONNECT_SUCCESS: # Nothing to be done. We're just attempting to block here. pass elif cmd == TYPE_ERROR: self._impl.connection_close(*result.args) if not getattr(result.args[1][1], 'errno', -1) in ERR_CONNRESET: # Only raise the exception when the error is not connection reset raise_exc_info(result.args[1]) elif cmd == TYPE_CLOSED: self._impl.connection_close(*result.args) elif cmd == TYPE_FINALIZE: self._impl.connection_finalize() self._connection_finalize() # Finalize event should break immediately as everything is closed. break else: raise RuntimeError('Received undefined command %s' % (cmd,)) # Rerun. Set timeout to 0. timeout = 0 def _connection_finalize(self): self._socket.close() self._context.reaper.unregister(self._mailbox.waker_fd) self._mailbox.close() self._state = _closed def getsockname(self): if self._state != _open: raise ConnectionClosedError return self._socket.getsockname() def poll(self, events): return (POLLIN & events & self._impl.recv_available()) | \ (POLLOUT & events & self._impl.send_available()) << 1 def recv(self, flags=0): if self._state != _open: raise ConnectionClosedError # Process once self._process_commands(0) # Receive once try: return self._impl.recv() except Again: if not flags & NONBLOCK: # If the connection should block, wait until recv is activated pass else: # If user wants nonblocking send, just raise Again to user raise # Let's wait while 1: self._process_commands(None) try: return self._impl.recv() except Again: continue def send(self, data, flags=0): if self._state != _open: raise ConnectionClosedError # Process once self._process_commands(0) # Send once try: self._impl.send(data) except Again: if not flags & NONBLOCK: # If the connection should block, wait until send is activated pass else: # If user wants nonblocking send, just raise Again to user raise # Let's wait while 1: self._process_commands(None) try: self._impl.send(data) break except Again: continue def recv_pyobj(self, flags=0): return cPickle.loads(self.recv(flags=flags)) def send_pyobj(self, data, flags=0): self.send(cPickle.dumps(data), flags=flags) __all__ = ['Connection', 'REPLIER', 'REQUESTER', 'PULLER', 'PUSHER', 'NONBLOCK']
apache-2.0
-2,285,787,492,178,655,700
30.119231
92
0.577555
false
4.283219
false
false
false
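The record above defines a blocking socket wrapper (Connection) with ZeroMQ-like REPLIER/REQUESTER and PULLER/PUSHER roles. A minimal usage sketch follows; it relies only on the methods shown in the record, but the import paths and the context object are assumptions (the Context class and the module's location do not appear in this record), so treat it as illustrative rather than as the library's documented API.

# Illustrative sketch only. The import paths and the Context() constructor are
# assumptions; Connection's own methods (bind/connect/send_pyobj/recv_pyobj/close)
# are taken from the record above.
from ring.context import Context                      # assumed location of the context class
from ring.connection import Connection, REPLIER, REQUESTER, NONBLOCK  # assumed module path

ctx = Context()                                       # assumed constructor

replier = Connection(REPLIER, ctx)
replier.bind(('127.0.0.1', 9100))                     # bind() is only valid for REPLIER/PULLER

requester = Connection(REQUESTER, ctx)
requester.connect(('127.0.0.1', 9100))                # connect() is only valid for REQUESTER/PUSHER

requester.send_pyobj({'op': 'ping'})                  # pickles the object, blocks until sent
request = replier.recv_pyobj()                        # blocking receive on the replier side
replier.send_pyobj({'op': 'pong'})
reply = requester.recv_pyobj()                        # recv(flags=NONBLOCK) would raise Again instead

requester.close()
replier.close()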
liquidinstruments/pymoku
pymoku/_waveform_generator.py
1
66353
import math import logging import warnings from pymoku._instrument import to_reg_unsigned from pymoku._instrument import from_reg_unsigned from pymoku._instrument import to_reg_signed from pymoku._instrument import from_reg_signed from pymoku._instrument import deprecated from pymoku._instrument import MokuInstrument from pymoku._instrument import needs_commit from pymoku._instrument import ValueOutOfRangeException from pymoku._instrument import DAC_SMP_RATE from pymoku import _utils from pymoku._trigger import Trigger from pymoku._sweep_generator import SweepGenerator warnings.simplefilter('always', DeprecationWarning) log = logging.getLogger(__name__) REG_BASE_MOD_0 = 43 REG_BASE_MOD_1 = 60 REG_BASE_WAV_0 = 80 REG_BASE_WAV_1 = 104 REG_GATETHRESH_L_CH1 = 76 REG_GATETHRESH_H_CH1 = 77 REG_GATETHRESH_L_CH2 = 78 REG_GATETHRESH_H_CH2 = 79 _WG_WAVE_SINE = 0 _WG_WAVE_SQUARE = 1 _WG_MOD_NONE = 0 _WG_MOD_AMPL = 1 _WG_MOD_FREQ = 2 _WG_MOD_PHASE = 4 _WG_MODSOURCE_INT = 0 _WG_MODSOURCE_ADC = 1 _WG_MODSOURCE_DAC = 2 _WG_FREQSCALE = 1.0e9 / 2**64 _WG_FREQSCALE_SQR = 1.0e9 / 2**48 _WG_PERIODSCALE_SQR = 2**48 - 1 _WG_RISESCALE = 2**24 _WG_MAX_RISE = 1.0 / (2 ** 39 - 1) _WG_TIMESCALE = 1.0 / (2**32 - 1) # Doesn't wrap _WG_MOD_FREQ_MAX = 62.5e6 _WG_MOD_DEPTH_MAX = 2.0 ** 31 - 1 # 100% modulation depth in bits _WG_TRIG_ADC1 = 0 _WG_TRIG_ADC2 = 1 _WG_TRIG_DAC1 = 2 _WG_TRIG_DAC2 = 3 _WG_TRIG_EXT = 4 _WG_TRIG_INTER = 5 _WG_MOD_ADC1 = 0 _WG_MOD_ADC2 = 1 _WG_MOD_DAC1 = 2 _WG_MOD_DAC2 = 3 _WG_MOD_INTER = 4 _WG_MOD_GATE = 5 _WG_GATE_ADC = 0 _WG_GATE_DAC = 1 _WG_GATE_SWEEP = 2 _WG_GATE_EXT = 3 _WG_TRIG_MODE_OFF = 0 _WG_TRIG_MODE_GATE = 1 _WG_TRIG_MODE_START = 2 _WG_TRIG_MODE_NCYCLE = 3 _WG_TRIG_MODE_SWEEP = 4 _WG_TRIGLVL_ADC_MAX = 5.0 _WG_TRIGLVL_ADC_MIN = -5.0 _WG_TRIGLVL_DAC_MAX = 1.0 _WG_TRIGLVL_DAC_MIN = -1.0 class BasicWaveformGenerator(MokuInstrument): """ .. 
automethod:: pymoku.instruments.WaveformGenerator.__init__ """ def __init__(self): """ Create a new WaveformGenerator instance, ready to be attached to a Moku.""" super(BasicWaveformGenerator, self).__init__() self._register_accessors(_wavegen_reg_handlers) self.id = 4 self.type = "signal_generator" self._sweep1 = SweepGenerator(self, REG_BASE_WAV_0 + 3) self._sweep2 = SweepGenerator(self, REG_BASE_WAV_1 + 3) self.enable_reset_ch1 = False self.enable_reset_ch2 = False @needs_commit def set_defaults(self): super(BasicWaveformGenerator, self).set_defaults() self.enable_ch1 = True self.enable_ch2 = True self.out1_amplitude = 0 self.out2_amplitude = 0 self.adc1_statuslight = False self.adc2_statuslight = False # Init channel sweep gens: self._set_sweepgenerator(self._sweep1, 0, 0, 0, 0, 0, 0, 0) self._set_sweepgenerator(self._sweep2, 0, 0, 0, 0, 0, 0, 0) # Disable inputs on hardware that supports it self.en_in_ch1 = True self.en_in_ch2 = True # Configure front end: self._set_frontend(channel=1, fiftyr=True, atten=False, ac=False) self._set_frontend(channel=2, fiftyr=True, atten=False, ac=False) def _set_sweepgenerator(self, sweepgen, waveform=None, waitfortrig=None, frequency=None, offset=None, logsweep=None, duration=None, holdlast=None): sweepgen.waveform = 2 sweepgen.stop = (2**64 - 1) sweepgen.direction = 0 if waitfortrig is not None: sweepgen.waitfortrig = waitfortrig if offset is not None: sweepgen.start = offset / 360.0 * (2**64 - 1) if frequency is not None: sweepgen.step = frequency / _WG_FREQSCALE if duration is not None: sweepgen.duration = duration * 125.0e6 if logsweep is not None: sweepgen.logsweep = logsweep if holdlast is not None: sweepgen.holdlast = holdlast @needs_commit def gen_sinewave(self, ch, amplitude, frequency, offset=0, phase=0.0): """ Generate a Sine Wave with the given parameters on the given channel. 
:type ch: int; {1,2} :param ch: Channel on which to generate the wave :type amplitude: float, [0.0,2.0] Vpp :param amplitude: Waveform peak-to-peak amplitude :type frequency: float, [0,250e6] Hz :param frequency: Frequency of the wave :type offset: float, [-1.0,1.0] Volts :param offset: DC offset applied to the waveform :type phase: float, [0-360] degrees :param phase: Phase offset of the wave """ _utils.check_parameter_valid('set', ch, [1, 2], 'output channel') _utils.check_parameter_valid( 'range', amplitude, [0.0, 2.0], 'sinewave amplitude', 'Volts') _utils.check_parameter_valid( 'range', frequency, [0, 250e6], 'sinewave frequency', 'Hz') _utils.check_parameter_valid( 'range', offset, [-1.0, 1.0], 'sinewave offset', 'Volts') _utils.check_parameter_valid( 'range', phase, [0, 360], 'sinewave phase', 'degrees') # Ensure offset does not cause signal to exceed allowable 2.0Vpp range upper_voltage = offset + (amplitude / 2.0) lower_voltage = offset - (amplitude / 2.0) if (upper_voltage > 1.0) or (lower_voltage < -1.0): raise ValueOutOfRangeException( "Sinewave offset limited by amplitude (max output " "range 2.0Vpp).") if ch == 1: self.enable_ch1 = True self._set_sweepgenerator( sweepgen=self._sweep1, frequency=frequency, offset=phase) self.amplitude_ch1 = amplitude self.offset_ch1 = offset self.waveform_type_ch1 = _WG_WAVE_SINE self.phase_dly_ch1 = (11 * frequency / 125e6) % 1 * 2**32 elif ch == 2: self.enable_ch2 = True self._set_sweepgenerator( sweepgen=self._sweep2, frequency=frequency, offset=phase) self.amplitude_ch2 = amplitude self.offset_ch2 = offset self.waveform_type_ch2 = _WG_WAVE_SINE self.phase_dly_ch2 = (11 * frequency / 125e6) % 1 * 2**32 @needs_commit def gen_squarewave(self, ch, amplitude, frequency, offset=0.0, duty=0.5, risetime=0.0, falltime=0.0, phase=0.0): """ Generate a Square Wave with given parameters on the given channel. 
:type ch: int; {1,2} :param ch: Channel on which to generate the wave :type amplitude: float, [0, 2.0] volts :param amplitude: Waveform peak-to-peak amplitude :type frequency: float, [0, 100e6] hertz :param frequency: Frequency of the wave :type offset: float, [-1.0, 1.0] volts :param offset: DC offset applied to the waveform :type duty: float, [0, 1.0] :param duty: Fractional duty cycle :type risetime: float, [0, 1.0] :param risetime: Fraction of a cycle taken for the waveform to rise :type falltime: float [0, 1.0] :param falltime: Fraction of a cycle taken for the waveform to fall :type phase: float, degrees 0-360 :param phase: Phase offset of the wave """ _utils.check_parameter_valid('set', ch, [1, 2], 'output channel') _utils.check_parameter_valid( 'range', amplitude, [0.0, 2.0], 'squarewave amplitude', 'Volts') _utils.check_parameter_valid( 'range', frequency, [0, 100e6], 'squarewave frequency', 'Hz') _utils.check_parameter_valid( 'range', offset, [-1.0, 1.0], 'squarewave offset', 'Volts') _utils.check_parameter_valid( 'range', duty, [0, 1.0], 'squarewave duty', 'cycles') _utils.check_parameter_valid( 'range', risetime, [0, 1.0], 'squarewave risetime', 'cycles') _utils.check_parameter_valid( 'range', falltime, [0, 1.0], 'squarewave falltime', 'cycles') _utils.check_parameter_valid( 'range', phase, [0, 360], 'squarewave phase', 'degrees') # Ensure offset does not cause signal to exceed allowable 2.0Vpp range upper_voltage = offset + (amplitude / 2.0) lower_voltage = offset - (amplitude / 2.0) if (upper_voltage > 1.0) or (lower_voltage < -1.0): raise ValueOutOfRangeException( "Squarewave offset limited by amplitude (max output " "range 2.0Vpp).") frequency = float(frequency) if duty < risetime: raise ValueOutOfRangeException( "Squarewave duty too small for given rise time.") elif duty + falltime > 1: raise ValueOutOfRangeException( "Squarewave duty and fall time too big.") # ensure duty cycle and fall/rise time combinations don't overflow if frequency != 0: minedgetime = 4.0e-9 * frequency if risetime < minedgetime: risetime = minedgetime log.warning( "WARNING: Risetime restricted to minimum value of 4 ns.") if falltime < minedgetime: falltime = minedgetime log.warning( "WARNING: Falltime restricted to minimum value of 4 ns.") if duty < minedgetime: duty = minedgetime log.warning("WARNING: Duty cycle restricted to %s" % duty) if duty > 1 - minedgetime: duty = 1 - minedgetime log.warning("WARNING: Duty cycle restricted to %s" % duty) if risetime > 1 - minedgetime: risetime = 1 - minedgetime log.warning("WARNING: Risetime restricted to maximum value.") if falltime > 1 - minedgetime: falltime = 1 - minedgetime log.warning("WARNING: Falltime restricted to maximum value.") else: falltime = _WG_MAX_RISE risetime = _WG_MAX_RISE # Set rise/fall rate and t0, t1 and t2 t0 = risetime t1 = duty t2 = duty + falltime phase_dly = 0 if ch == 1: self.waveform_type_ch1 = _WG_WAVE_SQUARE self.enable_ch1 = True self._set_sweepgenerator(sweepgen=self._sweep1, frequency=frequency, offset=phase, holdlast=0) self.amplitude_ch1 = amplitude self.offset_ch1 = offset # This is overdefined, but saves the FPGA doing a tricky division self.t0_ch1 = t0 self.t1_ch1 = t1 self.t2_ch1 = t2 self.riserate_ch1 = risetime self.fallrate_ch1 = -falltime self.phase_dly_ch1 = phase_dly elif ch == 2: self.waveform_type_ch2 = _WG_WAVE_SQUARE self.enable_ch2 = True self._set_sweepgenerator(sweepgen=self._sweep2, frequency=frequency, offset=phase, holdlast=0) self.amplitude_ch2 = amplitude self.offset_ch2 = offset self.t0_ch2 = t0 
self.t1_ch2 = t1 self.t2_ch2 = t2 self.riserate_ch2 = risetime self.fallrate_ch2 = -falltime self.phase_dly_ch2 = phase_dly @needs_commit def gen_rampwave( self, ch, amplitude, frequency, offset=0, symmetry=0.5, phase=0.0): """ Generate a Ramp with the given parameters on the given channel. This is a wrapper around the Square Wave generator, using the *riserate* and *fallrate* parameters to form the ramp. :type ch: int; {1,2} :param ch: Channel on which to generate the wave :type amplitude: float, [0, 2.0] volts :param amplitude: Waveform peak-to-peak amplitude :type frequency: float, [0, 100e6] hertz :param frequency: Frequency of the wave :type offset: float, [-1.0, 1.0] volts :param offset: DC offset applied to the waveform :type symmetry: float, [0, 1.0] :param symmetry: Fraction of the cycle rising. :type phase: float, degrees [0, 360] :param phase: Phase offset of the wave """ _utils.check_parameter_valid('set', ch, [1, 2], 'output channel') _utils.check_parameter_valid( 'range', amplitude, [0.0, 2.0], 'rampwave amplitude', 'Volts') _utils.check_parameter_valid( 'range', frequency, [0, 100e6], 'rampwave frequency', 'Hz') _utils.check_parameter_valid( 'range', offset, [-1.0, 1.0], 'rampwave offset', 'cycles') _utils.check_parameter_valid( 'range', symmetry, [0, 1.0], 'rampwave symmetry', 'fraction') _utils.check_parameter_valid( 'range', phase, [0, 360], 'rampwave phase', 'degrees') # Ensure offset does not cause signal to exceed allowable 2.0Vpp range upper_voltage = offset + (amplitude / 2.0) lower_voltage = offset - (amplitude / 2.0) if (upper_voltage > 1.0) or (lower_voltage < -1.0): raise ValueOutOfRangeException( "Rampwave offset limited by amplitude " "(max output range 2.0Vpp).") self.gen_squarewave(ch, amplitude, frequency, offset=offset, duty=symmetry, risetime=symmetry, falltime=1 - symmetry, phase=phase) @needs_commit def sync_phase(self): """ Synchronize the phase of both output channels. The phase of both channels is reset to their respestive phase offset values. """ self.enable_reset_ch1 = True self.enable_reset_ch2 = True @needs_commit def gen_off(self, ch=None): """ Turn Waveform Generator output(s) off. The channel will be turned on when configuring the waveform type but can be turned off using this function. If *ch* is None (the default), both channels will be turned off, otherwise just the one specified by the argument. :type ch: int; {1,2} or None :param ch: Channel to turn off, or both. """ _utils.check_parameter_valid( 'set', ch, [1, 2], 'output channel', allow_none=True) if ch is None or ch == 1: self.enable_ch1 = False if ch is None or ch == 2: self.enable_ch2 = False class WaveformGenerator(BasicWaveformGenerator): """ Waveform Generator instrument object. To run a new Waveform Generator instrument, this should be instantiated and deployed via a connected :any:`Moku` object using :any: `deploy_instrument`. Alternatively, a pre-configured instrument object can be obtained by discovering an already running Waveform Generator instrument on a Moku:Lab device via :any:`discover_instrument`. .. automethod:: pymoku.instruments.WaveformGenerator.__init__ .. attribute:: type :annotation: = "signal_generator" Name of this instrument. 
""" def __init__(self): """ Create a new WaveformGenerator instance, ready to be attached to a Moku.""" super(WaveformGenerator, self).__init__() self._register_accessors(_wavegen_mod_reg_handlers) # Define any (non-register-mapped) properties that are used when # committing as a commit is called when the instrument is set running self.trig_volts_ch1 = 0.0 self.trig_volts_ch2 = 0.0 self._trigger1 = Trigger(self, 28) self._trigger2 = Trigger(self, 45) self._sweepmod1 = SweepGenerator(self, 34) self._sweepmod2 = SweepGenerator(self, 51) @needs_commit def set_defaults(self): super(WaveformGenerator, self).set_defaults() self._init_trig_modulation(1) self._init_trig_modulation(2) self.phasedly_en_ch1 = 1 self.phasedly_en_ch2 = 1 self.sine_trigdly_ch1 = 0 self.sine_trigdly_ch2 = 0 def _init_trig_modulation(self, ch): # initialise the state of all modules used in modulation/trigger/sweep # modes if ch == 1: # Set AM/FM/PM and sweep enable to zero: self.amod_enable_ch1 = False self.fmod_enable_ch1 = False self.pmod_enable_ch1 = False self.sweep_enable_ch1 = False # Default trigger module values: self._trigger1.trigtype = 0 self._trigger1.edge = 0 self._trigger1.pulsetype = 0 self._trigger1.hysteresis = 0 self._trigger1.timer = 0 self._trigger1.holdoff = 0 self._trigger1.auto_holdoff = 0 self._trigger1.ntrigger = 0 self._trigger1.ntrigger_mode = 0 self._trigger1.level = 0 self._trigger1.duration = 0 # Default modulating sweep generator values: self._sweepmod1.waveform = 0 self._sweepmod1.waitfortrig = 0 self._sweepmod1.holdlast = 0 self._sweepmod1.direction = 0 self._sweepmod1.logsweep = 0 self._sweepmod1.start = 0 self._sweepmod1.stop = 0 self._sweepmod1.step = 0 self._sweepmod1.duration = 0 # Trigger/modulation/gate source/threshold default values: self.trig_source_ch1 = _WG_TRIG_ADC1 self.mod_source_ch1 = _WG_MOD_ADC1 self.gate_thresh_ch1 = 0 self.mod_depth_ch1 = 0 # Default waveform sweep generator values that are touched in # modulation/trigger/sweep modes: self._sweep1.waitfortrig = 0 self._sweep1.duration = 0 self._sweep1.holdlast = 0 # Gated mode flag used to toggle amplitude division by 2 on # the FPGA self.gate_mode_ch1 = 0 # Trigger mode flag to enable calibration calculations in # _update_dependent_regs function self.trig_sweep_mode_ch1 = 0 # Phase delay flag, trig delay flag self.phasedly_en_ch1 = 1 self.sine_trigdly_ch1 = 0 else: # Set AM/FM/PM and sweep enable to zero: self.amod_enable_ch2 = False self.fmod_enable_ch2 = False self.pmod_enable_ch2 = False self.sweep_enable_ch2 = False # Default trigger module values: self._trigger2.trigtype = 0 self._trigger2.edge = 0 self._trigger2.pulsetype = 0 self._trigger2.hysteresis = 0 self._trigger2.timer = 0 self._trigger2.holdoff = 0 self._trigger2.auto_holdoff = 0 self._trigger2.ntrigger = 0 self._trigger2.ntrigger_mode = 0 self._trigger2.level = 0 self._trigger2.duration = 0 # Default modulating sweep generator values: self._sweepmod2.waveform = 0 self._sweepmod2.waitfortrig = 0 self._sweepmod2.holdlast = 0 self._sweepmod2.direction = 0 self._sweepmod2.logsweep = 0 self._sweepmod2.start = 0 self._sweepmod2.stop = 0 self._sweepmod2.step = 0 self._sweepmod2.duration = 0 # Trigger/modulation/gate source/threshold default values: self.trig_source_ch2 = _WG_TRIG_ADC2 self.mod_source_ch2 = _WG_MOD_ADC2 self.gate_thresh_ch2 = 0 self.mod_depth_ch2 = 0 # Default waveform sweep generator values that are touched in # modulation/trigger/sweep modes: self._sweep2.waitfortrig = 0 self._sweep2.duration = 0 self._sweep2.holdlast = 0 # Gated mode flag 
used to toggle amplitude division by 2 on # the FPGA self.gate_mode_ch2 = 0 # Trigger mode flag to enable calibration calculations in # _update_dependent_regs function self.trig_sweep_mode_ch2 = 0 # Phase delay flag, trig delay flag self.phasedly_en_ch2 = 1 self.sine_trigdly_ch2 = 0 @needs_commit @deprecated(category='param', message="'in' and 'out' trigger sources have been deprecated." " Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.") def set_trigger(self, ch, mode, ncycles=1, sweep_start_freq=None, sweep_end_freq=0, sweep_duration=1.0e-3, trigger_source='adc1', trigger_threshold=0.0, internal_trig_period=1.0, internal_trig_high=0.5): """ Configure gated, start, ncycle or sweep trigger mode on target channel. The trigger event can come from an ADC input channel, the opposite generated waveform, the external trigger input (for hardware that supports that) or an internally-generated clock of configurable period. The trigger event can be used in several different ways: - *gated*: The output waveform is only generated while the trigger is asserted - *start*: The output waveform is enabled once the trigger event fires - *ncycle*: The output waveform starts at a trigger event and completes the given number of cycles, before turning off and re-arming - *sweep*: The trigger event starts the waveform generation at the *sweep_start_freq*, before automatically sweeping the frequency to *sweep_end_freq* over the course of *sweep_duration* seconds. :type ch: int :param ch: target channel. :type mode: string, {'gated', 'start', 'ncycle', 'sweep', 'off'} :param mode: Select the mode in which the trigger is operated. :type ncycles: int, [1, 1e6] :param ncycles: integer number of signal repetitions in ncycle mode. :type sweep_start_freq: float, [0.0,250.0e6], hertz :param sweep_start_freq: starting sweep frequency, set to current waveform frequency if not specified. Value range may vary for different waveforms. :type sweep_end_freq: float, [0.0,250.0e6], hertz :param sweep_end_freq: finishing sweep frequency. Value range may vary for different waveforms. :type sweep_duration: float, [1.0e-3,1000.0], seconds :param sweep_duration: sweep duration in seconds. :type trigger_source: string {'adc1','adc2', 'dac1', 'dac2', 'external', 'internal', 'in', 'out'} :param trigger_source: defines which source should be used as the triggering signal. In and out sources are deprecated. :type trigger_threshold: float, [-5, 5], volts :param trigger_threshold: The threshold value range depends on the source and the attenuation used. Value ranges may be narrower for different settings. :type internal_trig_period: float, [0,1e11], seconds :param internal_trig_period: period of the internal trigger clock, if used. :type internal_trig_high: float, [0,1e11], seconds :param internal_trig_high: High time of the internal trigger clock, if used. Must be less than the internal trigger period.
""" _utils.check_parameter_valid('set', ch, [1, 2], 'output channel') _utils.check_parameter_valid( 'set', mode, ['gated', 'start', 'ncycle', 'sweep'], 'trigger mode') _utils.check_parameter_valid( 'set', trigger_source, ['adc1', 'adc2', 'dac1', 'dac2', 'external', 'internal', 'in', 'out'], 'trigger source') _utils.check_parameter_valid('range', ncycles, [1, 1e6], 'ncycles') _utils.check_parameter_valid( 'range', sweep_duration, [0.001, 1000.0], 'sweep duration', 'seconds') _utils.check_parameter_valid( 'range', internal_trig_period, [100.0e-9, 1000.0], 'internal trigger period', 'seconds') _utils.check_parameter_valid( 'range', internal_trig_high, [10.0e-9, 1000.0], 'internal trigger high time', 'seconds') if trigger_source in ['in', 'out']: warnings.warn( message="'in' and 'out' trigger sources have been deprecated. " "Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.", category=DeprecationWarning, stacklevel=1 ) # 'in' and 'out' trigger sources are deprecated sources. # Convert to adc/dac source type: if ch == 1: if trigger_source == 'in': trigger_source = 'adc1' elif trigger_source == 'out': trigger_source = 'dac2' if ch == 2: if trigger_source == 'in': trigger_source = 'adc2' elif trigger_source == 'out': trigger_source = 'dac1' # Can't use current channel as trigger mode source: if ch == 1 and trigger_source == 'dac1': raise ValueOutOfRangeException( "dac1 cannot be used as the trigger source for trigger " "mode on channel 1.") elif ch == 2 and trigger_source == 'dac2': raise ValueOutOfRangeException( "dac2 cannot be used as the trigger source for trigger " "mode on channel 2.") # Can't use modulation with trigger/sweep modes self.set_modulate_trig_off(ch) # Configure trigger and source settings: if ch == 1: _WG_TRIG_ADC = _WG_TRIG_ADC2 _WG_TRIG_DAC = _WG_TRIG_DAC1 else: _WG_TRIG_ADC = _WG_TRIG_ADC1 _WG_TRIG_DAC = _WG_TRIG_DAC2 _str_to_trigger_source = { 'adc1': _WG_TRIG_ADC1, 'adc2': _WG_TRIG_ADC2, 'dac1': _WG_TRIG_DAC1, 'dac2': _WG_TRIG_DAC2, 'external': _WG_TRIG_EXT, 'internal': _WG_TRIG_INTER } trigger_source = _utils.str_to_val(_str_to_trigger_source, trigger_source, 'trigger source') if trigger_source is _WG_TRIG_ADC: _utils.check_parameter_valid('range', trigger_threshold, [_WG_TRIGLVL_ADC_MIN, _WG_TRIGLVL_ADC_MAX], 'trigger threshold', 'Volts') elif trigger_source is _WG_TRIG_DAC: _utils.check_parameter_valid('range', trigger_threshold, [_WG_TRIGLVL_DAC_MIN, _WG_TRIGLVL_DAC_MAX], 'trigger threshold', 'Volts') # The internal trigger's duty cycle is only used in gated burst mode. # Duty cycle is limited such that the duty period is not # less than 8 ns and not greater than the trigger period minus 8 ns. 
if internal_trig_high > internal_trig_period: raise ValueOutOfRangeException( "Internal trigger high must be less" " than or equal to the internal trigger period.") if (internal_trig_period - internal_trig_high) <= 8.0e-9: internal_trig_high = internal_trig_period - 10.0e-9 if ch == 1: self._trigger1.trigtype = 0 self._trigger1.edge = 0 self.trig_sweep_mode_ch1 = 1 elif ch == 2: self._trigger1.trigtype = 0 self._trigger1.edge = 0 self.trig_sweep_mode_ch2 = 1 # Configure trigger mode settings: _str_to_trigger_mode = { 'gated': _WG_TRIG_MODE_GATE, 'start': _WG_TRIG_MODE_START, 'ncycle': _WG_TRIG_MODE_NCYCLE, 'sweep': _WG_TRIG_MODE_SWEEP } mode = _utils.str_to_val(_str_to_trigger_mode, mode, 'trigger mode') # set status light register if ch == 1: self.adc1_statuslight = True if ( trigger_source == _WG_TRIG_ADC1) else False else: self.adc2_statuslight = True if ( trigger_source == _WG_TRIG_ADC2) else False if sweep_start_freq is None or mode != _WG_TRIG_MODE_SWEEP: channel_frequency = (self._sweep1.step * _WG_FREQSCALE) \ if ch == 1 else (self._sweep2.step * _WG_FREQSCALE) else: channel_frequency = sweep_start_freq waveform = self.waveform_type_ch1 if ch == 1 else \ self.waveform_type_ch2 # if waveform is a sinewave certain ranges do change if waveform == _WG_WAVE_SINE: _utils.check_parameter_valid('range', sweep_end_freq, [0.0, 250.0e6], 'sweep finishing frequency', 'frequency') _utils.check_parameter_valid('range', channel_frequency, [0.0, 250.0e6], 'sweep starting frequency', 'frequency') else: _utils.check_parameter_valid('range', sweep_end_freq, [0.0, 100.0e6], 'sweep finishing frequency', 'frequency') _utils.check_parameter_valid('range', channel_frequency, [0.0, 100.0e6], 'sweep starting frequency', 'frequency') # minimum frequency deviation in sweep mode is 1 mHz if abs(channel_frequency - sweep_end_freq) < 1.0e-3: raise ValueOutOfRangeException( "Frequency deviation in sweep mode is restricted to values " "greater than 1 mHz.") if mode == _WG_TRIG_MODE_GATE: self._set_trigger_gated(ch, waveform, trigger_source, trigger_threshold, internal_trig_period, internal_trig_high) elif mode == _WG_TRIG_MODE_START: self._set_trigger_start(ch, trigger_source, trigger_threshold) elif mode == _WG_TRIG_MODE_NCYCLE: self._set_trigger_ncycle(ch, channel_frequency, ncycles, trigger_threshold, trigger_source, internal_trig_period) elif mode == _WG_TRIG_MODE_SWEEP: self._set_trigger_sweep(ch, waveform, trigger_source, sweep_end_freq, channel_frequency, sweep_duration, trigger_threshold) def _set_trigger_gated(self, ch, waveform, trigger_source, trigger_threshold, internal_trig_period, internal_trig_high): # Threshold calculations. 
Calibration is applied in # _update_dependent_regs if trigger_source == _WG_TRIG_EXT: trigger_threshold = 0 elif trigger_source == _WG_TRIG_INTER: trigger_threshold = -2 ** 47 + ( 1.0 - internal_trig_high / internal_trig_period) * ( 2 ** 48 - 1) if ch == 1: self._sweepmod1.step = 1 / internal_trig_period / _WG_FREQSCALE self._sweepmod1.waveform = 2 self._sweepmod1.direction = 1 else: self._sweepmod2.step = 1 / internal_trig_period / _WG_FREQSCALE self._sweepmod2.waveform = 2 self._sweepmod2.direction = 1 if ch == 1: self.amod_enable_ch1 = True self.mod_source_ch1 = _WG_MOD_GATE self.mod_depth_uncalibrated_ch1 = 1.0 self._sweep1.waitfortrig = 0 self.trig_source_ch1 = trigger_source self.gate_thresh_uncalibrated_ch1 = trigger_threshold self.gate_mode_ch1 = 1 elif ch == 2: self.amod_enable_ch2 = True self.mod_source_ch2 = _WG_MOD_GATE self.mod_depth_uncalibrated_ch2 = 1.0 self._sweep2.waitfortrig = 0 self.trig_source_ch2 = trigger_source self.gate_thresh_uncalibrated_ch2 = trigger_threshold self.gate_mode_ch2 = 1 def _set_trigger_start(self, ch, trigger_source, trigger_threshold): # Internal trigger source cannot be used for burst start mode: if trigger_source == _WG_TRIG_INTER: raise ValueOutOfRangeException("The internal trigger source cannot" " be used in start burst mode.") # Calculate threshold level and configure modulating sweep generator. # Calibration is added to threshold in _set_dependent_regs. if trigger_source == _WG_TRIG_EXT: trigger_threshold = 0 if ch == 1: self._sweepmod1.direction = 1 elif ch == 2: self._sweepmod2.direction = 1 if ch == 1: self.trigger_threshold_uncalibrated_ch1 = trigger_threshold self.trig_source_ch1 = trigger_source self._sweep1.waitfortrig = 1 self._sweep1.duration = 0 self.enable_reset_ch1 = True self.phasedly_en_ch1 = 0 self.sine_trigdly_ch1 = 1 if self.waveform_type_ch1 == \ _WG_WAVE_SINE else 0 elif ch == 2: self.trigger_threshold_uncalibrated_ch2 = trigger_threshold self.trig_source_ch2 = trigger_source self._sweep2.waitfortrig = 1 self._sweep2.duration = 0 self.enable_reset_ch2 = True self.phasedly_en_ch2 = 0 self.sine_trigdly_ch2 = 1 if self.waveform_type_ch2 == \ _WG_WAVE_SINE else 0 def _set_trigger_ncycle(self, ch, channel_frequency, ncycles, trigger_threshold, trigger_source, internal_trig_period): # Waveform frequencies are restricted to <= 10 MHz in Ncycle burst # mode: if channel_frequency > 10.0e6: raise ValueOutOfRangeException( "Waveform frequencies are restricted to 10 MHz or less in" " Ncycle burst mode.") # Calculate threshold level and configure modulating sweep generator. # Calibration is added to threshold in _set_dependent_regs. 
if trigger_source == _WG_TRIG_EXT: trigger_threshold = 0 elif trigger_source == _WG_TRIG_INTER: trigger_threshold = 0 if ch == 1: self._set_sweepgenerator(sweepgen=self._sweepmod1, waveform=2, waitfortrig=0, frequency=1.0 / internal_trig_period, offset=0, logsweep=0, duration=0, holdlast=0) self._sweepmod1.direction = 1 elif ch == 2: self._set_sweepgenerator(sweepgen=self._sweepmod2, waveform=2, waitfortrig=0, frequency=1.0 / internal_trig_period, offset=0, logsweep=0, duration=0, holdlast=0) self._sweepmod2.direction = 1 # ensure combination of signal frequency and Ncycles doesn't cause # 64 bit register overflow: FPGA_cycles = (math.floor( 125e6 / channel_frequency * ncycles) - 1) if \ channel_frequency != 0.0 else 0 if FPGA_cycles > 2**63 - 1: raise ValueOutOfRangeException("NCycle Register Overflow") if ch == 1: self.trigger_threshold_uncalibrated_ch1 = trigger_threshold self.trig_source_ch1 = trigger_source self._sweep1.waitfortrig = 1 self._sweep1.duration = FPGA_cycles self._sweep1.holdlast = 0 self.enable_reset_ch1 = True self.phasedly_en_ch1 = 0 self.sine_trigdly_ch1 = 1 if \ self.waveform_type_ch1 == _WG_WAVE_SINE else 0 elif ch == 2: self.trigger_threshold_uncalibrated_ch2 = trigger_threshold self.trig_source_ch2 = trigger_source self._sweep2.waitfortrig = 1 self._sweep2.duration = FPGA_cycles self._sweep2.holdlast = 0 self.enable_reset_ch2 = True self.phasedly_en_ch2 = 0 self.sine_trigdly_ch2 = 1 if \ self.waveform_type_ch2 == _WG_WAVE_SINE else 0 def _set_trigger_sweep(self, ch, waveform, trigger_source, sweep_end_freq, channel_frequency, sweep_duration, trigger_threshold): # Calculate threshold level and enable/disable continuous sweep. # Calibration is added to threshold in _set_dependent_regs. if trigger_source == _WG_TRIG_EXT: trigger_threshold = 0 mod_continuous_sweep = 1 elif trigger_source == _WG_TRIG_INTER: trigger_threshold = 1 mod_continuous_sweep = 0 else: mod_continuous_sweep = 1 # calculate sweep parameters: mod_start_freq = 0 range_shift = 0 deltafreq_persecond = (sweep_end_freq - channel_frequency) / ( sweep_duration) mod_step = abs(2.0**64 / 1e18 * deltafreq_persecond) mod_duration_FPGAcycles = math.floor(sweep_duration * 125e6) mod_stop_freq = mod_step * 1e9 * sweep_duration range_shift = min( math.floor(abs(math.log(max(mod_step / 2.0**64, mod_stop_freq / 2.0**64), 2))), 63) mod_step *= 2**range_shift mod_stop_freq *= 2**range_shift # check if reverse sweep: if (sweep_end_freq - channel_frequency) < 0: mod_direction = 1 else: mod_direction = 0 if ch == 1: self._set_sweepgenerator(sweepgen=self._sweep1, frequency=channel_frequency, waitfortrig=0) self._sweepmod1.waitfortrig = mod_continuous_sweep self._sweepmod1.start = mod_start_freq self._sweepmod1.stop = mod_stop_freq self._sweepmod1.step = mod_step self._sweepmod1.duration = mod_duration_FPGAcycles self._sweepmod1.direction = 0 self.reverse_sweep_ch1 = mod_direction self._sweepmod1.waveform = 2 self._sweepmod1.holdlast = 0 self.amod_enable_ch1 = False self.pmod_enable_ch1 = False self.fmod_enable_ch1 = False self.sweep_enable_ch1 = True self.trig_source_ch1 = trigger_source self.trigger_threshold_uncalibrated_ch1 = trigger_threshold self.range_shift_ch1 = range_shift else: self._set_sweepgenerator(sweepgen=self._sweep2, frequency=channel_frequency, waitfortrig=0) self._sweepmod2.waitfortrig = mod_continuous_sweep self._sweepmod2.start = mod_start_freq self._sweepmod2.stop = mod_stop_freq self._sweepmod2.step = mod_step self._sweepmod2.duration = mod_duration_FPGAcycles self._sweepmod2.direction = 0 
self.reverse_sweep_ch2 = mod_direction self._sweepmod2.waveform = 2 self._sweepmod2.holdlast = 0 self.amod_enable_ch2 = False self.pmod_enable_ch2 = False self.fmod_enable_ch2 = False self.sweep_enable_ch2 = True self.trig_source_ch2 = trigger_source self.trigger_threshold_uncalibrated_ch2 = trigger_threshold self.range_shift_ch2 = range_shift @needs_commit @deprecated(category='method', message="'gen_modulate_off' has been " "deprecated. Use set_modulate_trig_off instead.") def gen_modulate_off(self, ch=None): """ 'gen_modulate_off' has been deprecated. Use set_modulate_trig_off instead. Turn off modulation for the specified output channel. If *ch* is None (the default), both channels will be turned off, otherwise just the one specified by the argument. :type ch: int; {1,2} or None :param ch: Output channel to turn modulation off. """ # warnings.warn("'gen_modulate_off' has been deprecated. Use # set_modulate_trig_off instead.", DeprecationWarning) self.set_modulate_trig_off(ch) @needs_commit @deprecated(category='method', message="'gen_trigger_off' has been " "deprecated. Use set_modulate_trig_off instead.") def gen_trigger_off(self, ch=None): """ 'gen_trigger_off' has been deprecated. Use set_modulate_trig_off instead." Turn off trigger/sweep mode for the specified output channel. If *ch* is None (the default), both channels will be turned off, otherwise just the one specified by the argument. :type ch: int; {1,2} or None :param ch: Output channel to turn trigger/sweep mode off """ # warnings.warn("'gen_trigger_off' has been deprecated. Use # set_modulate_trig_off instead.", DeprecationWarning) self.set_modulate_trig_off(ch) @needs_commit def set_modulate_trig_off(self, ch=None): """ Turn off modulation and trigger modes for the specified output channel. If *ch* is None (the default), both channels will be turned off, otherwise just the one specified by the argument. :type ch: int; {1,2} or None :param ch: Output channel to turn modulation off. """ _utils.check_parameter_valid('set', ch, [1, 2], 'output channel', allow_none=True) self._init_trig_modulation(ch) @needs_commit @deprecated(category='param', message="'in' and 'out' modulation sources have been " "deprecated. Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.") def gen_modulate(self, ch, mtype, source, depth, frequency=0.0): """ Set up modulation on an output channel. :type ch: int; {1,2} :param ch: Channel to modulate :type mtype: string, {'amplitude', 'frequency', 'phase'} :param mtype: Modulation type. Respectively Off, Amplitude, Frequency and Phase modulation. :type source: string, {'adc1', 'adc2', 'dac1', 'dac2', 'internal', 'in', 'out'} :param source: Modulation source. Respectively Internal Sinewave, associated input channel or opposite output channel. In and out sources are deprecated. :type depth: float 0-1, 0-125MHz or 0 - 360 deg :param depth: Modulation depth (depends on modulation type): Fractional modulation depth, Frequency Deviation/Volt or +/- phase shift/Volt :type frequency: float :param frequency: Frequency of internally-generated sine wave modulation. This parameter is ignored if the source is set to ADC or DAC. 
:raises ValueOutOfRangeException: if the channel number is invalid or modulation parameters can't be achieved """ _utils.check_parameter_valid('set', ch, [1, 2], 'modulation channel') _utils.check_parameter_valid( 'range', frequency, [0, 250e6], 'internal modulation frequency') _utils.check_parameter_valid( 'set', mtype, ['amplitude', 'frequency', 'phase'], 'modulation type') _utils.check_parameter_valid( 'set', source, ['adc1', 'adc2', 'dac1', 'dac2', 'internal', 'in', 'out'], 'modulation source') if source in ['in', 'out']: warnings.warn( message="'in' and 'out' modulation sources have been " "deprecated. Use 'adc1', 'adc2', 'dac1' or 'dac2' instead.", category=DeprecationWarning, stacklevel=1 ) # 'in' and 'out' sources are deprecated sources. Convert to adc/dac # source type: if ch == 1: if source == 'in': source = 'adc1' elif source == 'out': source = 'dac2' if ch == 2: if source == 'in': source = 'adc2' elif source == 'out': source = 'dac1' # Can't use current channel as trigger mode source: if ch == 1 and source == 'dac1': raise ValueOutOfRangeException( "dac1 cannot be used as the modulation source for channel 1.") elif ch == 2 and source == 'dac2': raise ValueOutOfRangeException( "dac2 cannot be used as the modulation source for channel 2.") _str_to_modsource = { 'adc1': _WG_MOD_ADC1, 'adc2': _WG_MOD_ADC2, 'dac1': _WG_MOD_DAC1, 'dac2': _WG_MOD_DAC2, 'internal': _WG_MOD_INTER } _str_to_modtype = { 'amplitude': _WG_MOD_AMPL, 'frequency': _WG_MOD_FREQ, 'phase': _WG_MOD_PHASE } source = _utils.str_to_val( _str_to_modsource, source, 'modulation source') mtype = _utils.str_to_val( _str_to_modtype, mtype, 'modulation source') # Maximum achievable modulation depth is limited when frontend # attenuation is not enabled if self.atten_compensate_ch1 == 0: logging.warning("+/- 0.5 V voltage range is selected on input " "channel 1. Maximum achievable modulation depth " "may be limited.") if self.atten_compensate_ch2 == 0: logging.warning("+/- 0.5 V voltage range is selected on input " "channel 2. Maximum achievable modulation depth " "may be limited.") # Calculate the depth value depending on modulation source and type. # Calibration calculations for frontend variations done in # _update_dependent_regs. 
depth_parameter = 0.0 if mtype == _WG_MOD_AMPL: _utils.check_parameter_valid('range', depth, [0.0, 1.0], 'amplitude modulation depth', 'fraction') depth_parameter = depth elif mtype == _WG_MOD_FREQ: _utils.check_parameter_valid( 'range', depth, [0.0, _WG_MOD_FREQ_MAX], 'frequency modulation depth', 'Hz/V') depth_parameter = depth / (DAC_SMP_RATE / 8.0) elif mtype == _WG_MOD_PHASE: _utils.check_parameter_valid( 'range', depth, [0.0, 360.0], 'phase modulation depth', 'degrees/V') depth_parameter = depth / 360.0 # Can't use trigger/sweep modes at the same time as modulation self.set_modulate_trig_off(ch) if ch == 1: self.mod_depth_uncalibrated_ch1 = depth_parameter self.mod_source_ch1 = source self.amod_enable_ch1 = True if mtype == _WG_MOD_AMPL else False self.fmod_enable_ch1 = True if mtype == _WG_MOD_FREQ else False self.pmod_enable_ch1 = True if mtype == _WG_MOD_PHASE else False self.sweep_enable_ch1 = False if source == _WG_MOD_INTER: self._set_sweepgenerator(sweepgen=self._sweepmod1, waveform=2, waitfortrig=0, frequency=frequency, offset=0, logsweep=0, duration=0) self.adc1_statuslight = True if \ source == _WG_MODSOURCE_ADC else False elif ch == 2: self.mod_depth_uncalibrated_ch2 = depth_parameter self.mod_source_ch2 = source self.amod_enable_ch2 = True if mtype == _WG_MOD_AMPL else False self.fmod_enable_ch2 = True if mtype == _WG_MOD_FREQ else False self.pmod_enable_ch2 = True if mtype == _WG_MOD_PHASE else False self.sweep_enable_ch2 = False if source == _WG_MOD_INTER: self._set_sweepgenerator(sweepgen=self._sweepmod2, waveform=2, waitfortrig=0, frequency=frequency, offset=0, logsweep=0, duration=0) self.adc2_statuslight = True if \ source == _WG_MODSOURCE_ADC else False def _get_mod_depth_uncalibrated(self, ch): # Calculate mod depth based on instrument state. Used when connecting # to running device. dac1, dac2 = self._dac_gains() adc1, adc2 = self._adc_gains() mod_source_scalers = [2.0**11 / (8.0 if self.atten_compensate_ch1 else 1.0) * adc1, 2.0**11 / (8.0 if self.atten_compensate_ch2 else 1.0) * adc2, 2.0**14 * dac1, 2.0**14 * dac2, 1.0, 1.0] if ch == 1: mod_depth_uncalibrated = self.mod_depth_ch1 / \ mod_source_scalers[self.mod_source_ch1] / _WG_MOD_DEPTH_MAX else: mod_depth_uncalibrated = self.mod_depth_ch2 / \ mod_source_scalers[self.mod_source_ch2] / _WG_MOD_DEPTH_MAX return mod_depth_uncalibrated def _get_gate_thresh_uncalibrated(self, ch): # Calculate gate threshold based on instrument state. Used when # connecting to running device. dac1, dac2 = self._dac_gains() adc1, adc2 = self._adc_gains() gate_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0] if ch == 1: gate_thresh_uncalibrated = self.gate_thresh_ch1 * \ gate_source_scalers[self.trig_source_ch1] else: gate_thresh_uncalibrated = self.gate_thresh_ch2 * \ gate_source_scalers[self.trig_source_ch2] return gate_thresh_uncalibrated def _get_trig_thresh_uncalibrated(self, ch): # Calculate trig threshold based on instrument state. Used when # connecting to running device. 
dac1, dac2 = self._dac_gains() adc1, adc2 = self._adc_gains() trig_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0] if ch == 1: trig_threshold_uncalibrated = self._trigger1.level * \ trig_source_scalers[self.trig_source_ch1] else: trig_threshold_uncalibrated = self._trigger2.level * \ trig_source_scalers[self.trig_source_ch2] return trig_threshold_uncalibrated def _update_dependent_regs(self): # Get the calibration coefficients of the front end dac1, dac2 = self._dac_gains() adc1, adc2 = self._adc_gains() # Frontend attenuation flag for modulation self.atten_compensate_ch1 = 1 if self._get_frontend(1)[1] else 0 self.atten_compensate_ch2 = 1 if self._get_frontend(2)[1] else 0 # Scaling source parameter arrays for each trigger/modulation mode. mod_source_scalers = [2.0**11 / (8.0 if self.atten_compensate_ch1 else 1.0) * adc1, 2.0**11 / (8.0 if self.atten_compensate_ch2 else 1.0) * adc2, 2.0**14 * dac1, 2.0**14 * dac2, 1.0, 1.0] gate_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0] trig_source_scalers = [adc1, adc2, dac1 * 16, dac2 * 16, 1.0, 1.0] # Channel 1 modulation depth if (self.amod_enable_ch1 is True or self.pmod_enable_ch1 is True or ( self.fmod_enable_ch1 is True)): try: self.mod_depth_uncalibrated_ch1 except AttributeError: self.mod_depth_uncalibrated_ch1 = \ self._get_mod_depth_uncalibrated(1) self.mod_depth_ch1 = self.mod_depth_uncalibrated_ch1 * \ mod_source_scalers[self.mod_source_ch1] * _WG_MOD_DEPTH_MAX # Channel 2 modulation depth if (self.amod_enable_ch2 is True or self.pmod_enable_ch2 is True or ( self.fmod_enable_ch2 is True)): try: self.mod_depth_uncalibrated_ch2 except AttributeError: self.mod_depth_uncalibrated_ch2 = \ self._get_mod_depth_uncalibrated(2) self.mod_depth_ch2 = self.mod_depth_uncalibrated_ch2 * \ mod_source_scalers[self.mod_source_ch2] * _WG_MOD_DEPTH_MAX # Channel 1 gate threshold if self.gate_mode_ch1 == 1: try: self.gate_thresh_uncalibrated_ch1 except AttributeError: self.gate_thresh_uncalibrated_ch1 = \ self._get_gate_thresh_uncalibrated(1) self.gate_thresh_ch1 = self.gate_thresh_uncalibrated_ch1 / \ gate_source_scalers[self.trig_source_ch1] # Channel 2 gate threshold if self.gate_mode_ch2 == 1: try: self.gate_thresh_uncalibrated_ch2 except AttributeError: self.gate_thresh_uncalibrated_ch2 = \ self._get_gate_thresh_uncalibrated(2) self.gate_thresh_ch2 = self.gate_thresh_uncalibrated_ch2 / \ gate_source_scalers[self.trig_source_ch2] # Channel 1 N cycle/start/sweep mode trigger threshold if (self.trig_sweep_mode_ch1 == 1 and self.gate_mode_ch1 != 1): try: self.trigger_threshold_uncalibrated_ch1 except AttributeError: self.trigger_threshold_uncalibrated_ch1 = \ self._get_trig_thresh_uncalibrated(1) self._trigger1.level = self.trigger_threshold_uncalibrated_ch1 / \ trig_source_scalers[self.trig_source_ch1] # Channel 2 N cycle/start/sweep mode trigger threshold if (self.trig_sweep_mode_ch2 == 1 and self.gate_mode_ch2 != 1): try: self.trigger_threshold_uncalibrated_ch2 except AttributeError: self.trigger_threshold_uncalibrated_ch2 = \ self._get_trig_thresh_uncalibrated(2) self._trigger2.level = self.trigger_threshold_uncalibrated_ch2 / \ trig_source_scalers[self.trig_source_ch2] def commit(self): self._update_dependent_regs() # Commit the register values to the device super(WaveformGenerator, self).commit() # Bring in the docstring from the superclass for our docco. 
commit.__doc__ = MokuInstrument.commit.__doc__ _wavegen_reg_handlers = { # channel 1 control: # modulation controls 'adc1_statuslight': (REG_BASE_MOD_0, to_reg_unsigned(0, 1), from_reg_unsigned(0, 1)), 'amod_enable_ch1': (REG_BASE_MOD_0, to_reg_unsigned(1, 1), from_reg_unsigned(1, 1)), 'fmod_enable_ch1': (REG_BASE_MOD_0, to_reg_unsigned(2, 1), from_reg_unsigned(2, 1)), 'pmod_enable_ch1': (REG_BASE_MOD_0, to_reg_unsigned(3, 1), from_reg_unsigned(3, 1)), 'sweep_enable_ch1': (REG_BASE_MOD_0, to_reg_unsigned(4, 1), from_reg_unsigned(4, 1)), 'reverse_sweep_ch1': (REG_BASE_MOD_0, to_reg_unsigned(5, 1), from_reg_unsigned(5, 1)), 'mod_source_ch1': (REG_BASE_MOD_0, to_reg_unsigned(6, 3), from_reg_unsigned(6, 3)), 'atten_compensate_ch1': (REG_BASE_MOD_0, to_reg_unsigned(9, 1), from_reg_unsigned(9, 1)), 'trig_source_ch1': (REG_BASE_MOD_0, to_reg_unsigned(10, 3), from_reg_unsigned(10, 3)), 'range_shift_ch1': (REG_BASE_MOD_0, to_reg_unsigned(13, 6), from_reg_unsigned(13, 6)), 'sine_trigdly_ch1': (REG_BASE_MOD_0, to_reg_unsigned(19, 1), from_reg_unsigned(19, 1)), 'phasedly_en_ch1': (REG_BASE_MOD_0, to_reg_unsigned(20, 1), from_reg_unsigned(20, 1)), 'trig_sweep_mode_ch1': (REG_BASE_MOD_0, to_reg_unsigned(29, 1), from_reg_unsigned(29, 1)), 'gate_mode_ch1': (REG_BASE_MOD_0, to_reg_unsigned(30, 1), from_reg_unsigned(30, 1)), 'mod_depth_ch1': (REG_BASE_MOD_0 + 1, to_reg_unsigned(0, 32), from_reg_unsigned(0, 32)), 'gate_thresh_ch1': ((REG_GATETHRESH_H_CH1, REG_GATETHRESH_L_CH1), to_reg_signed(16, 48), from_reg_signed(16, 48)), # waveform controls 'enable_ch1': (REG_BASE_WAV_0, to_reg_unsigned(0, 1), from_reg_unsigned(0, 1)), 'waveform_type_ch1': (REG_BASE_WAV_0, to_reg_unsigned(1, 1), from_reg_unsigned(1, 1)), 'amplitude_ch1': (REG_BASE_WAV_0 + 1, to_reg_signed(0, 18, xform=lambda obj, a: 2 * a / obj._dac_gains()[0]), from_reg_signed(0, 18, xform=lambda obj, a: 2 * a * obj._dac_gains()[0])), 'offset_ch1': (REG_BASE_WAV_0 + 2, to_reg_signed(0, 16, xform=lambda obj, a: a / obj._dac_gains()[0]), from_reg_signed(0, 16, xform=lambda obj, a: a * obj._dac_gains()[0])), 't0_ch1': ((REG_BASE_WAV_0 + 13, REG_BASE_WAV_0 + 12), to_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR), from_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)), 't1_ch1': ((REG_BASE_WAV_0 + 15, REG_BASE_WAV_0 + 14), to_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR), from_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)), 't2_ch1': ((REG_BASE_WAV_0 + 17, REG_BASE_WAV_0 + 16), to_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR), from_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)), 'riserate_ch1': ((REG_BASE_WAV_0 + 19, REG_BASE_WAV_0 + 18), to_reg_signed(0, 64, xform=lambda obj, o: (o**-1) * _WG_RISESCALE), from_reg_signed(0, 64, xform=lambda obj, o: (o / _WG_RISESCALE)**-1)), 'fallrate_ch1': ((REG_BASE_WAV_0 + 21, REG_BASE_WAV_0 + 20), to_reg_signed(0, 64, xform=lambda obj, o: (o**-1) * _WG_RISESCALE), from_reg_signed(0, 64, xform=lambda obj, o: (o / _WG_RISESCALE)**-1)), 'enable_reset_ch1': (REG_BASE_WAV_0 + 22, to_reg_unsigned(0, 1), from_reg_unsigned(0, 1)), 'phase_dly_ch1': (REG_BASE_WAV_0 + 23, to_reg_unsigned(0, 32), from_reg_unsigned(0, 32)), # channel 2 control: # modulation controls 'adc2_statuslight': (REG_BASE_MOD_1, to_reg_unsigned(0, 1), from_reg_unsigned(0, 1)), 'amod_enable_ch2': (REG_BASE_MOD_1, to_reg_unsigned(1, 1), from_reg_unsigned(1, 1)), 'fmod_enable_ch2': (REG_BASE_MOD_1, to_reg_unsigned(2, 1), from_reg_unsigned(2, 1)), 
'pmod_enable_ch2': (REG_BASE_MOD_1, to_reg_unsigned(3, 1), from_reg_unsigned(3, 1)), 'sweep_enable_ch2': (REG_BASE_MOD_1, to_reg_unsigned(4, 1), from_reg_unsigned(4, 1)), 'reverse_sweep_ch2': (REG_BASE_MOD_1, to_reg_unsigned(5, 1), from_reg_unsigned(5, 1)), 'mod_source_ch2': (REG_BASE_MOD_1, to_reg_unsigned(6, 3), from_reg_unsigned(6, 3)), 'atten_compensate_ch2': (REG_BASE_MOD_1, to_reg_unsigned(9, 1), from_reg_unsigned(9, 1)), 'trig_source_ch2': (REG_BASE_MOD_1, to_reg_unsigned(10, 3), from_reg_unsigned(10, 3)), 'range_shift_ch2': (REG_BASE_MOD_1, to_reg_unsigned(13, 6), from_reg_unsigned(13, 6)), 'sine_trigdly_ch2': (REG_BASE_MOD_1, to_reg_unsigned(19, 1), from_reg_unsigned(19, 1)), 'phasedly_en_ch2': (REG_BASE_MOD_1, to_reg_unsigned(20, 1), from_reg_unsigned(20, 1)), 'trig_sweep_mode_ch2': (REG_BASE_MOD_1, to_reg_unsigned(29, 1), from_reg_unsigned(29, 1)), 'gate_mode_ch2': (REG_BASE_MOD_1, to_reg_unsigned(30, 1), from_reg_unsigned(30, 1)), 'mod_depth_ch2': ((REG_BASE_MOD_1 + 1), to_reg_unsigned(0, 32), from_reg_unsigned(0, 32)), 'gate_thresh_ch2': ((REG_GATETHRESH_H_CH2, REG_GATETHRESH_L_CH2), to_reg_signed(16, 48), from_reg_signed(16, 48)), # waveform controls 'enable_ch2': (REG_BASE_WAV_1, to_reg_unsigned(0, 1), from_reg_unsigned(0, 1)), 'waveform_type_ch2': (REG_BASE_WAV_1, to_reg_unsigned(1, 1), from_reg_unsigned(1, 1)), 'amplitude_ch2': ((REG_BASE_WAV_1 + 1), to_reg_signed(0, 18, xform=lambda obj, a: 2 * a / obj._dac_gains()[1]), from_reg_signed(0, 18, xform=lambda obj, a: 2 * a * obj._dac_gains()[1])), 'offset_ch2': ((REG_BASE_WAV_1 + 2), to_reg_signed(0, 16, xform=lambda obj, a: a / obj._dac_gains()[1]), from_reg_signed(0, 16, xform=lambda obj, a: a * obj._dac_gains()[1])), 't0_ch2': (((REG_BASE_WAV_1 + 13), (REG_BASE_WAV_1 + 12)), to_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR), from_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)), 't1_ch2': ((REG_BASE_WAV_1 + 15, REG_BASE_WAV_1 + 14), to_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR), from_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)), 't2_ch2': ((REG_BASE_WAV_1 + 17, REG_BASE_WAV_1 + 16), to_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR), from_reg_unsigned(0, 48, xform=lambda obj, o: o * _WG_PERIODSCALE_SQR)), 'riserate_ch2': ((REG_BASE_WAV_1 + 19, REG_BASE_WAV_1 + 18), to_reg_signed(0, 64, xform=lambda obj, o: (o**-1) * _WG_RISESCALE), from_reg_signed(0, 64, xform=lambda obj, o: (o / _WG_RISESCALE)**-1)), 'fallrate_ch2': ((REG_BASE_WAV_1 + 21, REG_BASE_WAV_1 + 20), to_reg_signed(0, 64, xform=lambda obj, o: (o**-1) * _WG_RISESCALE), from_reg_signed(0, 64, xform=lambda obj, o: (o / _WG_RISESCALE)**-1)), 'enable_reset_ch2': (REG_BASE_WAV_1 + 22, to_reg_unsigned(0, 1), from_reg_unsigned(0, 1)), 'phase_dly_ch2': (REG_BASE_WAV_1 + 23, to_reg_unsigned(0, 32), from_reg_unsigned(0, 32)) } _wavegen_mod_reg_handlers = {}
mit
-7,956,985,272,048,114,000
38.26213
79
0.532666
false
3.653196
false
false
false
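The WaveformGenerator record above documents a fairly complete public API (gen_sinewave, gen_squarewave, set_trigger, gen_modulate, ...). A short usage sketch, assuming a reachable Moku:Lab device; Moku.get_by_name and deploy_instrument are the standard pymoku entry points referenced in the class docstring, while the device name is a placeholder.

# Sketch of typical use, assuming a Moku:Lab named 'example-moku' is reachable.
from pymoku import Moku
from pymoku.instruments import WaveformGenerator

m = Moku.get_by_name('example-moku')                  # placeholder device name
try:
    i = m.deploy_instrument(WaveformGenerator)
    # 1 Vpp, 1 MHz sine on channel 1, then a 10-cycle burst on an external trigger
    i.gen_sinewave(1, amplitude=1.0, frequency=1e6)
    i.set_trigger(1, 'ncycle', ncycles=10, trigger_source='external')
finally:
    m.close()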
targueriano/neuroIFC
neuro-ifc_1.0.16_amd64/usr/local/neuro-ifc/src/Treinamento.py
1
4811
#!/usr/bin/env python
#-*- coding: utf-8 -*-
#autor Taylan Branco Meurer

import neurolab


class Treinamento(object):
    def __init__(self, net, inputs, targets, epocas, show, goal,
                 lr, lr_inc, lr_dec, mc, rr):
        self.net = net
        self.inputs = inputs
        self.targets = targets
        self.epocas = epocas
        self.show = show
        self.objetivo = goal
        self.taxaAprendizado = lr
        self.taxaIncremento = lr_inc
        self.taxaDecremento = lr_dec
        self.taxaImpulso = mc
        self.taxaRegularizacao = rr
        self.errors = list()

    def treinar(self, regra):
        if regra == "delta":
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         lr=self.taxaAprendizado.get_value())
            return self.errors
        elif regra == "gd":
            self.net.trainf = neurolab.train.train_gd
            print self.net.trainf
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         goal=self.objetivo.get_value(),
                                         lr=self.taxaAprendizado.get_value())
            return self.errors
        elif regra == "gdm":
            self.net.trainf = neurolab.train.train_gdm
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         goal=self.objetivo.get_value(),
                                         lr=self.taxaAprendizado.get_value(),
                                         mc=self.taxaImpulso.get_value(),
                                         rr=self.taxaRegularizacao.get_value())
            return self.errors
        elif regra == "gda":
            self.net.trainf = neurolab.train.train_gda
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         goal=self.objetivo.get_value(),
                                         lr=self.taxaAprendizado.get_value(),
                                         lr_inc=self.taxaIncremento.get_value(),
                                         lr_dec=self.taxaDecremento.get_value(),
                                         rr=self.taxaRegularizacao.get_value())
            return self.errors
        elif regra == "gdx":
            self.net.trainf = neurolab.train.train_gdx
            print self.net.trainf
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         goal=self.objetivo.get_value(),
                                         lr=self.taxaAprendizado.get_value(),
                                         lr_inc=self.taxaIncremento.get_value(),
                                         lr_dec=self.taxaDecremento.get_value(),
                                         mc=self.taxaImpulso.get_value(),
                                         rr=self.taxaRegularizacao.get_value())
            return self.errors
        elif regra == "rprop":
            self.net.trainf = neurolab.train.train_rprop
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         goal=self.objetivo.get_value(),
                                         lr=self.taxaAprendizado.get_value())
            return self.errors
        elif regra == "bfgs":
            self.net.trainf = neurolab.train.train_bfgs
            self.errors = self.net.train(self.inputs, self.targets,
                                         epochs=self.epocas.get_value_as_int(),
                                         show=self.show.get_value_as_int(),
                                         goal=self.objetivo.get_value(),
                                         rr=self.taxaRegularizacao.get_value())
            return self.errors

gpl-3.0
-5,539,195,246,021,007,000
49.114583
93
0.437331
false
4.20542
false
false
false
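Treinamento above is a thin wrapper that maps a rule name to a neurolab training function, but its constructor expects GUI widgets exposing get_value()/get_value_as_int() rather than plain numbers. The sketch below is an assumption about how one might drive it headlessly (it is not part of the original project): it wraps numbers in a minimal stand-in object and assumes the class is importable from a module named Treinamento; it targets Python 2, matching the print statements in the record.

# Headless usage sketch (assumptions: import path, the _Val stand-in, Python 2).
import numpy as np
import neurolab
from Treinamento import Treinamento          # assumed import path for the class above

class _Val(object):
    """Minimal stand-in for the GTK spin buttons the constructor expects."""
    def __init__(self, v):
        self._v = v
    def get_value(self):
        return self._v
    def get_value_as_int(self):
        return int(self._v)

inputs = np.linspace(-1, 1, 20).reshape(-1, 1)
targets = inputs ** 2                        # toy regression target

net = neurolab.net.newff([[-1, 1]], [5, 1])  # standard neurolab feed-forward net
t = Treinamento(net, inputs, targets,
                epocas=_Val(200), show=_Val(50), goal=_Val(0.01),
                lr=_Val(0.01), lr_inc=_Val(1.05), lr_dec=_Val(0.7),
                mc=_Val(0.9), rr=_Val(0.0))
errors = t.treinar("gdx")                    # returns the per-epoch error list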
OscarPDR/projects_morelab
projects/views.py
1
21846
# coding: utf-8 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger from django.core.urlresolvers import reverse from django.http import HttpResponseRedirect from django.shortcuts import render_to_response, get_object_or_404 from django.template import RequestContext from django.template.defaultfilters import slugify from django.contrib.auth.decorators import login_required from django.conf import settings from email.mime.image import MIMEImage from django.core.mail import EmailMultiAlternatives from django.template.loader import render_to_string from django.utils.html import strip_tags from .models import Project, FundingAmount, AssignedEmployee, ConsortiumMember from .forms import ProjectForm, ProjectSearchForm, FundingAmountFormSet, AssignedEmployeeFormSet, ConsortiumMemberFormSet from employees.models import Employee from organizations.models import Organization from funding_programs.models import FundingProgram # Create your views here. PAGINATION_NUMBER = settings.PROJECTS_PAGINATION ######################### # View: project_index ######################### def project_index(request): projects = Project.objects.all().order_by('title') if request.method == 'POST': form = ProjectSearchForm(request.POST) if form.is_valid(): query = form.cleaned_data['text'] query = slugify(query) projs = [] for project in projects: if query in slugify(project.title): projs.append(project) projects = projs else: form = ProjectSearchForm() paginator = Paginator(projects, PAGINATION_NUMBER) page = request.GET.get('page') try: projects = paginator.page(page) except PageNotAnInteger: # If page is not an integer, deliver first page. projects = paginator.page(1) except EmptyPage: # If page is out of range (e.g. 9999), deliver last page of results. projects = paginator.page(paginator.num_pages) return render_to_response("projects/index.html", { "projects": projects, 'form': form, }, context_instance = RequestContext(request)) ######################### # View: add_project ######################### @login_required def add_project(request): error_badges = [] project = None current_year = None end_year = None project_form = ProjectForm(prefix='project_form') funding_amount_formset = FundingAmountFormSet(instance=Project(), prefix='funding_amount_formset') assigned_employee_formset = AssignedEmployeeFormSet(instance=Project(), prefix='assigned_employee_formset') consortium_member_formset = ConsortiumMemberFormSet(instance=Project(), prefix='consortium_member_formset') if request.POST: project_form = ProjectForm(request.POST, prefix='project_form') # start project_form validation if project_form.is_valid(): project = project_form.save(commit=False) funding_amount_formset = FundingAmountFormSet(request.POST, instance=project, prefix='funding_amount_formset') assigned_employee_formset = AssignedEmployeeFormSet(request.POST, instance=project, prefix='assigned_employee_formset') consortium_member_formset = ConsortiumMemberFormSet(request.POST, instance=project, prefix='consortium_member_formset') cd_p = project_form.cleaned_data current_year = cd_p['start_year'] end_year = cd_p['end_year'] project.project_type = cd_p['project_type'].encode('utf-8') project.title = cd_p['title'].encode('utf-8') project.description = cd_p['description'].encode('utf-8') project.homepage = cd_p['homepage'] project.start_month = cd_p['start_month'] project.start_year = cd_p['start_year'] project.end_month = cd_p['end_month'] project.end_year = cd_p['end_year'] project.status = cd_p['status'].encode('utf-8') 
project.project_code = cd_p['project_code'].encode('utf-8') project.total_funds = cd_p['total_funds'] project.total_funds_deusto = cd_p['total_funds_deusto'] project.observations = cd_p['observations'].encode('utf-8') project.funding_program = cd_p['funding_program'] project.project_leader = cd_p['project_leader'] try: project.logo = request.FILES['project_form-logo'] except: pass project.save() if 'project' in error_badges: error_badges.remove('project') if 'funding_program' in error_badges: error_badges.remove('funding_program') else: if request.POST.get('project_form_funding_program') is None: error_badges.append('funding_program') else: error_badges.remove('funding_program') error_badges.append('project') # end project_form validation # start funding_amount_formset validation if funding_amount_formset.is_valid(): total_funding = 0 for funding_amount_form in funding_amount_formset: if (len(funding_amount_form.cleaned_data) > 0) and (current_year <= end_year): cd_fa = funding_amount_form.cleaned_data funding_amount = FundingAmount( project=project, amount=cd_fa['amount'], year=current_year, ) total_funding += funding_amount.amount funding_amount.save() current_year += 1 else: print "No fundings amounts to save" project.total_funds_deusto = total_funding project.save() # end funding_amount_formset validation # start assigned_employee_formset validation if assigned_employee_formset.is_valid(): for assigned_employee_form in assigned_employee_formset: if (len(assigned_employee_form.cleaned_data) > 0): cd_ae = assigned_employee_form.cleaned_data assigned_employee_form.project = project assigned_employee_form.employee = cd_ae['employee'] assigned_employee_form.role = cd_ae['role'] assigned_employee_form.save() else: print "No assigned employees to save" assigned_employee_formset.save() if 'assigned_employees' in error_badges: error_badges.remove('assigned_employees') else: error_badges.append('assigned_employees') try: project.delete() except: pass # end assigned_employee_formset validation # start consortium_member_formset validation if consortium_member_formset.is_valid(): for consortium_member_form in consortium_member_formset: if (len(consortium_member_form.cleaned_data) > 0): cd_cm = consortium_member_form.cleaned_data consortium_member = ConsortiumMember( project=project, organization=cd_cm['organization'], ) consortium_member.save() else: print "No consortium members to save" if 'consortium_members' in error_badges: error_badges.remove('consortium_members') else: error_badges.append('consortium_members') try: project.delete() except: pass # start consortium_member_formset validation try: return HttpResponseRedirect(reverse('email_project', args = (project.slug,))) except: pass else: project_form = ProjectForm(prefix='project_form') funding_amount_formset = FundingAmountFormSet(instance=Project(), prefix='funding_amount_formset') assigned_employee_formset = AssignedEmployeeFormSet(instance=Project(), prefix='assigned_employee_formset') consortium_member_formset = ConsortiumMemberFormSet(instance=Project(), prefix='consortium_member_formset') return render_to_response("projects/add.html", { 'error_badges': error_badges, 'project_form': project_form, 'funding_amount_formset': funding_amount_formset, 'assigned_employee_formset': assigned_employee_formset, 'consortium_member_formset': consortium_member_formset, }, context_instance = RequestContext(request)) ######################### # View: project_info ######################### def project_info(request, slug): project = 
get_object_or_404(Project, slug=slug) funding_program = FundingProgram.objects.get(id=project.funding_program_id) lprs = AssignedEmployee.objects.filter(project_id=project.id, role='Principal researcher').values('employee_id') principal_researchers = Employee.objects.filter(id__in=lprs).order_by('name', 'first_surname', 'second_surname') lpms = AssignedEmployee.objects.filter(project_id=project.id, role='Project manager').values('employee_id') project_managers = Employee.objects.filter(id__in=lpms).order_by('name', 'first_surname', 'second_surname') rs = AssignedEmployee.objects.filter(project_id=project.id, role='Researcher').values('employee_id') researchers = Employee.objects.filter(id__in=rs).order_by('name', 'first_surname', 'second_surname') funding_amounts = FundingAmount.objects.filter(project_id=project.id) consortium_members = ConsortiumMember.objects.filter(project_id=project.id) return render_to_response("projects/info.html", { 'project': project, 'funding_program': funding_program, 'principal_researchers': principal_researchers, 'project_managers': project_managers, 'researchers': researchers, 'funding_amounts': funding_amounts, 'consortium_members': consortium_members, }, context_instance = RequestContext(request)) ######################### # View: edit_project ######################### @login_required def edit_project(request, slug): error_badges = [] project = get_object_or_404(Project, slug=slug) assigned_employees = AssignedEmployee.objects.filter(project_id=project.id) consortium_members = ConsortiumMember.objects.filter(project_id=project.id) funding_amounts = FundingAmount.objects.filter(project_id=project.id).order_by('year') current_year = 3000 end_year = 2999 project_form = ProjectForm(prefix='project_form') funding_amount_formset = FundingAmountFormSet(instance=Project(), prefix='funding_amount_formset') assigned_employee_formset = AssignedEmployeeFormSet(instance=Project(), prefix='assigned_employee_formset') consortium_member_formset = ConsortiumMemberFormSet(instance=Project(), prefix='consortium_member_formset') if request.POST: project_form = ProjectForm(request.POST, prefix='project_form') # start project_form validation if project_form.is_valid(): funding_amount_formset = FundingAmountFormSet(request.POST, instance=project, prefix='funding_amount_formset') assigned_employee_formset = AssignedEmployeeFormSet(request.POST, instance=project, prefix='assigned_employee_formset') consortium_member_formset = ConsortiumMemberFormSet(request.POST, instance=project, prefix='consortium_member_formset') cd_p = project_form.cleaned_data current_year = cd_p['start_year'] end_year = cd_p['end_year'] project.project_type = cd_p['project_type'].encode('utf-8') project.title = cd_p['title'].encode('utf-8') project.description = cd_p['description'].encode('utf-8') project.homepage = cd_p['homepage'] project.start_month = cd_p['start_month'] project.start_year = cd_p['start_year'] project.end_month = cd_p['end_month'] project.end_year = cd_p['end_year'] project.status = cd_p['status'].encode('utf-8') project.project_code = cd_p['project_code'].encode('utf-8') project.total_funds = cd_p['total_funds'] project.total_funds_deusto = cd_p['total_funds_deusto'] project.observations = cd_p['observations'].encode('utf-8') project.funding_program = cd_p['funding_program'] project.project_leader = cd_p['project_leader'] try: project.logo = request.FILES['project_form-logo'] except: pass project.save() if 'project' in error_badges: error_badges.remove('project') else: 
error_badges.append('project') # end project_form validation # start funding_amount_formset validation if funding_amount_formset.is_valid(): total_funding = 0 for funding_amount_form in funding_amount_formset: if (len(funding_amount_form.cleaned_data) > 0) and (current_year <= end_year): cd_fa = funding_amount_form.cleaned_data funding_amount = FundingAmount.objects.get(project_id=project.id, year=current_year) funding_amount.amount = cd_fa['amount'] total_funding += cd_fa['amount'] funding_amount.save() current_year += 1 else: print "No fundings amounts to save" if 'funding_amount' in error_badges: error_badges.remove('funding_amount') project.total_funds_deusto = total_funding project.save() else: error_badges.append('funding_amount') # end funding_amount_formset validation # start assigned_employee_formset validation if assigned_employee_formset.is_valid(): for assigned_employee_form in assigned_employee_formset: if (len(assigned_employee_form.cleaned_data) > 0): cd_ae = assigned_employee_form.cleaned_data assigned_employee_form.project = project assigned_employee_form.employee = cd_ae['employee'] assigned_employee_form.role = cd_ae['role'] assigned_employee_form.save() else: print "No assigned employees to save" assigned_employee_formset.save() if 'assigned_employees' in error_badges: error_badges.remove('assigned_employees') else: error_badges.append('assigned_employees') # end assigned_employee_formset validation # start consortium_member_formset validation if consortium_member_formset.is_valid(): for consortium_member_form in consortium_member_formset: if (len(consortium_member_form.cleaned_data) > 0): cd_cm = consortium_member_form.cleaned_data consortium_member_form.project = project consortium_member_form.organization = cd_cm['organization'] consortium_member_form.save() else: print "No consortium members to save" if 'consortium_members' in error_badges: error_badges.remove('consortium_members') else: error_badges.append('consortium_members') # start consortium_member_formset validation try: return HttpResponseRedirect(reverse('email_project', args=(project.slug,))) except: pass else: project_data = { 'project_type': project.project_type, 'title': project.title, 'slug': project.slug, 'description': project.description, 'homepage': project.homepage, 'start_month': project.start_month, 'start_year': project.start_year, 'end_month': project.end_month, 'end_year': project.end_year, 'status': project.status, 'project_code': project.project_code, 'total_funds': project.total_funds, 'total_funds_deusto': project.total_funds_deusto, 'observations': project.observations, 'funding_program': project.funding_program, 'project_leader': project.project_leader, } # FORMS project_form = ProjectForm( prefix='project_form', initial=project_data, ) funding_amount_formset = FundingAmountFormSet( instance=Project(), prefix='funding_amount_formset' ) assigned_employee_formset = AssignedEmployeeFormSet( instance=Project(), prefix='assigned_employee_formset' ) consortium_member_formset = ConsortiumMemberFormSet( instance=Project(), prefix='consortium_member_formset' ) return render_to_response("projects/edit.html", { 'project': project, 'project_form': project_form, 'funding_amounts': funding_amounts, 'funding_amount_formset': funding_amount_formset, 'assigned_employees': assigned_employees, 'assigned_employee_formset': assigned_employee_formset, 'consortium_members': consortium_members, 'consortium_member_formset': consortium_member_formset, }, context_instance = RequestContext(request)) 
######################### # View: email_project ######################### @login_required def email_project(request, slug): project = get_object_or_404(Project, slug=slug) funding_program = FundingProgram.objects.get(id=project.funding_program_id) lpms = AssignedEmployee.objects.filter(project_id=project.id, role='Project manager').values('employee_id') project_managers = Employee.objects.filter(id__in=lpms).order_by('name', 'first_surname', 'second_surname') lprs = AssignedEmployee.objects.filter(project_id=project.id, role='Principal researcher').values('employee_id') principal_researchers = Employee.objects.filter(id__in=lprs).order_by('name', 'first_surname', 'second_surname') project_leader = Organization.objects.get(id=project.project_leader_id) consortium_members = [] for consortium_member in ConsortiumMember.objects.all().filter(project_id=project.id): org = Organization.objects.get(id=consortium_member.organization.id) consortium_members.append(org.name) html_content = render_to_string('projects/project_email_template.html', { 'project': project, 'funding_program': funding_program, 'project_managers': project_managers, 'principal_researchers': principal_researchers, 'project_leader': project_leader, 'consortium_members': consortium_members, }) text_content = strip_tags(html_content) msg = EmailMultiAlternatives( '[NEW PROJECT]: ' + project.title, # subject text_content, # message settings.PROJECTS_SENDER_EMAIL, # from settings.PROJECTS_RECEPTOR_EMAILS, # to ) try: image_file = open(project.logo.path, 'rb') msg_image = MIMEImage(image_file.read()) image_file.close() msg_image.add_header('Content-ID', '<image>', filename=project.logo.path) msg.attach(msg_image) except: pass try: image_file = open(funding_program.logo.path, 'rb') msg_image = MIMEImage(image_file.read()) image_file.close() msg_image.add_header('Content-ID', '<image>', filename = funding_program.logo.path) msg.attach(msg_image) except: pass msg.attach_alternative(html_content, "text/html") msg.send() return HttpResponseRedirect(reverse('project_index')) ######################### # View: delete_project ######################### @login_required def delete_project(request, slug): project = get_object_or_404(Project, slug=slug) project.delete() return HttpResponseRedirect(reverse('project_index')) ######################### # View: delete_employee_from_project ######################### @login_required def delete_employee_from_project(request, employee_slug, project_slug): project = get_object_or_404(Project, slug=project_slug) employee = get_object_or_404(Employee, slug=employee_slug) assigned_employee = get_object_or_404(AssignedEmployee, project_id=project.id, employee_id=employee.id) assigned_employee.delete() return HttpResponseRedirect(reverse('edit_project', args=(project.slug,))) ######################### # View: delete_employee_from_project ######################### @login_required def delete_organization_from_project(request, organization_slug, project_slug): project = get_object_or_404(Project, slug=project_slug) organization = get_object_or_404(Organization, slug=organization_slug) consortium_member = get_object_or_404(ConsortiumMember, project_id=project.id, organization_id=organization.id) consortium_member.delete() return HttpResponseRedirect(reverse('edit_project', args=(project.slug,)))
gpl-3.0
-7,591,338,815,435,761,000
34.637847
131
0.61018
false
4.078029
false
false
false
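The project_index view in the record above leans on Django's Paginator for all of its page handling; the short sketch below isolates that idiom so it can be run outside the project. It is only an illustration: PAGINATION_NUMBER and the projects list are made-up stand-ins for settings.PROJECTS_PAGINATION and the Project queryset, and the only assumption is that Django itself is importable.

# Minimal sketch of the pagination fallback used in project_index above,
# run against a plain Python list instead of a QuerySet.
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

PAGINATION_NUMBER = 5  # hypothetical page size, mirroring settings.PROJECTS_PAGINATION
projects = ["project-%02d" % i for i in range(1, 18)]  # stand-in for Project.objects.all()

paginator = Paginator(projects, PAGINATION_NUMBER)

def get_page(page):
    # Same fallback behaviour as the view: first page for non-integer input,
    # last page when the requested page is out of range.
    try:
        return paginator.page(page)
    except PageNotAnInteger:
        return paginator.page(1)
    except EmptyPage:
        return paginator.page(paginator.num_pages)

print(list(get_page("abc")))   # falls back to the first page
print(list(get_page(9999)))    # falls back to the last page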
chrisdjscott/Atoman
atoman/system/lattice.py
1
15979
""" Lattice module, with Lattice object and utilities @author: Chris Scott """ from __future__ import absolute_import from __future__ import unicode_literals from __future__ import division import logging import copy import numpy as np from .atoms import elements from ..algebra import vectors from . import _lattice from . import _output from six.moves import range class Lattice(object): """ The Lattice object. """ def __init__(self): self.NAtoms = 0 self.cellDims = np.array([100, 100, 100], np.float64) self.specieList = [] self.specieCount = np.empty(0, np.int32) self.specieMass = np.empty(0, np.float64) self.specieCovalentRadius = np.empty(0, np.float64) self.specieRGB = np.empty((0, 3), np.float64) self.specieAtomicNumber = np.empty(0, np.int32) self.minPos = np.zeros(3, np.float64) self.maxPos = np.zeros(3, np.float64) self.atomID = np.empty(0, np.int32) self.specie = np.empty(0, np.int32) self.pos = np.empty(0, np.float64) self.charge = np.empty(0, np.float64) self.scalarsDict = {} self.scalarsFiles = {} self.vectorsDict = {} self.vectorsFiles = {} self.attributes = {} self.PBC = np.ones(3, np.int32) def wrapAtoms(self): """ Wrap atoms that have left the periodic cell. """ return _lattice.wrapAtoms(self.NAtoms, self.pos, self.cellDims, self.PBC) def atomSeparation(self, index1, index2, pbc): """ Calculate the separation between two atoms. Parameters ---------- index1, index2 : integer Indexes of the atoms you want to calculate the separation between. Returns ------- atomSeparation : float The separation between the two atoms. This function will return 'None' if the indexes are out of range. Raises ------ IndexError If the specified indexes are too large. """ if index1 < self.NAtoms and index2 < self.NAtoms: atomSeparation = vectors.separation(self.atomPos(index1), self.atomPos(index2), self.cellDims, pbc) else: raise IndexError("Atom index(es) out of range: (%d or %d) >= %d" % (index1, index2, self.NAtoms)) return atomSeparation def reset(self, NAtoms): """ Reinitialise arrays and counters """ self.NAtoms = NAtoms self.atomID = np.empty(NAtoms, np.int32) self.specie = np.empty(NAtoms, np.int32) self.pos = np.empty(3 * NAtoms, np.float64) self.charge = np.zeros(NAtoms, np.float64) self.specieList = [] self.specieCount = np.empty(0, np.int32) self.specieMass = np.empty(0, np.float64) self.specieCovalentRadius = np.empty(0, np.float64) self.specieRGB = np.empty((0, 3), np.float64) self.specieAtomicNumber = np.empty(0, np.int32) self.minPos = np.zeros(3, np.float64) self.maxPos = np.zeros(3, np.float64) self.cellDims = np.zeros(3, np.float64) self.scalarsDict = {} self.scalarsFiles = {} self.vectorsDict = {} self.vectorsFiles = {} self.attributes = {} self.PBC = np.ones(3, np.int32) def calcTemperature(self, NMoving=None): """ Calculate temperature in K """ logger = logging.getLogger(__name__) logger.debug("Calculating temperature of Lattice") if "Kinetic energy" in self.scalarsDict: logger.debug("Got 'Kinetic energy' array from scalarsDict") ke = self.scalarsDict["Kinetic energy"] elif "KE" in self.scalarsDict: logger.debug("Got 'KE' array from scalarsDict") ke = self.scalarsDict["KE"] else: logger.debug("No kinetic energy information stored on Lattice") return None if NMoving is None: NMoving = self.NAtoms keSum = np.sum(ke) if keSum == 0: temperature = 0.0 else: boltzmann = 8.6173324e-5 temperature = 2.0 * keSum / (3.0 * boltzmann * NMoving) return temperature def density(self): """ Return density of lattice """ vol = self.volume() if vol == 0: return return self.NAtoms / vol def 
volume(self): """ Return volume of lattice """ return self.cellDims[0] * self.cellDims[1] * self.cellDims[2] def addSpecie(self, sym, count=None): """ Add specie to specie list """ if sym in self.specieList: if count is not None: specInd = self.specieIndex(sym) self.specieCount[specInd] = count return if count is None: count = 0 self.specieList.append(sym) self.specieCount = np.append(self.specieCount, np.int32(count)) self.specieMass = np.append(self.specieMass, elements.atomicMass(sym)) self.specieCovalentRadius = np.append(self.specieCovalentRadius, elements.covalentRadius(sym)) rgbtemp = elements.RGB(sym) rgbnew = np.empty((1, 3), np.float64) rgbnew[0][0] = rgbtemp[0] rgbnew[0][1] = rgbtemp[1] rgbnew[0][2] = rgbtemp[2] self.specieRGB = np.append(self.specieRGB, rgbnew, axis=0) def addAtom(self, sym, pos, charge, atomID=None, scalarVals={}, vectorVals={}): """ Add an atom to the lattice """ if sym not in self.specieList: self.addSpecie(sym) # atom ID if atomID is None: atomID = self.NAtoms specInd = self.getSpecieIndex(sym) self.specieCount[specInd] += 1 pos = np.asarray(pos, dtype=np.float64) self.atomID = np.append(self.atomID, np.int32(atomID)) self.specie = np.append(self.specie, np.int32(specInd)) self.pos = np.append(self.pos, pos) self.charge = np.append(self.charge, np.float64(charge)) # wrap positions # min/max pos!!?? for i in range(3): self.minPos[i] = min(self.minPos[i], pos[i]) self.maxPos[i] = max(self.maxPos[i], pos[i]) self.NAtoms += 1 logger = logging.getLogger(__name__) for scalarName in list(self.scalarsDict.keys()): if scalarName in scalarVals: newval = scalarVals[scalarName] self.scalarsDict[scalarName] = np.append(self.scalarsDict[scalarName], np.float64(newval)) else: self.scalarsDict.pop(scalarName) logger.warning("Removing '%s' scalars from Lattice (addAtom)", scalarName) for vectorName in list(self.vectorsDict.keys()): newval = [] if vectorName in vectorVals: newval = vectorVals[vectorName] if len(newval) == 3: self.vectorsDict[vectorName] = np.append(self.vectorsDict[vectorName], np.asarray(newval, dtype=np.float64)) else: self.vectorsDict.pop(vectorName) logger.warning("Removing '%s' vectors from Lattice (addAtom)", vectorName) def removeAtom(self, index): """ Remove an atom """ specInd = self.specie[index] self.atomID = np.delete(self.atomID, index) self.specie = np.delete(self.specie, index) self.pos = np.delete(self.pos, [3 * index, 3 * index + 1, 3 * index + 2]) self.charge = np.delete(self.charge, index) self.NAtoms -= 1 # modify specie list / counter if required self.specieCount[specInd] -= 1 if self.specieCount[specInd] == 0: self.removeSpecie(specInd) for scalarName in list(self.scalarsDict.keys()): self.scalarsDict[scalarName] = np.delete(self.scalarsDict[scalarName], index) for vectorName in list(self.vectorsDict.keys()): self.vectorsDict[vectorName] = np.delete(self.vectorsDict[vectorName], [3 * index, 3 * index + 1, 3 * index + 2]) def removeSpecie(self, index): """ Remove a specie from the specie list. """ self.specieCount = np.delete(self.specieCount, index) self.specieList.pop(index) self.specieCovalentRadius = np.delete(self.specieCovalentRadius, index) self.specieMass = np.delete(self.specieMass, index) # self.specieMassAMU = np.delete(self.specieMassAMU, index) self.specieRGB = np.delete(self.specieRGB, index, axis=0) for i in range(self.NAtoms): if self.specie[i] > index: self.specie[i] -= 1 def calcForce(self, forceConfig): """ Calculate force on lattice. 
""" pass # if type(forceConfig) is not forces.ForceConfig: # print "FORCE CONFIG WRONG TYPE" # return 113 # # return forces.calc_force(self, forceConfig) def atomPos(self, index): """ Return pointer to atom position within pos array: [xpos, ypos, zpos]. """ atomPos = None if index < self.NAtoms: atomPos = self.pos[3 * index:3 * index + 3] return atomPos def atomSym(self, index): """ Returns symbol of given atom. """ atomSym = None if index < self.NAtoms: atomSym = self.specieList[self.specie[index]] return atomSym def getSpecieIndex(self, sym): """ Return index of specie in specie list. """ if sym not in self.specieList: raise ValueError("Species '%s' is not in the species list" % sym) index = None for i in range(len(self.specieList)): if self.specieList[i] == sym: index = i break return index def setDims(self, dimsarray): self.cellDims[0] = float(dimsarray[0]) self.cellDims[1] = float(dimsarray[1]) self.cellDims[2] = float(dimsarray[2]) def refreshElementProperties(self): """ Refresh element properties. """ for i, sym in enumerate(self.specieList): self.specieMass[i] = elements.atomicMass(sym) self.specieCovalentRadius[i] = elements.covalentRadius(sym) self.specieAtomicNumber[i] = elements.atomicNumber(sym) rgbtemp = elements.RGB(sym) self.specieRGB[i][0] = rgbtemp[0] self.specieRGB[i][1] = rgbtemp[1] self.specieRGB[i][2] = rgbtemp[2] def toLKMC(self, storeEnergies=False): """ Convert the Lattice to LKMC.Lattice. Returns None if cannot load LKMC. """ # try to load LKMC try: from LKMC import Lattice from LKMC import Atoms except ImportError: lkmcLattice = None else: lkmcLattice = Lattice.Lattice(0, storeEnergies=storeEnergies) lkmcLattice.NAtoms = self.NAtoms lkmcLattice.pos = self.pos lkmcLattice.specie = self.specie lkmcLattice.specieList = self.specieList lkmcLattice.specieCount = self.specieCount lkmcLattice.charge = self.charge lkmcLattice.minPos = self.minPos lkmcLattice.maxPos = self.maxPos lkmcLattice.cellDims[0] = self.cellDims[0] lkmcLattice.cellDims[4] = self.cellDims[1] lkmcLattice.cellDims[8] = self.cellDims[2] lkmcLattice.force = np.empty(3 * self.NAtoms, np.float64) lkmcLattice.specieCovalentRadius = self.specieCovalentRadius lkmcLattice.specieRGB = self.specieRGB lkmcLattice.specieMass = np.empty(len(self.specieList), np.float64) lkmcLattice.specieMassAMU = np.empty(len(self.specieList), np.float64) for i, sym in enumerate(self.specieList): lkmcLattice.specieMass[i] = Atoms.atomicMass(sym) lkmcLattice.specieMassAMU[i] = Atoms.atomicMassAMU(sym) def writeLattice(self, filename, visibleAtoms=None): """ Write the Lattice to the given file. If visibleAtoms is passed only write those atoms. 
""" # full lattice or just visible atoms if visibleAtoms is None: writeFullLattice = 1 visibleAtoms = np.empty(0, np.int32) else: writeFullLattice = 0 # call C function to write Lattice _output.writeLattice(filename, visibleAtoms, self.cellDims, self.specieList, self.specie, self.pos, self.charge, writeFullLattice) def clone(self, lattice): """ Copy given lattice into this instance """ if lattice.NAtoms != self.NAtoms: self.reset(lattice.NAtoms) NAtoms = lattice.NAtoms # copy dims self.cellDims[0] = lattice.cellDims[0] self.cellDims[1] = lattice.cellDims[1] self.cellDims[2] = lattice.cellDims[2] # specie stuff NSpecies = len(lattice.specieList) self.specieList = [] self.specieCount = np.zeros(NSpecies, np.int32) self.specieMass = np.empty(NSpecies, np.float64) self.specieCovalentRadius = np.empty(NSpecies, np.float64) self.specieAtomicNumber = np.zeros(NSpecies, np.int32) self.specieRGB = np.empty((NSpecies, 3), np.float64) for i in range(NSpecies): self.specieList.append(lattice.specieList[i]) self.specieCount[i] = lattice.specieCount[i] self.specieMass[i] = lattice.specieMass[i] self.specieCovalentRadius[i] = lattice.specieCovalentRadius[i] self.specieAtomicNumber[i] = lattice.specieAtomicNumber[i] for j in range(3): self.specieRGB[i][j] = lattice.specieRGB[i][j] # atom data self.atomID = np.empty(NAtoms, np.int32) self.specie = np.empty(NAtoms, np.int32) self.pos = np.empty(3 * NAtoms, np.float64) self.charge = np.empty(NAtoms, np.float64) for i in range(NAtoms): self.atomID[i] = lattice.atomID[i] self.specie[i] = lattice.specie[i] self.charge[i] = lattice.charge[i] for j in range(3): self.pos[3 * i + j] = lattice.pos[3 * i + j] self.minPos[0] = lattice.minPos[0] self.minPos[1] = lattice.minPos[1] self.minPos[2] = lattice.minPos[2] self.maxPos[0] = lattice.maxPos[0] self.maxPos[1] = lattice.maxPos[1] self.maxPos[2] = lattice.maxPos[2] self.scalarsDict = copy.deepcopy(lattice.scalarsDict) self.vectorsDict = copy.deepcopy(lattice.vectorsDict) self.scalarsFiles = copy.deepcopy(lattice.scalarsFiles) self.vectorsFiles = copy.deepcopy(lattice.vectorsFiles) self.attributes = copy.deepcopy(lattice.attributes) self.PBC = copy.deepcopy(lattice.PBC)
mit
-2,791,847,265,881,904,600
32.569328
120
0.553476
false
3.681797
false
false
false
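Lattice.calcTemperature in the record above converts a summed kinetic energy into a temperature via T = 2*KE / (3*kB*N), with Boltzmann's constant taken in eV/K. The standalone sketch below reproduces just that arithmetic so the formula can be checked without the atoman package; the energies used are illustrative only.

# Standalone check of the temperature formula used by Lattice.calcTemperature,
# assuming per-atom kinetic energies are stored in eV (hence kB in eV/K).
import numpy as np

def temperature_from_ke(ke_per_atom, n_moving=None):
    ke = np.asarray(ke_per_atom, dtype=np.float64)
    if n_moving is None:
        n_moving = len(ke)
    ke_sum = ke.sum()
    if ke_sum == 0:
        return 0.0
    boltzmann = 8.6173324e-5  # eV / K, same constant as in the class above
    return 2.0 * ke_sum / (3.0 * boltzmann * n_moving)

# e.g. 100 atoms with ~0.039 eV of kinetic energy each sit near room temperature
print(temperature_from_ke([0.0388] * 100))  # roughly 300 K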
OpenNetworkingFoundation/PIF-Open-Intermediate-Representation
pif_ir/bir/tests/test_parser.py
1
2151
#!/usr/bin/env python import logging import struct from pif_ir.bir.objects.bir_struct import BIRStruct from pif_ir.bir.objects.packet_instance import PacketInstance from pif_ir.bir.utils.bir_parser import BIRParser from test_common import yaml_eth_struct_dict def fail(case): logging.error("Test Case {}: Failed".format(case)) exit(1) logging.basicConfig(level=logging.DEBUG) logging.info("RUNNING TEST: %s" % __file__) eth_data = struct.pack("BBBBBB", 0xAA, 0xAA, 0xAA, 0xAA, 0xAA, 0xAA) eth_data += struct.pack("BBBBBB", 0xBB, 0xBB, 0xBB, 0xBB, 0xBB, 0xBB) eth_data += struct.pack("BB", 0x08, 0x00) ipv4_data = struct.pack("BBBB", 0x40, 0xFF, 0x00, 0x05) ipv4_data += struct.pack("BBBB", 0x11, 0x11, 0x11, 0x11) ipv4_data += struct.pack("BBBB", 0xFE, 0x11, 0x00, 0x00) ipv4_data += struct.pack("BBBB", 0xFF, 0xFF, 0xFF, 0xFF) ipv4_data += struct.pack("BBBB", 0xEE, 0xEE, 0xEE, 0xEE) udp_data = struct.pack("BB", 0x22, 0x22) udp_data += struct.pack("BB", 0x33, 0x33) udp_data += struct.pack("BB", 0x44, 0x44) udp_data += struct.pack("BB", 0x55, 0x55) pkt = bytearray(eth_data + ipv4_data + udp_data) packet = PacketInstance(pkt, {}, None) header = BIRStruct('eth', yaml_eth_struct_dict) parser = BIRParser() if parser.eval_cond("0x800 == 0x8800", header, packet) != False: fail(0) if parser.eval_cond("0x0800 == 0x800", header, packet) != True: fail(1) if parser.eval_cond("0 == 0x0", header, packet) != True: fail(2) if parser.eval_cond("1 == 0x1", header, packet) != True: fail(3) if parser.eval_cond("(10 > 11--)", header, packet) != False: fail(4) if parser.eval_cond("10 >= 11--", header, packet) != True: fail(5) if parser.eval_inst("(~(0xA + 10) & 0xFF)", header, packet) != 235: fail(6) if parser.eval_inst("10++ + 11", header, packet) != 22: fail(7) if parser.eval_cond("(type_ == 0x0800)", header, packet) != True: fail(8) if parser.eval_cond("type_ != 0x0800", header, packet) != False: fail(9) if parser.eval_inst("(type_ + 1) & 0xFF00", header, packet) != 0x0800: fail(10) if parser.eval_inst("type_++ & 0xFF00", header, packet) != 0x0800: fail(11)
apache-2.0
-7,633,070,394,601,520,000
38.833333
76
0.65086
false
2.579137
false
false
false
FedoraScientific/salome-smesh
doc/salome/examples/creating_meshes_ex03.py
1
2218
# Change priority of submeshes in Mesh import salome salome.salome_init() import GEOM from salome.geom import geomBuilder geompy = geomBuilder.New(salome.myStudy) import SMESH, SALOMEDS from salome.smesh import smeshBuilder smesh = smeshBuilder.New(salome.myStudy) Box_1 = geompy.MakeBoxDXDYDZ(200, 200, 200) [Face_1,Face_2,Face_3,Face_4,Face_5,Face_6] = geompy.SubShapeAllSorted(Box_1, geompy.ShapeType["FACE"]) # create Mesh object on Box shape Mesh_1 = smesh.Mesh(Box_1) # assign mesh algorithms Regular_1D = Mesh_1.Segment() Nb_Segments_1 = Regular_1D.NumberOfSegments(20) Nb_Segments_1.SetDistrType( 0 ) MEFISTO_2D = Mesh_1.Triangle() Max_Element_Area_1 = MEFISTO_2D.MaxElementArea(1200) Tetrahedron = Mesh_1.Tetrahedron() Max_Element_Volume_1 = Tetrahedron.MaxElementVolume(40000) # create submesh and assign algorithms on Face_1 Regular_1D_1 = Mesh_1.Segment(geom=Face_1) Nb_Segments_2 = Regular_1D_1.NumberOfSegments(4) Nb_Segments_2.SetDistrType( 0 ) MEFISTO_2D_1 = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO,geom=Face_1) Length_From_Edges_2D = MEFISTO_2D_1.LengthFromEdges() SubMesh_1 = MEFISTO_2D_1.GetSubMesh() # create submesh and assign algorithms on Face_2 Regular_1D_2 = Mesh_1.Segment(geom=Face_2) Nb_Segments_3 = Regular_1D_2.NumberOfSegments(8) Nb_Segments_3.SetDistrType( 0 ) MEFISTO_2D_2 = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO,geom=Face_2) Length_From_Edges_2D_1 = MEFISTO_2D_2.LengthFromEdges() SubMesh_2 = MEFISTO_2D_2.GetSubMesh() # create submesh and assign algorithms on Face_3 Regular_1D_3 = Mesh_1.Segment(geom=Face_3) Nb_Segments_4 = Regular_1D_3.NumberOfSegments(12) Nb_Segments_4.SetDistrType( 0 ) MEFISTO_2D_3 = Mesh_1.Triangle(algo=smeshBuilder.MEFISTO,geom=Face_3) Length_From_Edges_2D_2 = MEFISTO_2D_3.LengthFromEdges() SubMesh_3 = MEFISTO_2D_3.GetSubMesh() # check exisiting submesh priority order [ [ SubMesh_1, SubMesh_3, SubMesh_2 ] ] = Mesh_1.GetMeshOrder() # set new submesh order isDone = Mesh_1.SetMeshOrder( [ [ SubMesh_1, SubMesh_2, SubMesh_3 ] ]) # compute mesh isDone = Mesh_1.Compute() # clear mesh result and compute with other submesh order Mesh_1.Clear() isDone = Mesh_1.SetMeshOrder( [ [ SubMesh_2, SubMesh_1, SubMesh_3 ] ]) isDone = Mesh_1.Compute()
lgpl-2.1
-7,202,010,096,023,731,000
34.774194
103
0.756087
false
2.464444
false
true
false
Scriptkiddi/Ankipubsub-Client
pubsub/models/Template.py
1
2259
__author__ = 'fritz' import json from pubsub.database.models import db, AnkiPubSubTemplate from copy import deepcopy class Template(): def __init__(self, name, answer_format, question_format, deck, ord, back_answer_format, back_question_format): try: db.connect() expression = (AnkiPubSubTemplate.answer_format == answer_format) & \ (AnkiPubSubTemplate.name == name) & \ (AnkiPubSubTemplate.question_format == question_format) & \ (AnkiPubSubTemplate.ord == ord) & \ (AnkiPubSubTemplate.back_answer_format == back_answer_format) & \ (AnkiPubSubTemplate.back_question_format == back_question_format) template = AnkiPubSubTemplate.select().where(expression).get() self.remote_id = template.remote_id except AnkiPubSubTemplate.DoesNotExist: self.remote_id = None finally: db.close() self.answer_format = answer_format self.name = name self.question_format = question_format # self.deck = deck self.ord = int(ord) self.back_answer_format = back_answer_format self.back_question_format = back_question_format def json(self): dic = deepcopy(self.__dict__) dic.update({"remote_id": str(self.remote_id)}) return json.dumps(dic) def save(self): db.connect() template, created = AnkiPubSubTemplate.get_or_create(remote_id=self.remote_id, answer_format=self.answer_format, name=self.name, question_format=self.question_format, ord=self.ord, back_answer_format=self.back_answer_format, back_question_format=self.back_question_format) db.close()
gpl-3.0
-4,366,288,978,875,604,000
42.442308
108
0.486056
false
4.806383
false
false
false
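Template.json in the record above serialises the instance by deep-copying __dict__ and stringifying remote_id before handing the dict to json.dumps. The stdlib-only sketch below shows the same pattern on a plain class, so it can be run without peewee or the AnkiPubSub database models; DummyTemplate is invented purely for illustration.

# Minimal illustration of the __dict__ -> deepcopy -> json.dumps pattern
# used by Template.json above (DummyTemplate is a made-up stand-in).
import json
from copy import deepcopy

class DummyTemplate(object):
    def __init__(self, name, ord_, remote_id=None):
        self.name = name
        self.ord = int(ord_)
        self.remote_id = remote_id  # may be an ObjectId-like value or None

    def json(self):
        dic = deepcopy(self.__dict__)                    # avoid mutating the live instance
        dic.update({"remote_id": str(self.remote_id)})   # make the id JSON-safe
        return json.dumps(dic)

print(DummyTemplate("Card 1", "0").json())
# {"name": "Card 1", "ord": 0, "remote_id": "None"}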
google/grr
grr/server/grr_response_server/flow_utils_test.py
1
2306
#!/usr/bin/env python """Tests for flow utils classes.""" from absl import app from grr_response_core.lib import rdfvalue from grr_response_core.lib.rdfvalues import client as rdf_client from grr_response_server import flow_utils from grr.test_lib import flow_test_lib from grr.test_lib import test_lib class TestInterpolatePath(flow_test_lib.FlowTestsBaseclass): """Tests for path interpolation.""" def _MakeKnowledgeBase(self): kb = rdf_client.KnowledgeBase() kb.users.Append( rdf_client.User( username="test", userdomain="TESTDOMAIN", full_name="test user", homedir="c:\\Users\\test", last_logon=rdfvalue.RDFDatetime.FromHumanReadable("2012-11-10"))) kb.users.Append( rdf_client.User( username="test2", userdomain="TESTDOMAIN", full_name="test user 2", homedir="c:\\Users\\test2", last_logon=100)) return kb def testBasicInterpolation(self): """Test Basic.""" kb = self._MakeKnowledgeBase() path = "{systemroot}\\test" new_path = flow_utils.InterpolatePath(path, kb, users=None) self.assertEqual(new_path.lower(), "c:\\windows\\test") new_path = flow_utils.InterpolatePath("{does_not_exist}", kb) self.assertEqual(new_path, "") def testUserInterpolation(self): """User interpolation returns a list of paths.""" kb = self._MakeKnowledgeBase() path = "{homedir}\\dir" new_path = flow_utils.InterpolatePath(path, kb, users=["test"]) self.assertEqual(new_path[0].lower(), "c:\\users\\test\\dir") path = "{systemroot}\\{last_logon}\\dir" new_path = flow_utils.InterpolatePath(path, kb, users=["test"]) self.assertEqual(new_path[0].lower(), "c:\\windows\\2012-11-10 00:00:00\\dir") path = "{homedir}\\a" new_path = flow_utils.InterpolatePath(path, kb, users=["test", "test2"]) self.assertLen(new_path, 2) self.assertEqual(new_path[0].lower(), "c:\\users\\test\\a") self.assertEqual(new_path[1].lower(), "c:\\users\\test2\\a") new_path = flow_utils.InterpolatePath( "{does_not_exist}", kb, users=["test"]) self.assertEqual(new_path, []) def main(argv): test_lib.main(argv) if __name__ == "__main__": app.run(main)
apache-2.0
2,136,674,623,070,125,000
31.027778
77
0.630095
false
3.436662
true
false
false
texastribune/tt_disposal_wells
example/example/settings.py
1
5210
# Django settings for example project. # Setup a ``project_dir`` function import os from dj_settings_helpers import create_project_dir project_dir = create_project_dir(os.path.join(os.path.dirname(__file__), '..', '..')) DEBUG = True TEMPLATE_DEBUG = DEBUG ADMINS = ( # ('Your Name', 'your_email@example.com'), ) MANAGERS = ADMINS import dj_database_url DATABASE_URL = os.environ.get('DATABASE_URL', 'sqlite:///%s' % project_dir('project.db')) DATABASES = {'default': dj_database_url.parse(DATABASE_URL), } # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/Chicago' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/home/media/media.lawrence.com/media/" MEDIA_ROOT = '' # URL that handles the media served from MEDIA_ROOT. Make sure to use a # trailing slash. # Examples: "http://media.lawrence.com/media/", "http://example.com/media/" MEDIA_URL = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/home/media/media.lawrence.com/static/" STATIC_ROOT = '' # URL prefix for static files. # Example: "http://media.lawrence.com/static/" STATIC_URL = '/static/' # Additional locations of static files STATICFILES_DIRS = ( # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # Make this unique, and don't share it with anybody. SECRET_KEY = 'drlfrd0yo@x^b7z(bnae5q=3bo(od!#5nsm0%l@-^9y=3l@9cu' # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', # Uncomment the next line for simple clickjacking protection: # 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'example.urls' # Python dotted path to the WSGI application used by Django's runserver. 
WSGI_APPLICATION = 'example.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. project_dir('templates'), ) INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', # Uncomment the next line to enable the admin: # 'django.contrib.admin', # Uncomment the next line to enable admin documentation: # 'django.contrib.admindocs', # Custom application being tested 'tt_disposal_wells', # The app with all of the tests and any example customizations 'example_usage', ) # A sample logging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } }
apache-2.0
-4,731,053,268,762,224,000
31.974684
88
0.702495
false
3.661279
false
false
false
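The settings module above builds DATABASES from a DATABASE_URL environment variable via dj_database_url.parse, with a local SQLite file as the fallback. The sketch below restates that pattern in isolation; it assumes the dj-database-url package is installed, and the URLs shown are made-up examples rather than project values.

# Rough sketch of the DATABASE_URL handling used in the settings module above.
import os
import dj_database_url

# Same fallback shape as the settings file: an env var if present,
# otherwise a local SQLite file.
DATABASE_URL = os.environ.get('DATABASE_URL', 'sqlite:///project.db')
DATABASES = {'default': dj_database_url.parse(DATABASE_URL)}

# A Postgres-style URL decomposes into the usual Django DATABASES keys
# (ENGINE, NAME, USER, PASSWORD, HOST, PORT).
print(dj_database_url.parse('postgres://user:secret@localhost:5432/wells'))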
eyaler/tensorpack
tensorpack/utils/palette.py
1
1914
# -*- coding: utf-8 -*- # File: palette.py import numpy as np __all__ = ['PALETTE_RGB'] # copied from https://stackoverflow.com/questions/2328339/how-to-generate-n-different-colors-for-any-natural-number-n PALETTE_HEX = [ "#000000", "#FFFF00", "#1CE6FF", "#FF34FF", "#FF4A46", "#008941", "#006FA6", "#A30059", "#FFDBE5", "#7A4900", "#0000A6", "#63FFAC", "#B79762", "#004D43", "#8FB0FF", "#997D87", "#5A0007", "#809693", "#FEFFE6", "#1B4400", "#4FC601", "#3B5DFF", "#4A3B53", "#FF2F80", "#61615A", "#BA0900", "#6B7900", "#00C2A0", "#FFAA92", "#FF90C9", "#B903AA", "#D16100", "#DDEFFF", "#000035", "#7B4F4B", "#A1C299", "#300018", "#0AA6D8", "#013349", "#00846F", "#372101", "#FFB500", "#C2FFED", "#A079BF", "#CC0744", "#C0B9B2", "#C2FF99", "#001E09", "#00489C", "#6F0062", "#0CBD66", "#EEC3FF", "#456D75", "#B77B68", "#7A87A1", "#788D66", "#885578", "#FAD09F", "#FF8A9A", "#D157A0", "#BEC459", "#456648", "#0086ED", "#886F4C", "#34362D", "#B4A8BD", "#00A6AA", "#452C2C", "#636375", "#A3C8C9", "#FF913F", "#938A81", "#575329", "#00FECF", "#B05B6F", "#8CD0FF", "#3B9700", "#04F757", "#C8A1A1", "#1E6E00", "#7900D7", "#A77500", "#6367A9", "#A05837", "#6B002C", "#772600", "#D790FF", "#9B9700", "#549E79", "#FFF69F", "#201625", "#72418F", "#BC23FF", "#99ADC0", "#3A2465", "#922329", "#5B4534", "#FDE8DC", "#404E55", "#0089A3", "#CB7E98", "#A4E804", "#324E72", "#6A3A4C", "#83AB58", "#001C1E", "#D1F7CE", "#004B28", "#C8D0F6", "#A3A489", "#806C66", "#222800", "#BF5650", "#E83000", "#66796D", "#DA007C", "#FF1A59", "#8ADBB4", "#1E0200", "#5B4E51", "#C895C5", "#320033", "#FF6832", "#66E1D3", "#CFCDAC", "#D0AC94", "#7ED379", "#012C58"] def _parse_hex_color(s): r = int(s[1:3], 16) g = int(s[3:5], 16) b = int(s[5:7], 16) return (r, g, b) PALETTE_RGB = np.asarray( list(map(_parse_hex_color, PALETTE_HEX)), dtype='int32')
apache-2.0
-6,845,373,950,139,188,000
49.368421
117
0.541797
false
2.117257
false
false
false
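PALETTE_RGB in the record above is simply a (126, 3) int32 array of visually distinct colours; a common way to use such a table is to index it modulo its length so arbitrarily many class ids still receive a colour. The sketch below reimplements the hex parsing on a few entries and shows that indexing pattern; it is a usage illustration under those assumptions, not part of the tensorpack API, and the label ids are invented.

# Usage sketch for a hex-colour palette like PALETTE_RGB above
# (standalone: only numpy is needed).
import numpy as np

def parse_hex_color(s):
    # "#RRGGBB" -> (r, g, b) as ints, same logic as _parse_hex_color above
    return (int(s[1:3], 16), int(s[3:5], 16), int(s[5:7], 16))

palette = np.asarray([parse_hex_color(c) for c in ("#000000", "#FFFF00", "#1CE6FF")],
                     dtype='int32')

labels = np.array([0, 1, 2, 3, 4])         # made-up class ids
colors = palette[labels % len(palette)]    # wrap around so any id gets a colour
print(colors)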
google-research/google-research
tf3d/instance_segmentation/model_utils.py
1
4801
# coding=utf-8 # Copyright 2021 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Instance segmentation model utility functions.""" import tensorflow as tf from tf3d import standard_fields from tf3d.instance_segmentation import postprocessor from tf3d.utils import mask_utils from tf3d.utils import voxel_utils def mask_valid_voxels(inputs, outputs): """Mask the voxels that are valid and in image view.""" valid_mask = mask_utils.num_voxels_mask(inputs=inputs) mask_utils.apply_mask_to_output_voxel_tensors( outputs=outputs, valid_mask=valid_mask) def mask_valid_points(inputs, outputs): """Mask the voxels that are valid and in image view.""" valid_mask = mask_utils.num_points_mask(inputs=inputs) mask_utils.apply_mask_to_output_point_tensors( outputs=outputs, valid_mask=valid_mask) def postprocess(inputs, outputs, is_training, num_furthest_voxel_samples, sampler_score_vs_distance_coef, embedding_similarity_strategy, embedding_similarity_threshold, score_threshold, apply_nms, nms_iou_threshold): """Post-processor function. Args: inputs: A dictionary containing input tensors. outputs: A dictionary containing predicted tensors. is_training: If during training stage or not. num_furthest_voxel_samples: Number of voxels to be sampled using furthest voxel sampling in the postprocessor. sampler_score_vs_distance_coef: The coefficient that balances the weight between furthest voxel sampling and highest score sampling in the postprocessor. embedding_similarity_strategy: Embedding similarity strategy. embedding_similarity_threshold: Similarity threshold used to decide if two point embedding vectors belong to the same instance. score_threshold: Instance score threshold used throughout postprocessing. apply_nms: If True, it will apply non-maximum suppression to the final predictions. nms_iou_threshold: Intersection over union threshold used in non-maximum suppression. """ if not is_training: # Squeeze output voxel properties. for key in standard_fields.get_output_voxel_fields(): if key in outputs and outputs[key] is not None: outputs[key] = tf.squeeze(outputs[key], axis=0) # Squeeze output point properties. for key in standard_fields.get_output_point_fields(): if key in outputs and outputs[key] is not None: outputs[key] = tf.squeeze(outputs[key], axis=0) # Squeeze output object properties. 
for key in standard_fields.get_output_object_fields(): if key in outputs and outputs[key] is not None: outputs[key] = tf.squeeze(outputs[key], axis=0) # Mask the valid voxels mask_valid_voxels(inputs=inputs, outputs=outputs) # Mask the valid points mask_valid_points(inputs=inputs, outputs=outputs) # NMS postprocessor.postprocess( outputs=outputs, num_furthest_voxel_samples=num_furthest_voxel_samples, sampler_score_vs_distance_coef=sampler_score_vs_distance_coef, embedding_similarity_strategy=embedding_similarity_strategy, embedding_similarity_threshold=embedding_similarity_threshold, apply_nms=apply_nms, nms_score_threshold=score_threshold, nms_iou_threshold=nms_iou_threshold) # Add instance segment point masks at eval time if standard_fields.InputDataFields.points_to_voxel_mapping in inputs: instance_segments_point_mask = ( voxel_utils.sparse_voxel_grid_to_pointcloud( voxel_features=tf.expand_dims( tf.transpose(outputs[standard_fields.DetectionResultFields .instance_segments_voxel_mask]), axis=0), segment_ids=inputs[ standard_fields.InputDataFields.points_to_voxel_mapping], num_valid_voxels=inputs[ standard_fields.InputDataFields.num_valid_voxels], num_valid_points=inputs[ standard_fields.InputDataFields.num_valid_points])) outputs[standard_fields.DetectionResultFields .instance_segments_point_mask] = tf.transpose( tf.squeeze(instance_segments_point_mask, axis=0))
apache-2.0
5,561,826,398,023,108,000
41.486726
78
0.705478
false
3.954695
false
false
false
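The tf3d postprocess function above repeatedly squeezes away a singleton batch axis and then masks out invalid voxels and points. The tiny TensorFlow sketch below shows just those two array operations in isolation; it requires TensorFlow in eager mode, and the tensors and validity mask are made up rather than taken from the tf3d pipeline.

# Tiny sketch of the two array operations the postprocessor above relies on:
# dropping the singleton batch axis and masking valid entries.
import tensorflow as tf

scores = tf.constant([[0.9, 0.2, 0.7, 0.1]])      # shape (1, 4): batch of one
scores = tf.squeeze(scores, axis=0)               # shape (4,), as in postprocess()

valid = tf.constant([True, True, False, True])    # stand-in for a num-voxels mask
print(tf.boolean_mask(scores, valid).numpy())     # -> [0.9 0.2 0.1]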
simbtrix/mxnix
project/crossSectionView/rectangleView.py
1
4807
''' Created on 14.03.2016 @author: mkennert ''' from kivy.properties import NumericProperty from kivy.uix.gridlayout import GridLayout from crossSectionView.aview import AView from ownComponents.design import Design from ownComponents.ownGraph import OwnGraph from plot.dashedLine import DashedLine from plot.line import LinePlot class CSRectangleView(GridLayout, AView): ''' the class CSRectangleView was developed to show the rectangle-shape of the cross-section ''' # height of the cross-section ch = NumericProperty(0.5) # width of the cross-section cw = NumericProperty(0.25) ''' constructor ''' def __init__(self, **kwargs): super(CSRectangleView, self).__init__(**kwargs) self.cols = 1 ''' the method create_graph create the graph, where you can add the layers. the method should be called only once at the beginning ''' def create_graph(self): self.epsX = self.cw / 2e1 self.graph = OwnGraph(xlabel=self.xlabelStr, ylabel=self.ylabelStr, x_ticks_major=0.05, y_ticks_major=0.05, y_grid_label=True, x_grid_label=True, padding=5, xmin=0, xmax=self.cw + 2 * self.epsX, ymin=0, ymax=1.04 * self.ch) self.add_widget(self.graph) self.p = LinePlot(color=[0, 0, 0]) self.p.points = self.draw_rectangle() self.graph.add_plot(self.p) ''' the method add_layer was developed to add new layer at the cross section ''' def add_layer(self, y, csArea, material): #if the y-coordinate is out of range if y >= self.ch or y <= 0: self.csShape.show_error_message() else: line = DashedLine(color=[1, 0, 0, 1], points=[(self.epsX, y), (self.cw + self.epsX, y)]) self.create_layer(y, csArea, self.cw, material, line) ''' edit a layer which is already exist ''' def edit_layer(self, y, material, csArea): #if the y-coordinate is out of range if y >= self.ch or y <= 0: self.csShape.show_error_message() else: self.focusLayer.line.points = [(self.epsX, y), (self.cw + self.epsX, y)] self.update_layer_properties(y, material, csArea) ''' add a bar to the cross-section ''' def add_bar(self, x, y, csArea, material): epsY = self.ch / Design.barProcent epsX = self.cw / Design.barProcent #if the coordinates are out of range if y + epsY > self.ch or y - epsY < 0 or x + epsX > self.cw + self.epsX or x - epsX < self.epsX: self.csShape.show_error_message() else: self.create_bar(x, y, csArea, material, epsX, epsY) ''' edit a bar which is already exist ''' def edit_bar(self, x, y, csArea, material): epsY = self.ch / Design.barProcent epsX = self.cw / Design.barProcent #if the coordinates are out of range if y + epsY > self.ch or y - epsY < 0 or x + epsX > self.cw + self.epsX or x - epsX < self.epsX: self.csShape.show_error_message() else: self.update_bar_properties(x, y, csArea, material, epsX, epsY) ''' the method update_height change the height of the cross section shape and update the layers ''' def update_height(self, value): self.ch = value self.graph.y_ticks_major = value / 5. self.graph.ymax = self.ch * 1.04 self.p.points = self.draw_rectangle() self.delete_reinforcement() ''' the method update_width change the width of the cross section shape and update the layers ''' def update_width(self, value): self.cw = value self.epsX = self.cw / 2e1 self.graph.x_ticks_major = value / 5. 
self.graph.xmax = self.cw + 2 * self.epsX self.p.points = self.draw_rectangle() self.delete_reinforcement() ''' give the user the possibility to focus a layer or a bar ''' def on_touch_down(self, touch): x0, y0 = self.graph._plot_area.pos # position of the lowerleft gw, gh = self.graph._plot_area.size # graph size x = (touch.x - x0) / gw * (self.cw + 2 * self.epsX) y = (touch.y - y0) / gh * self.graph.ymax self.touch_reaction(x, y, self.cw, self.ch) ''' draw the rectangle ''' def draw_rectangle(self): return [(self.epsX, 0), (self.epsX, self.ch), (self.cw + self.epsX, self.ch), (self.cw + self.epsX, 0), (self.epsX, 0)]
gpl-3.0
8,672,656,484,019,362,000
31.158621
127
0.564801
false
3.550222
false
false
false
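The on_touch_down handler above converts a touch position in widget pixels into cross-section coordinates with a linear rescale against the plot area's origin and size. The dependency-free sketch below spells out that mapping so it can be sanity-checked without Kivy; all pixel sizes and the touch point are invented, while the data ranges match the class defaults (cw + 2*epsX = 0.275, 1.04*ch = 0.52).

# Dependency-free restatement of the pixel -> data-coordinate mapping used in
# CSRectangleView.on_touch_down above.
def pixel_to_data(touch_x, touch_y, plot_pos, plot_size, xmax, ymax):
    x0, y0 = plot_pos          # lower-left corner of the plot area, in pixels
    gw, gh = plot_size         # plot area size, in pixels
    x = (touch_x - x0) / gw * xmax   # rescale into graph data units
    y = (touch_y - y0) / gh * ymax
    return x, y

# A touch in the middle of a 400x300 px plot area maps to the middle of the
# data ranges.
print(pixel_to_data(250, 190, (50, 40), (400, 300), 0.275, 0.52))  # ~(0.1375, 0.26)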
WillisXChen/django-oscar
oscar/lib/python2.7/site-packages/phonenumbers/shortnumberinfo.py
1
15445
"""Methods for getting information about short phone numbers, such as short codes and emergency numbers. Note most commercial short numbers are not handled here, but by phonenumberutil.py """ # Based on original Java code: # java/src/com/google/i18n/phonenumbers/ShortNumberInfo.java # Copyright (C) 2013 The Libphonenumber Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import re from .re_util import fullmatch from .util import U_EMPTY_STRING from .phonemetadata import PhoneMetadata from .phonenumberutil import _extract_possible_number, _PLUS_CHARS_PATTERN from .phonenumberutil import normalize_digits_only, region_codes_for_country_code from .phonenumberutil import national_significant_number from .phonenumberutil import _is_number_possible_for_desc, _is_number_matching_desc # In these countries, if extra digits are added to an emergency number, it no longer connects # to the emergency service. _REGIONS_WHERE_EMERGENCY_NUMBERS_MUST_BE_EXACT = set(["BR", "CL", "NI"]) class ShortNumberCost(object): """Cost categories of short numbers.""" TOLL_FREE = 0 STANDARD_RATE = 1 PREMIUM_RATE = 2 UNKNOWN_COST = 3 def is_possible_short_number_for_region(short_number, region_dialing_from): """Check whether a short number is a possible number when dialled from a region, given the number in the form of a string, and the region where the number is dialed from. This provides a more lenient check than is_valid_short_number_for_region. Arguments: short_number -- the short number to check as a string region_dialing_from -- the region from which the number is dialed Return whether the number is a possible short number. """ metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from) if metadata is None: return False general_desc = metadata.general_desc return _is_number_possible_for_desc(short_number, general_desc) def is_possible_short_number(numobj): """Check whether a short number is a possible number. If a country calling code is shared by multiple regions, this returns True if it's possible in any of them. This provides a more lenient check than is_valid_short_number. Arguments: numobj -- the short number to check Return whether the number is a possible short number. """ region_codes = region_codes_for_country_code(numobj.country_code) short_number = national_significant_number(numobj) for region in region_codes: metadata = PhoneMetadata.short_metadata_for_region(region) if _is_number_possible_for_desc(short_number, metadata.general_desc): return True return False def is_valid_short_number_for_region(short_number, region_dialing_from): """Tests whether a short number matches a valid pattern in a region. Note that this doesn't verify the number is actually in use, which is impossible to tell by just looking at the number itself. 
Arguments: short_number -- the short number to check as a string region_dialing_from -- the region from which the number is dialed Return whether the short number matches a valid pattern """ metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from) if metadata is None: return False general_desc = metadata.general_desc if (general_desc.national_number_pattern is None or not _is_number_matching_desc(short_number, general_desc)): return False short_number_desc = metadata.short_code if short_number_desc.national_number_pattern is None: # pragma no cover return False return _is_number_matching_desc(short_number, short_number_desc) def is_valid_short_number(numobj): """Tests whether a short number matches a valid pattern. If a country calling code is shared by multiple regions, this returns True if it's valid in any of them. Note that this doesn't verify the number is actually in use, which is impossible to tell by just looking at the number itself. See is_valid_short_number_for_region for details. Arguments: numobj - the short number for which we want to test the validity Return whether the short number matches a valid pattern """ region_codes = region_codes_for_country_code(numobj.country_code) short_number = national_significant_number(numobj) region_code = _region_code_for_short_number_from_region_list(numobj, region_codes) if len(region_codes) > 1 and region_code is not None: # If a matching region had been found for the phone number from among two or more regions, # then we have already implicitly verified its validity for that region. return True return is_valid_short_number_for_region(short_number, region_code) def expected_cost_for_region(short_number, region_dialing_from): """Gets the expected cost category of a short number when dialled from a region (however, nothing is implied about its validity). If it is important that the number is valid, then its validity must first be checked using is_valid_short_number_for_region. Note that emergency numbers are always considered toll-free. Example usage: short_number = "110" region_code = "FR" if phonenumbers.is_valid_short_number_for_region(short_number, region_code): cost = phonenumbers.expected_cost(short_number, region_code) # ShortNumberCost # Do something with the cost information here. Arguments: short_number -- the short number for which we want to know the expected cost category region_dialing_from -- the region from which the number is dialed Return the expected cost category for that region of the short number. Returns UNKNOWN_COST if the number does not match a cost category. Note that an invalid number may match any cost category. """ # Note that region_dialing_from may be None, in which case metadata will also be None. metadata = PhoneMetadata.short_metadata_for_region(region_dialing_from) if metadata is None: return ShortNumberCost.UNKNOWN_COST # The cost categories are tested in order of decreasing expense, since if # for some reason the patterns overlap the most expensive matching cost # category should be returned. if _is_number_matching_desc(short_number, metadata.premium_rate): return ShortNumberCost.PREMIUM_RATE if _is_number_matching_desc(short_number, metadata.standard_rate): return ShortNumberCost.STANDARD_RATE if _is_number_matching_desc(short_number, metadata.toll_free): return ShortNumberCost.TOLL_FREE if is_emergency_number(short_number, region_dialing_from): # Emergency numbers are implicitly toll-free. 
return ShortNumberCost.TOLL_FREE return ShortNumberCost.UNKNOWN_COST def expected_cost(numobj): """Gets the expected cost category of a short number (however, nothing is implied about its validity). If the country calling code is unique to a region, this method behaves exactly the same as get_expected_cost_for_region. However, if the country calling code is shared by multiple regions, then it returns the highest cost in the sequence PREMIUM_RATE, UNKNOWN_COST, STANDARD_RATE, TOLL_FREE. The reason for the position of UNKNOWN_COST in this order is that if a number is UNKNOWN_COST in one region but STANDARD_RATE or TOLL_FREE in another, its expected cost cannot be estimated as one of the latter since it might be a PREMIUM_RATE number. For example, if a number is STANDARD_RATE in the US, but TOLL_FREE in Canada, the expected cost returned by this method will be STANDARD_RATE, since the NANPA countries share the same country calling code. Note: If the region from which the number is dialed is known, it is highly preferable to call expected_cost_for_region instead. Arguments: numobj -- the short number for which we want to know the expected cost category Return the highest expected cost category of the short number in the region(s) with the given country calling code """ region_codes = region_codes_for_country_code(numobj.country_code) if len(region_codes) == 0: return ShortNumberCost.UNKNOWN_COST short_number = national_significant_number(numobj) if len(region_codes) == 1: return expected_cost_for_region(short_number, region_codes[0]) cost = ShortNumberCost.TOLL_FREE for region_code in region_codes: cost_for_region = expected_cost_for_region(short_number, region_code) if cost_for_region == ShortNumberCost.PREMIUM_RATE: return ShortNumberCost.PREMIUM_RATE elif cost_for_region == ShortNumberCost.UNKNOWN_COST: return ShortNumberCost.UNKNOWN_COST elif cost_for_region == ShortNumberCost.STANDARD_RATE: if cost != ShortNumberCost.UNKNOWN_COST: cost = ShortNumberCost.STANDARD_RATE elif cost_for_region == ShortNumberCost.TOLL_FREE: # Do nothing pass else: # pragma no cover raise Exception("Unrecognized cost for region: %s", cost_for_region) return cost def _region_code_for_short_number_from_region_list(numobj, region_codes): """Helper method to get the region code for a given phone number, from a list of possible region codes. If the list contains more than one region, the first region for which the number is valid is returned. """ if len(region_codes) == 0: return None elif len(region_codes) == 1: return region_codes[0] national_number = national_significant_number(numobj) for region_code in region_codes: metadata = PhoneMetadata.short_metadata_for_region(region_code) if metadata is not None and _is_number_matching_desc(national_number, metadata.short_code): # The number is valid for this region. return region_code return None def _example_short_number(region_code): """Gets a valid short number for the specified region. Arguments: region_code -- the region for which an example short number is needed. Returns a valid short number for the specified region. Returns an empty string when the metadata does not contain such information. """ metadata = PhoneMetadata.short_metadata_for_region(region_code) if metadata is None: return U_EMPTY_STRING desc = metadata.short_code if desc.example_number is not None: return desc.example_number return U_EMPTY_STRING def _example_short_number_for_cost(region_code, cost): """Gets a valid short number for the specified cost category. 
Arguments: region_code -- the region for which an example short number is needed. cost -- the cost category of number that is needed. Returns a valid short number for the specified region and cost category. Returns an empty string when the metadata does not contain such information, or the cost is UNKNOWN_COST. """ metadata = PhoneMetadata.short_metadata_for_region(region_code) if metadata is None: return U_EMPTY_STRING desc = None if cost == ShortNumberCost.TOLL_FREE: desc = metadata.toll_free elif cost == ShortNumberCost.STANDARD_RATE: desc = metadata.standard_rate elif cost == ShortNumberCost.PREMIUM_RATE: desc = metadata.premium_rate else: # ShortNumberCost.UNKNOWN_COST numbers are computed by the process of # elimination from the other cost categoried. pass if desc is not None and desc.example_number is not None: return desc.example_number return U_EMPTY_STRING def connects_to_emergency_number(number, region_code): """Returns whether the number might be used to connect to an emergency service in the given region. This function takes into account cases where the number might contain formatting, or might have additional digits appended (when it is okay to do that in the region specified). Arguments: number -- The phone number to test. region_code -- The region where the phone number is being dialed. Returns whether the number might be used to connect to an emergency service in the given region. """ return _matches_emergency_number_helper(number, region_code, True) # Allows prefix match def is_emergency_number(number, region_code): """Returns true if the number exactly matches an emergency service number in the given region. This method takes into account cases where the number might contain formatting, but doesn't allow additional digits to be appended. Arguments: number -- The phone number to test. region_code -- The region where the phone number is being dialed. Returns if the number exactly matches an emergency services number in the given region. """ return _matches_emergency_number_helper(number, region_code, False) # Doesn't allow prefix match def _matches_emergency_number_helper(number, region_code, allow_prefix_match): number = _extract_possible_number(number) if _PLUS_CHARS_PATTERN.match(number): # Returns False if the number starts with a plus sign. We don't # believe dialing the country code before emergency numbers # (e.g. +1911) works, but later, if that proves to work, we can add # additional logic here to handle it. return False metadata = PhoneMetadata.short_metadata_for_region(region_code.upper(), None) if metadata is None or metadata.emergency is None: return False emergency_number_pattern = re.compile(metadata.emergency.national_number_pattern) normalized_number = normalize_digits_only(number) if not allow_prefix_match or region_code in _REGIONS_WHERE_EMERGENCY_NUMBERS_MUST_BE_EXACT: return fullmatch(emergency_number_pattern, normalized_number) is not None else: return emergency_number_pattern.match(normalized_number) is not None def is_carrier_specific(numobj): """Given a valid short number, determines whether it is carrier-specific (however, nothing is implied about its validity). If it is important that the number is valid, then its validity must first be checked using is_valid_short_number or is_valid_short_number_for_region. Arguments: numobj -- the valid short number to check Returns whether the short number is carrier-specific (assuming the input was a valid short number). 
""" region_codes = region_codes_for_country_code(numobj.country_code) region_code = _region_code_for_short_number_from_region_list(numobj, region_codes) national_number = national_significant_number(numobj) metadata = PhoneMetadata.short_metadata_for_region(region_code) return (metadata is not None and _is_number_matching_desc(national_number, metadata.carrier_specific))
bsd-3-clause
3,237,084,498,279,031,300
41.665746
106
0.722888
false
3.990956
true
false
false
tomdyson/wagtail-modeltranslation
wagtail_modeltranslation/fields.py
1
16968
# -*- coding: utf-8 -*- from django import forms from django.core.exceptions import ImproperlyConfigured from django.db.models import fields from wagtail.wagtailcore.fields import StreamField from django.utils import six from wagtail_modeltranslation import settings as mt_settings from wagtail_modeltranslation.utils import ( get_language, build_localized_fieldname, build_localized_verbose_name, resolution_order) from wagtail_modeltranslation.widgets import ClearableWidgetWrapper SUPPORTED_FIELDS = ( fields.CharField, # Above implies also CommaSeparatedIntegerField, EmailField, FilePathField, SlugField # and URLField as they are subclasses of CharField. fields.TextField, fields.IntegerField, # Above implies also BigIntegerField, SmallIntegerField, PositiveIntegerField and # PositiveSmallIntegerField, as they are subclasses of IntegerField. fields.BooleanField, fields.NullBooleanField, fields.FloatField, fields.DecimalField, fields.IPAddressField, fields.GenericIPAddressField, fields.DateField, fields.DateTimeField, fields.TimeField, fields.files.FileField, fields.files.ImageField, fields.related.ForeignKey, # Above implies also OneToOneField # Wagtail StreamField StreamField ) class NONE: """ Used for fallback options when they are not provided (``None`` can be given as a fallback or undefined value) or to mark that a nullable value is not yet known and needs to be computed (e.g. field default). """ pass def create_translation_field(model, field_name, lang, empty_value): """ Translation field factory. Returns a ``TranslationField`` based on a fieldname and a language. The list of supported fields can be extended by defining a tuple of field names in the projects settings.py like this:: MODELTRANSLATION_CUSTOM_FIELDS = ('MyField', 'MyOtherField',) If the class is neither a subclass of fields in ``SUPPORTED_FIELDS``, nor in ``CUSTOM_FIELDS`` an ``ImproperlyConfigured`` exception will be raised. """ if empty_value not in ('', 'both', None, NONE): raise ImproperlyConfigured('%s is not a valid empty_value.' % empty_value) field = model._meta.get_field(field_name) cls_name = field.__class__.__name__ if not (isinstance(field, SUPPORTED_FIELDS) or cls_name in mt_settings.CUSTOM_FIELDS): raise ImproperlyConfigured( '%s is not supported by modeltranslation.' % cls_name) translation_class = field_factory(field.__class__) return translation_class(translated_field=field, language=lang, empty_value=empty_value) def field_factory(baseclass): class TranslationFieldSpecific(TranslationField, baseclass): pass # Reflect baseclass name of returned subclass TranslationFieldSpecific.__name__ = 'Translation%s' % baseclass.__name__ return TranslationFieldSpecific class TranslationField(object): """ The translation field functions as a proxy to the original field which is wrapped. For every field defined in the model's ``TranslationOptions`` localized versions of that field are added to the model depending on the languages given in ``settings.LANGUAGES``. If for example there is a model ``News`` with a field ``title`` which is registered for translation and the ``settings.LANGUAGES`` contains the ``de`` and ``en`` languages, the fields ``title_de`` and ``title_en`` will be added to the model class. These fields are realized using this descriptor. The translation field needs to know which language it contains therefore that needs to be specified when the field is created. 
""" def __init__(self, translated_field, language, empty_value, *args, **kwargs): from wagtail_modeltranslation.translator import translator # Update the dict of this field with the content of the original one # This might be a bit radical?! Seems to work though... self.__dict__.update(translated_field.__dict__) # Store the originally wrapped field for later self.translated_field = translated_field self.language = language self.empty_value = empty_value if empty_value is NONE: self.empty_value = None if translated_field.null else '' # Default behaviour is that all translations are optional if not isinstance(self, fields.BooleanField): # TODO: Do we really want to enforce null *at all*? Shouldn't this # better honour the null setting of the translated field? self.null = True self.blank = True # Take required_languages translation option into account trans_opts = translator.get_options_for_model(self.model) if trans_opts.required_languages: required_languages = trans_opts.required_languages if isinstance(trans_opts.required_languages, (tuple, list)): # All fields if self.language in required_languages: # self.null = False self.blank = False else: # Certain fields only # Try current language - if not present, try 'default' key try: req_fields = required_languages[self.language] except KeyError: req_fields = required_languages.get('default', ()) if self.name in req_fields: # TODO: We might have to handle the whole thing through the # FieldsAggregationMetaClass, as fields can be inherited. # self.null = False self.blank = False # Adjust the name of this field to reflect the language self.attname = build_localized_fieldname(self.translated_field.name, language) self.name = self.attname if self.translated_field.db_column: self.db_column = build_localized_fieldname(self.translated_field.db_column, language) self.column = self.db_column # Copy the verbose name and append a language suffix # (will show up e.g. in the admin). self.verbose_name = build_localized_verbose_name(translated_field.verbose_name, language) # ForeignKey support - rewrite related_name if self.rel and self.related and not self.rel.is_hidden(): import copy current = self.related.get_accessor_name() self.rel = copy.copy(self.rel) # Since fields cannot share the same rel object. # self.related doesn't need to be copied, as it will be recreated in # ``RelatedField.do_related_class`` if self.rel.related_name is None: # For implicit related_name use different query field name loc_related_query_name = build_localized_fieldname( self.related_query_name(), self.language) self.related_query_name = lambda: loc_related_query_name self.rel.related_name = build_localized_fieldname(current, self.language) self.rel.field = self # Django 1.6 if hasattr(self.rel.to._meta, '_related_objects_cache'): del self.rel.to._meta._related_objects_cache # Django 1.5 changed definition of __hash__ for fields to be fine with hash requirements. # It spoiled our machinery, since TranslationField has the same creation_counter as its # original field and fields didn't get added to sets. 
# So here we override __eq__ and __hash__ to fix the issue while retaining fine with # http://docs.python.org/2.7/reference/datamodel.html#object.__hash__ def __eq__(self, other): if isinstance(other, fields.Field): return (self.creation_counter == other.creation_counter and self.language == getattr(other, 'language', None)) return super(TranslationField, self).__eq__(other) def __ne__(self, other): return not self.__eq__(other) def __hash__(self): return hash((self.creation_counter, self.language)) def formfield(self, *args, **kwargs): """ Returns proper formfield, according to empty_values setting (only for ``forms.CharField`` subclasses). There are 3 different formfields: - CharField that stores all empty values as empty strings; - NullCharField that stores all empty values as None (Null); - NullableField that can store both None and empty string. By default, if no empty_values was specified in model's translation options, NullCharField would be used if the original field is nullable, CharField otherwise. This can be overridden by setting empty_values to '' or None. Setting 'both' will result in NullableField being used. Textual widgets (subclassing ``TextInput`` or ``Textarea``) used for nullable fields are enriched with a clear checkbox, allowing ``None`` values to be preserved rather than saved as empty strings. The ``forms.CharField`` somewhat surprising behaviour is documented as a "won't fix": https://code.djangoproject.com/ticket/9590. """ formfield = super(TranslationField, self).formfield(*args, **kwargs) if isinstance(formfield, forms.CharField): if self.empty_value is None: from wagtail_modeltranslation.forms import NullCharField form_class = formfield.__class__ kwargs['form_class'] = type( 'Null%s' % form_class.__name__, (NullCharField, form_class), {}) formfield = super(TranslationField, self).formfield(*args, **kwargs) elif self.empty_value == 'both': from wagtail_modeltranslation.forms import NullableField form_class = formfield.__class__ kwargs['form_class'] = type( 'Nullable%s' % form_class.__name__, (NullableField, form_class), {}) formfield = super(TranslationField, self).formfield(*args, **kwargs) if isinstance(formfield.widget, (forms.TextInput, forms.Textarea)): formfield.widget = ClearableWidgetWrapper(formfield.widget) return formfield def save_form_data(self, instance, data, check=True): # Allow 3rd-party apps forms to be saved using only translated field name. # When translated field (e.g. 'name') is specified and translation field (e.g. 'name_en') # not, we assume that form was saved without knowledge of modeltranslation and we make # things right: # Translated field is saved first, settings respective translation field value. Then # translation field is being saved without value - and we handle this here (only for # active language). # Questionable fields are stored in special variable, which is later handled by clean_fields # method on the model. 
        if check and self.language == get_language() and getattr(instance, self.name) and not data:
            if not hasattr(instance, '_mt_form_pending_clear'):
                instance._mt_form_pending_clear = {}
            instance._mt_form_pending_clear[self.name] = data
        else:
            super(TranslationField, self).save_form_data(instance, data)

    def deconstruct(self):
        name, path, args, kwargs = self.translated_field.deconstruct()
        if self.null is True:
            kwargs.update({'null': True})
        if 'db_column' in kwargs:
            kwargs['db_column'] = self.db_column
        return six.text_type(self.name), path, args, kwargs

    def south_field_triple(self):
        """
        Returns a suitable description of this field for South.
        """
        # We'll just introspect the _actual_ field.
        from south.modelsinspector import introspector
        try:
            # Check if the field provides its own 'field_class':
            field_class = self.translated_field.south_field_triple()[0]
        except AttributeError:
            field_class = '%s.%s' % (self.translated_field.__class__.__module__,
                                     self.translated_field.__class__.__name__)
        args, kwargs = introspector(self)
        # That's our definition!
        return (field_class, args, kwargs)


class TranslationFieldDescriptor(object):
    """
    A descriptor used for the original translated field.
    """
    def __init__(self, field, fallback_languages=None, fallback_value=NONE,
                 fallback_undefined=NONE):
        """
        Stores fallback options and the original field, so we know its name
        and default.
        """
        self.field = field
        self.fallback_languages = fallback_languages
        self.fallback_value = fallback_value
        self.fallback_undefined = fallback_undefined

    def __set__(self, instance, value):
        """
        Updates the translation field for the current language.
        """
        if getattr(instance, '_mt_init', False):
            # When assignment takes place in model instance constructor, don't set value.
            # This is essential for only/defer to work, but I think it's sensible anyway.
            return
        loc_field_name = build_localized_fieldname(self.field.name, get_language())
        setattr(instance, loc_field_name, value)

    def meaningful_value(self, val, undefined):
        """
        Check if val is considered non-empty.
        """
        if isinstance(val, fields.files.FieldFile):
            return val.name and not (
                isinstance(undefined, fields.files.FieldFile) and val == undefined)
        return val is not None and val != undefined

    def __get__(self, instance, owner):
        """
        Returns value from the translation field for the current language, or
        value for another language according to fallback languages, or the
        custom fallback value, or field's default value.
        """
        if instance is None:
            return self
        default = NONE
        undefined = self.fallback_undefined
        if undefined is NONE:
            default = self.field.get_default()
            undefined = default
        langs = resolution_order(get_language(), self.fallback_languages)
        for lang in langs:
            loc_field_name = build_localized_fieldname(self.field.name, lang)
            val = getattr(instance, loc_field_name, None)
            if self.meaningful_value(val, undefined):
                return val
        if mt_settings.ENABLE_FALLBACKS and self.fallback_value is not NONE:
            return self.fallback_value
        else:
            if default is NONE:
                default = self.field.get_default()
            # Some fields like FileField behave strangely, as their get_default() doesn't return
            # instance of attr_class, but rather None or ''.
            # Normally this case is handled in the descriptor, but since we have overridden it, we
            # must mock it up.
if (isinstance(self.field, fields.files.FileField) and not isinstance(default, self.field.attr_class)): return self.field.attr_class(instance, self.field, default) return default class TranslatedRelationIdDescriptor(object): """ A descriptor used for the original '_id' attribute of a translated ForeignKey field. """ def __init__(self, field_name, fallback_languages): self.field_name = field_name # The name of the original field (excluding '_id') self.fallback_languages = fallback_languages def __set__(self, instance, value): lang = get_language() loc_field_name = build_localized_fieldname(self.field_name, lang) # Localized field name with '_id' loc_attname = instance._meta.get_field(loc_field_name).get_attname() setattr(instance, loc_attname, value) def __get__(self, instance, owner): if instance is None: return self langs = resolution_order(get_language(), self.fallback_languages) for lang in langs: loc_field_name = build_localized_fieldname(self.field_name, lang) # Localized field name with '_id' loc_attname = instance._meta.get_field(loc_field_name).get_attname() val = getattr(instance, loc_attname, None) if val is not None: return val return None class LanguageCacheSingleObjectDescriptor(object): """ A Mixin for RelatedObjectDescriptors which use current language in cache lookups. """ accessor = None # needs to be set on instance @property def cache_name(self): lang = get_language() cache = build_localized_fieldname(self.accessor, lang) return "_%s_cache" % cache
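
# --- Illustrative registration sketch (not part of the original module) ---
# A hedged example of how a model field ends up with per-language clones built
# by field_factory/create_translation_field above. It assumes this fork keeps
# django-modeltranslation's registration API (translator, TranslationOptions)
# in wagtail_modeltranslation.translator; the News model and 'title' field are
# hypothetical.
from wagtail_modeltranslation.translator import translator, TranslationOptions
from myapp.models import News  # hypothetical model with a CharField named 'title'


class NewsTranslationOptions(TranslationOptions):
    fields = ('title',)


translator.register(News, NewsTranslationOptions)
# With settings.LANGUAGES containing 'de' and 'en', News now carries 'title_de'
# and 'title_en', each a TranslationField wrapping the original 'title'
# (names derived via build_localized_fieldname, as in the code above).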
bsd-3-clause
1,340,835,948,762,362,000
43.1875
100
0.643388
false
4.546624
false
false
false
burrsettles/ml-talks-duolingo
03_clustering/voting_em.py
1
3646
""" Burr Settles Duolingo ML Dev Talk #3: Clustering EM-GMM (expectaction maximization with Gaussian mixture models) clustering example using scikit-learn. """ import argparse import math import json import numpy as np from bs4 import BeautifulSoup from sklearn.mixture import GaussianMixture # cluster colors (for map visualizations, up to 8) COLORS = '#56A9F6 #73BE49 #F4D23E #F18E2E #EA5E5B #B26EDF #DDDEE0 #53585F'.split() def hex_to_rgb(value): """Return (red, green, blue) for the color given as #rrggbb.""" value = value.lstrip('#') lv = len(value) return tuple(int(value[i:i + lv // 3], 16) for i in range(0, lv, lv // 3)) def rgb_to_hex(red, green, blue): """Return color as #rrggbb for the given color values.""" return '#%02x%02x%02x' % (red, green, blue) def read_vote_data(votingfile): features = None states = [] abbr = [] matrix = [] with open(votingfile, 'rU') as ins: for line in ins: bits = line.strip().split(',') if features is None: features = bits[2:] else: states.append(bits[0]) abbr.append(bits[1]) matrix.append([float(x) for x in bits[2:]]) return features, states, abbr, np.array(matrix) def make_map_file(mapfile, state_cluster_map, num_clusters=None): num_clusters = num_clusters or max(state_cluster_map.values())+1 svg = open(mapfile, 'r').read() soup = BeautifulSoup(svg, "html5lib") paths = soup.findAll('path') for p in paths: if p['id'] in state_cluster_map.keys(): dist = list(state_cluster_map[p['id']]) dist = [math.sqrt(math.sqrt(math.sqrt(math.sqrt(x)))) for x in dist] dist = [x / sum(dist) for x in dist] (r, g, b) = (0., 0., 0.) for i, prob in enumerate(dist): (r_, g_, b_) = hex_to_rgb(COLORS[i]) r += prob * r_ g += prob * g_ b += prob * b_ color = str(rgb_to_hex(r, g, b)) p['style'] = 'fill:%s;display:inline' % color f = open('figs/gmm_%d.svg' % num_clusters,"w") f.write(soup.prettify()) f.close() parser = argparse.ArgumentParser(description='Fit a SpacedRepetitionModel to data.') parser.add_argument('-n', action='store', dest='num_clusters', type=int, default=4, help='number of clusters') if __name__ == '__main__': args = parser.parse_args() features, states, abbr, X = read_vote_data('data/state_vote_data.csv') # cluster the data gmm = GaussianMixture(n_components=args.num_clusters, covariance_type='spherical', max_iter=5, init_params='random', random_state=0).fit(X) # print cluster assignment distributions for each state preds = gmm.predict_proba(X) entropy = 0. for i, st in enumerate(states): print '%s\t%s\t%s' % (abbr[i], '{:<30}'.format(st), str(preds[i])) for x in preds[i]: try: entropy -= x * math.log(x, 2) except: pass entropy /= len(states) print 'entropy:', entropy # print mean values for each cluster for k, c in enumerate(gmm.means_): vector = dict(zip(features, c)) print '\nCLUSTER %d' % k print '\t'.join(['']+[str(x) for x in range(1980,2017,4)]) for party in 'dem rep 3rd'.split(): dat = ['%.2f' % vector['%d_%s' % (year, party)] for year in range(1980,2017,4)] print '\t'.join([party]+dat) # visualize clusters in a map make_map_file('figs/Blank_US_Map_with_borders.svg', dict(zip(abbr, preds)), args.num_clusters)
gpl-3.0
-6,921,857,075,692,596,000
33.396226
143
0.582008
false
3.249554
false
false
false
firestrand/pybrain-gpu
pybraingpu/datasets/supervised.py
1
4176
__author__ = 'Thomas Rueckstiess, ruecksti@in.tum.de' from random import sample from scipy import isscalar from dataset import DataSet from pybraingpu.utilities import fListToString class SupervisedDataSet(DataSet): """SupervisedDataSets have two fields, one for input and one for the target. """ def __init__(self, inp, target): """Initialize an empty supervised dataset. Pass `inp` and `target` to specify the dimensions of the input and target vectors.""" DataSet.__init__(self) if isscalar(inp): # add input and target fields and link them self.addField('input', inp) self.addField('target', target) else: self.setField('input', inp) self.setField('target', target) self.linkFields(['input', 'target']) # reset the index marker self.index = 0 # the input and target dimensions self.indim = self.getDimension('input') self.outdim = self.getDimension('target') def __reduce__(self): _, _, state, _, _ = super(SupervisedDataSet, self).__reduce__() creator = self.__class__ args = self.indim, self.outdim return creator, args, state, iter([]), iter({}) def addSample(self, inp, target): """Add a new sample consisting of `input` and `target`.""" self.appendLinked(inp, target) def getSample(self, index=None): """Return a sample at `index` or the current sample.""" return self.getLinked(index) def setField(self, label, arr, **kwargs): """Set the given array `arr` as the new array of the field specfied by `label`.""" DataSet.setField(self, label, arr, **kwargs) # refresh dimensions, in case any of these fields were modified if label == 'input': self.indim = self.getDimension('input') elif label == 'target': self.outdim = self.getDimension('target') def _provideSequences(self): """Return an iterator over sequence lists, although the dataset contains only single samples.""" return iter(map(lambda x: [x], iter(self))) def evaluateMSE(self, f, **args): """Evaluate the predictions of a function on the dataset and return the Mean Squared Error, incorporating importance.""" ponderation = 0. totalError = 0 for seq in self._provideSequences(): e, p = self._evaluateSequence(f, seq, **args) totalError += e ponderation += p assert ponderation > 0 return totalError / ponderation def _evaluateSequence(self, f, seq, verbose=False): """Return the ponderated MSE over one sequence.""" totalError = 0. ponderation = 0. for input, target in seq: res = f(input) e = 0.5 * sum((target - res).flatten() ** 2) totalError += e ponderation += len(target) if verbose: print 'out: ', fListToString(list(res)) print 'correct:', fListToString(target) print 'error: % .8f' % e return totalError, ponderation def evaluateModuleMSE(self, module, averageOver=1, **args): """Evaluate the predictions of a module on a dataset and return the MSE (potentially average over a number of epochs).""" res = 0. for dummy in range(averageOver): module.reset() res += self.evaluateMSE(module.activate, **args) return res / averageOver def splitWithProportion(self, proportion=0.5): """Produce two new datasets, the first one containing the fraction given by `proportion` of the samples.""" leftIndices = set(sample(range(len(self)), int(len(self) * proportion))) leftDs = self.copy() leftDs.clear() rightDs = leftDs.copy() index = 0 for sp in self: if index in leftIndices: leftDs.addSample(*sp) else: rightDs.addSample(*sp) index += 1 return leftDs, rightDs
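
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example: building a tiny XOR dataset and splitting it, assuming this
# module is importable as pybraingpu.datasets.supervised within the package.
from pybraingpu.datasets.supervised import SupervisedDataSet

ds = SupervisedDataSet(2, 1)          # 2-dimensional input, 1-dimensional target
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))
train, test = ds.splitWithProportion(0.75)
print(len(train))                     # roughly 75% of the samples
print(len(test))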
bsd-3-clause
1,078,351,008,453,465,500
34.692308
80
0.58501
false
4.184369
false
false
false
GraveRaven/hivemind
hivemindsrc/ants.py
1
15536
#!/bin/env python """ The MIT License Copyright (c) 2010 The Chicago Tribune & Contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ from multiprocessing import Pool import os import re import socket import time import sys IS_PY2 = sys.version_info.major == 2 if IS_PY2: from urllib2 import urlopen, Request from StringIO import StringIO else: from urllib.request import urlopen, Request from io import StringIO import base64 import csv import random import ssl from contextlib import contextmanager import traceback import boto.ec2 import boto.exception import paramiko STATE_FILENAME = os.path.expanduser('~/.ants') # Utilities @contextmanager def _redirect_stdout(outfile=None): save_stdout = sys.stdout sys.stdout = outfile or StringIO() yield sys.stdout = save_stdout def _read_server_list(): instance_ids = [] if not os.path.isfile(STATE_FILENAME): return (None, None, None, None) with open(STATE_FILENAME, 'r') as f: username = f.readline().strip() key_name = f.readline().strip() zone = f.readline().strip() text = f.read() instance_ids = [i for i in text.split('\n') if i != ''] print('Read %i bees from the roster.' % len(instance_ids)) return (username, key_name, zone, instance_ids) def _write_server_list(username, key_name, zone, instances): with open(STATE_FILENAME, 'w') as f: f.write('%s\n' % username) f.write('%s\n' % key_name) f.write('%s\n' % zone) f.write('\n'.join([instance.id for instance in instances])) def _delete_server_list(): os.remove(STATE_FILENAME) def _get_pem_path(key): return os.path.expanduser('~/.ssh/%s.pem' % key) def _get_region(zone): return zone if 'gov' in zone else zone[:-1] # chop off the "d" in the "us-east-1d" to get the "Region" def _get_security_group_id(connection, security_group_name, subnet): if not security_group_name: print('The bees need a security group to run under. Need to open a port from where you are to the target subnet.') return security_groups = connection.get_all_security_groups(filters={'group-name': [security_group_name]}) if not security_groups: print('The bees need a security group to run under. The one specified was not found.') return group = security_groups[0] if security_groups else None return group.id # Methods def up(count, group, zone, image_id, instance_type, username, key_name, subnet, bid = None): """ Startup the load testing server. 
""" existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list() count = int(count) if existing_username == username and existing_key_name == key_name and existing_zone == zone: ec2_connection = boto.ec2.connect_to_region(_get_region(zone)) existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids) existing_instances = filter(lambda i: i.state == 'running', [r.instances[0] for r in existing_reservations]) # User, key and zone match existing values and instance ids are found on state file if count <= len(existing_instances): # Count is less than the amount of existing instances. No need to create new ones. print('Ants are already assembled and awaiting orders.') return else: # Count is greater than the amount of existing instances. Need to create the only the extra instances. count -= len(existing_instances) elif instance_ids: # Instances found on state file but user, key and/or zone not matching existing value. # State file only stores one user/key/zone config combination so instances are unusable. print('Taking down {} unusable ants.'.format(len(instance_ids))) # Redirect prints in down() to devnull to avoid duplicate messages with _redirect_stdout(): down() # down() deletes existing state file so _read_server_list() returns a blank state existing_username, existing_key_name, existing_zone, instance_ids = _read_server_list() pem_path = _get_pem_path(key_name) if not os.path.isfile(pem_path): print('Warning. No key file found for %s. You will need to add this key to your SSH agent to connect.' % pem_path) print('Connecting to the hive.') try: ec2_connection = boto.ec2.connect_to_region(_get_region(zone)) except boto.exception.NoAuthHandlerFound as e: print("Authenciation config error, perhaps you do not have a ~/.boto file with correct permissions?") print(e.message) return e except Exception as e: print("Unknown error occured:") print(e.message) return e if ec2_connection == None: raise Exception("Invalid zone specified? Unable to connect to region using zone name") groupId = group if subnet is None else _get_security_group_id(ec2_connection, group, subnet) print("GroupId found: %s" % groupId) placement = None if 'gov' in zone else zone print("Placement: %s" % placement) if bid: print('Attempting to call up %i spot ants, this can take a while...' % count) spot_requests = ec2_connection.request_spot_instances( image_id=image_id, price=bid, count=count, key_name=key_name, security_group_ids=[groupId], instance_type=instance_type, placement=placement, subnet_id=subnet) # it can take a few seconds before the spot requests are fully processed time.sleep(5) instances = _wait_for_spot_request_fulfillment(ec2_connection, spot_requests) else: print('Attempting to call up %i ants.' 
% count) try: reservation = ec2_connection.run_instances( image_id=image_id, min_count=count, max_count=count, key_name=key_name, security_group_ids=[groupId], instance_type=instance_type, placement=placement, subnet_id=subnet) except boto.exception.EC2ResponseError as e: print("Unable to call ants:", e.message) return e instances = reservation.instances if instance_ids: existing_reservations = ec2_connection.get_all_instances(instance_ids=instance_ids) existing_instances = filter(lambda i: i.state == 'running', [r.instances[0] for r in existing_reservations]) map(instances.append, existing_instances) dead_instances = filter(lambda i: i not in [j.id for j in existing_instances], instance_ids) map(instance_ids.pop, [instance_ids.index(i) for i in dead_instances]) print('Waiting for ants to spawn...') instance_ids = instance_ids or [] for instance in [i for i in instances if i.state == 'pending']: instance.update() while instance.state != 'running': print('.') time.sleep(5) instance.update() instance_ids.append(instance.id) print('Ant %s is ready.' % instance.id) ec2_connection.create_tags(instance_ids, { "Name": "an ant!" }) _write_server_list(username, key_name, zone, instances) print('The hive has assembled %i ants.' % len(instances)) def report(): """ Report the status of the load testing servers. """ username, key_name, zone, instance_ids = _read_server_list() if not instance_ids: print('No ants have been mobilized.') return ec2_connection = boto.ec2.connect_to_region(_get_region(zone)) reservations = ec2_connection.get_all_instances(instance_ids=instance_ids) instances = [] for reservation in reservations: instances.extend(reservation.instances) for instance in instances: print('Ant %s: %s @ %s' % (instance.id, instance.state, instance.ip_address)) def down(): """ Shutdown the load testing server. """ username, key_name, zone, instance_ids = _read_server_list() if not instance_ids: print('No ants have been mobilized.') return print('Connecting to the hive.') ec2_connection = boto.ec2.connect_to_region(_get_region(zone)) print('Calling off the hive.') terminated_instance_ids = ec2_connection.terminate_instances( instance_ids=instance_ids) print('Stood down %i ants.' % len(terminated_instance_ids)) _delete_server_list() def _wait_for_spot_request_fulfillment(conn, requests, fulfilled_requests = []): """ Wait until all spot requests are fulfilled. Once all spot requests are fulfilled, return a list of corresponding spot instances. """ if len(requests) == 0: reservations = conn.get_all_instances(instance_ids = [r.instance_id for r in fulfilled_requests]) return [r.instances[0] for r in reservations] else: time.sleep(10) print('.') requests = conn.get_all_spot_instance_requests(request_ids=[req.id for req in requests]) for req in requests: if req.status.code == 'fulfilled': fulfilled_requests.append(req) print("spot ant `{}` joined the hive.".format(req.instance_id)) return _wait_for_spot_request_fulfillment(conn, [r for r in requests if r not in fulfilled_requests], fulfilled_requests) def _execute_order(params): print('Ant %i is joining the hive.' 
% params['i']) try: client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None if not os.path.isfile(pem_path): client.load_system_host_keys() client.connect(params['instance_name'], username=params['username']) else: client.connect( params['instance_name'], username=params['username'], key_filename=pem_path) print('Ant %i is executing order' % params['i']) stdin, stdout, stderr = client.exec_command(params['order']) #response = {} # paramiko's read() returns bytes which need to be converted back to a str #ab_results = IS_PY2 and stdout.read() or stdout.read().decode('utf-8') print(stdout.read().decode('utf-8')) client.close() except socket.error as e: return e except Exception as e: traceback.print_exc() print() raise e def _execute_order_file(params): upload_path = "/tmp/" print('Ant %i is joining the hive.' % params['i']) try: client = paramiko.SSHClient() client.set_missing_host_key_policy(paramiko.AutoAddPolicy()) pem_path = params.get('key_name') and _get_pem_path(params['key_name']) or None if not os.path.isfile(pem_path): client.load_system_host_keys() client.connect(params['instance_name'], username=params['username']) else: client.connect( params['instance_name'], username=params['username'], key_filename=pem_path) order_file = params['order_file'] filename = os.path.basename(order_file) print('Ant %s uploading file %s to %s' % (params['i'], order_file, upload_path + filename)) command = 'scp -i %s -o StrictHostKeyChecking=no %s %s@%s:%s' % (_get_pem_path(params['key_name']), order_file, params['username'], params['instance_name'], upload_path) os.system(command) print('Ant %s executing file %s' % (params['i'], upload_path + filename)) stdin, stdout, stderr = client.exec_command('chmod +x %s'% upload_path + filename) stdin, stdout, stderr = client.exec_command(upload_path + filename) #response = {} # paramiko's read() returns bytes which need to be converted back to a str #ab_results = IS_PY2 and stdout.read() or stdout.read().decode('utf-8') print(stdout.read().decode('utf-8')) client.close() except socket.error as e: return e except Exception as e: traceback.print_exc() print() raise e def order(orders, order_files): username, key_name, zone, instance_ids = _read_server_list() if not instance_ids: print('No ants are ready for orders.') return print('Connecting to the hive.') ec2_connection = boto.ec2.connect_to_region(_get_region(zone)) print('Assembling ants.') reservations = ec2_connection.get_all_instances(instance_ids=instance_ids) instances = [] for reservation in reservations: instances.extend(reservation.instances) instance_count = len(instances) params = [] #Start with executing order if not orders == None: for order in orders: del params[:] for i, instance in enumerate(instances): params.append({ 'i': i, 'instance_id': instance.id, 'instance_name': instance.private_dns_name if instance.public_dns_name == "" else instance.public_dns_name, 'username': username, 'key_name': key_name, 'order': order }) print('Organizing the hive.') # Spin up processes for connecting to EC2 instances pool = Pool(len(params)) results = pool.map(_execute_order, params) #Now run order files if not order_files == None: for order_file in order_files: print('Filename: %s' % order_file) del params[:] for i, instance in enumerate(instances): params.append({ 'i': i, 'instance_id': instance.id, 'instance_name': instance.private_dns_name if instance.public_dns_name == "" else 
instance.public_dns_name, 'username': username, 'key_name': key_name, 'order_file': order_file }) #print('Running order file %s' % order_file) print('Organizing the hive.') # Spin up processes for connecting to EC2 instances pool = Pool(len(params)) results = pool.map(_execute_order_file, params) print('The hive is awaiting new orders.') sys.exit(0)
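
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of driving the functions above from Python. The AMI id,
# security group, key pair and zone are placeholders, the module is assumed to
# be importable as ants, and working AWS/boto credentials are assumed.
import ants

ants.up(2, 'my-security-group', 'us-east-1d', 'ami-00000000',
        't2.micro', 'ec2-user', 'my-keypair', subnet=None)
ants.report()
ants.order(['uname -a'], None)   # runs the command on every instance, then exits the process
# ants.down() would terminate the instances in a later session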
mit
2,145,380,531,466,934,800
33.678571
177
0.630021
false
3.964277
false
false
false
kayhayen/Nuitka
nuitka/tools/quality/pylint/__main__.py
1
4063
#!/usr/bin/env python # Copyright 2021, Kay Hayen, mailto:kay.hayen@gmail.com # # Part of "Nuitka", an optimizing Python compiler that is compatible and # integrates with CPython, but also works on its own. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """ Main program for PyLint checker tool. """ from __future__ import print_function import sys from optparse import OptionParser from nuitka.PythonVersions import python_version from nuitka.tools.Basics import addPYTHONPATH, getHomePath, goHome, setupPATH from nuitka.tools.quality.Git import getModifiedPaths from nuitka.tools.quality.pylint import PyLint from nuitka.tools.quality.ScanSources import isPythonFile, scanTargets from nuitka.tools.testing.Common import hasModule, setup from nuitka.utils.FileOperations import resolveShellPatternToFilenames def main(): setup(go_main=False) # So PyLint finds nuitka package. addPYTHONPATH(getHomePath()) setupPATH() parser = OptionParser() parser.add_option( "--diff", action="store_true", dest="diff", default=False, help="""\ Analyse the changed files in git. Default is %default.""", ) parser.add_option( "--show-todos", "--todos", action="store_true", dest="todos", default=False, help="""\ Show TODO items. Default is %default.""", ) parser.add_option( "--verbose", action="store_true", dest="verbose", default=False, help="""\ Be verbose in output. Default is %default.""", ) parser.add_option( "--one-by-one", action="store_true", dest="one_by_one", default=False, help="""\ Check files one by one. Default is %default.""", ) parser.add_option( "--not-installed-is-no-error", action="store_true", dest="not_installed_is_no_error", default=False, help="""\ Insist on PyLint to be installed. Default is %default.""", ) options, positional_args = parser.parse_args() if options.not_installed_is_no_error and not hasModule("pylint"): print("PyLint is not installed for this interpreter version: SKIPPED") sys.exit(0) if positional_args: if options.diff: sys.exit("Error, no filenames argument allowed in git diff mode.") else: goHome() if options.diff: positional_args = [ filename for filename in getModifiedPaths() if isPythonFile(filename) ] else: positional_args = ["bin", "nuitka", "setup.py", "tests/*/run_all.py"] positional_args = sum( ( resolveShellPatternToFilenames(positional_arg) for positional_arg in positional_args ), [], ) if not positional_args: sys.exit("No files found.") print("Working on:", positional_args) ignore_list = [] # Avoid checking the Python2 runner along with the one for Python3, it has name collisions. if python_version >= 0x300: ignore_list.append("nuitka") filenames = list( scanTargets( positional_args, suffixes=(".py", ".scons"), ignore_list=ignore_list ) ) PyLint.executePyLint( filenames=filenames, show_todos=options.todos, verbose=options.verbose, one_by_one=options.one_by_one, ) if not filenames: sys.exit("No files found.") sys.exit(PyLint.our_exit_code)
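
# --- Illustrative usage sketch (not part of the original tool) ---
# A hedged example of invoking the checker programmatically from inside a
# Nuitka checkout with git available; main() reads its options from sys.argv
# via OptionParser, so we set them explicitly here.
import sys
from nuitka.tools.quality.pylint.__main__ import main

sys.argv = ["check-with-pylint", "--diff", "--show-todos"]
main()   # exits via sys.exit() with PyLint's status code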
apache-2.0
-8,395,116,404,153,469,000
26.828767
95
0.630815
false
3.888038
false
false
false
DedMemez/ODS-August-2017
building/DistributedPaintShopInterior.py
1
1302
# toontown.building.DistributedPaintShopInterior
from direct.distributed.DistributedObject import DistributedObject
from direct.actor.Actor import Actor
from RandomBuilding import RandomBuilding

class DistributedPaintShopInterior(DistributedObject, RandomBuilding):

    def announceGenerate(self):
        DistributedObject.announceGenerate(self)
        self.setup()

    def setup(self):
        randomGen = self.getRandomGen()
        colors = self.getColors()
        self.interior = loader.loadModel('phase_4/models/modules/PaintShopInterior')
        self.interior.reparentTo(render)
        self.mixer = Actor('phase_4/models/props/pos_PS_Mixer_zero', {'mix': 'phase_4/models/props/pos_PS_Mixer_mix'})
        self.mixer.reparentTo(self.interior)
        self.mixer.setPlayRate(2.1, 'mix')
        self.mixer.loop('mix', fromFrame=20, toFrame=160)
        if settings['smoothAnimations']:
            self.mixer.setBlend(frameBlend=True)
        self.setupDoor(randomGen, colors, self.interior, -0.25)
        self.resetNPCs()

    def disable(self):
        self.mixer.removeNode()
        del self.mixer
        self.interior.removeNode()
        del self.interior
        DistributedObject.disable(self)
apache-2.0
-2,224,954,339,351,331,600
40.064516
118
0.685868
false
3.626741
false
false
false
Wolfterro/Criador-De-Postagens
src/old/v1.0/CriadorDePostagens.py
1
13285
# -*- coding: utf-8 -*- ''' The MIT License (MIT) Copyright (c) 2017 Wolfgang Almeida <wolfgang.almeida@yahoo.com> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' #=================================== # Criado por: Wolfterro # Versão: 1.0 - Python 2.x # Data: 26/03/2017 #=================================== from PyQt4 import QtCore, QtGui import sys # Imports do programa # =================== from WindowHandler import WindowHandler from GlobalVars import GlobalVars # Definindo a codificação padrão para UTF-8. # ========================================== reload(sys) sys.setdefaultencoding('utf-8') # Codificação do programa. # ======================== try: _fromUtf8 = QtCore.QString.fromUtf8 except AttributeError: def _fromUtf8(s): return s try: _encoding = QtGui.QApplication.UnicodeUTF8 def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig, _encoding) except AttributeError: def _translate(context, text, disambig): return QtGui.QApplication.translate(context, text, disambig) # Classe principal do Programa gerado pelo Qt Designer. 
# ===================================================== class Ui_MainWindow(object): def setupUi(self, MainWindow, Handler): MainWindow.setObjectName(_fromUtf8("MainWindow")) MainWindow.resize(700, 820) icon = QtGui.QIcon() icon.addPixmap(QtGui.QPixmap(_fromUtf8("Icon.ico")), QtGui.QIcon.Normal, QtGui.QIcon.Off) MainWindow.setWindowIcon(icon) self.centralwidget = QtGui.QWidget(MainWindow) self.centralwidget.setObjectName(_fromUtf8("centralwidget")) self.gridLayout_5 = QtGui.QGridLayout(self.centralwidget) self.gridLayout_5.setObjectName(_fromUtf8("gridLayout_5")) self.groupBox = QtGui.QGroupBox(self.centralwidget) self.groupBox.setObjectName(_fromUtf8("groupBox")) self.gridLayout_4 = QtGui.QGridLayout(self.groupBox) self.gridLayout_4.setObjectName(_fromUtf8("gridLayout_4")) self.lineEdit = QtGui.QLineEdit(self.groupBox) self.lineEdit.setObjectName(_fromUtf8("lineEdit")) self.gridLayout_4.addWidget(self.lineEdit, 0, 0, 1, 1) self.gridLayout_5.addWidget(self.groupBox, 0, 0, 1, 1) self.groupBox_2 = QtGui.QGroupBox(self.centralwidget) self.groupBox_2.setObjectName(_fromUtf8("groupBox_2")) self.gridLayout_3 = QtGui.QGridLayout(self.groupBox_2) self.gridLayout_3.setObjectName(_fromUtf8("gridLayout_3")) self.lineEdit_2 = QtGui.QLineEdit(self.groupBox_2) self.lineEdit_2.setObjectName(_fromUtf8("lineEdit_2")) self.gridLayout_3.addWidget(self.lineEdit_2, 0, 0, 1, 1) self.gridLayout_5.addWidget(self.groupBox_2, 1, 0, 1, 1) self.groupBox_3 = QtGui.QGroupBox(self.centralwidget) self.groupBox_3.setObjectName(_fromUtf8("groupBox_3")) self.gridLayout_2 = QtGui.QGridLayout(self.groupBox_3) self.gridLayout_2.setObjectName(_fromUtf8("gridLayout_2")) self.textEdit = QtGui.QTextEdit(self.groupBox_3) self.textEdit.setObjectName(_fromUtf8("textEdit")) self.gridLayout_2.addWidget(self.textEdit, 0, 0, 1, 1) self.gridLayout_5.addWidget(self.groupBox_3, 2, 0, 1, 1) self.groupBox_4 = QtGui.QGroupBox(self.centralwidget) self.groupBox_4.setObjectName(_fromUtf8("groupBox_4")) self.gridLayout = QtGui.QGridLayout(self.groupBox_4) self.gridLayout.setObjectName(_fromUtf8("gridLayout")) self.pushButton = QtGui.QPushButton(self.groupBox_4) self.pushButton.setStyleSheet(_fromUtf8("QPushButton {\n" " font-weight: bold;\n" "}")) self.pushButton.setObjectName(_fromUtf8("pushButton")) self.gridLayout.addWidget(self.pushButton, 0, 0, 1, 1) self.pushButton_2 = QtGui.QPushButton(self.groupBox_4) self.pushButton_2.setStyleSheet(_fromUtf8("QPushButton {\n" " font-style: italic;\n" "}")) self.pushButton_2.setObjectName(_fromUtf8("pushButton_2")) self.gridLayout.addWidget(self.pushButton_2, 0, 1, 1, 1) self.pushButton_3 = QtGui.QPushButton(self.groupBox_4) self.pushButton_3.setStyleSheet(_fromUtf8("QPushButton {\n" " text-decoration: underline;\n" "}")) self.pushButton_3.setObjectName(_fromUtf8("pushButton_3")) self.gridLayout.addWidget(self.pushButton_3, 0, 2, 1, 2) self.pushButton_4 = QtGui.QPushButton(self.groupBox_4) self.pushButton_4.setStyleSheet(_fromUtf8("")) self.pushButton_4.setObjectName(_fromUtf8("pushButton_4")) self.gridLayout.addWidget(self.pushButton_4, 0, 4, 1, 2) self.pushButton_5 = QtGui.QPushButton(self.groupBox_4) self.pushButton_5.setObjectName(_fromUtf8("pushButton_5")) self.gridLayout.addWidget(self.pushButton_5, 0, 6, 1, 1) self.pushButton_6 = QtGui.QPushButton(self.groupBox_4) self.pushButton_6.setObjectName(_fromUtf8("pushButton_6")) self.gridLayout.addWidget(self.pushButton_6, 0, 7, 1, 2) self.pushButton_7 = QtGui.QPushButton(self.groupBox_4) self.pushButton_7.setObjectName(_fromUtf8("pushButton_7")) 
self.gridLayout.addWidget(self.pushButton_7, 0, 9, 1, 1) self.pushButton_14 = QtGui.QPushButton(self.groupBox_4) self.pushButton_14.setObjectName(_fromUtf8("pushButton_14")) self.gridLayout.addWidget(self.pushButton_14, 0, 10, 1, 1) self.pushButton_8 = QtGui.QPushButton(self.groupBox_4) self.pushButton_8.setObjectName(_fromUtf8("pushButton_8")) self.gridLayout.addWidget(self.pushButton_8, 1, 0, 1, 1) self.pushButton_9 = QtGui.QPushButton(self.groupBox_4) self.pushButton_9.setObjectName(_fromUtf8("pushButton_9")) self.gridLayout.addWidget(self.pushButton_9, 1, 1, 1, 2) self.pushButton_10 = QtGui.QPushButton(self.groupBox_4) self.pushButton_10.setObjectName(_fromUtf8("pushButton_10")) self.gridLayout.addWidget(self.pushButton_10, 1, 3, 1, 2) self.pushButton_11 = QtGui.QPushButton(self.groupBox_4) self.pushButton_11.setObjectName(_fromUtf8("pushButton_11")) self.gridLayout.addWidget(self.pushButton_11, 1, 5, 1, 2) self.pushButton_12 = QtGui.QPushButton(self.groupBox_4) self.pushButton_12.setObjectName(_fromUtf8("pushButton_12")) self.gridLayout.addWidget(self.pushButton_12, 1, 7, 1, 1) self.pushButton_13 = QtGui.QPushButton(self.groupBox_4) self.pushButton_13.setObjectName(_fromUtf8("pushButton_13")) self.gridLayout.addWidget(self.pushButton_13, 1, 8, 1, 2) self.pushButton_15 = QtGui.QPushButton(self.groupBox_4) self.pushButton_15.setObjectName(_fromUtf8("pushButton_15")) self.gridLayout.addWidget(self.pushButton_15, 1, 10, 1, 1) self.gridLayout_5.addWidget(self.groupBox_4, 3, 0, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtGui.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 691, 20)) self.menubar.setObjectName(_fromUtf8("menubar")) self.menuArquivo = QtGui.QMenu(self.menubar) self.menuArquivo.setObjectName(_fromUtf8("menuArquivo")) self.menuFormatar = QtGui.QMenu(self.menubar) self.menuFormatar.setObjectName(_fromUtf8("menuArquivo")) MainWindow.setMenuBar(self.menubar) self.statusbar = QtGui.QStatusBar(MainWindow) self.statusbar.setObjectName(_fromUtf8("statusbar")) MainWindow.setStatusBar(self.statusbar) self.actionSalvar_Como = QtGui.QAction(MainWindow) self.actionSalvar_Como.setObjectName(_fromUtf8("actionSalvar_Como")) self.actionSair = QtGui.QAction(MainWindow) self.actionSair.setObjectName(_fromUtf8("actionSair")) self.actionFonte = QtGui.QAction(MainWindow) self.actionFonte.setObjectName(_fromUtf8("actionFonte")) self.menuArquivo.addAction(self.actionSalvar_Como) self.menuArquivo.addAction(self.actionSair) self.menuFormatar.addAction(self.actionFonte) self.menubar.addAction(self.menuArquivo.menuAction()) self.menubar.addAction(self.menuFormatar.menuAction()) # Adicionando evento 'clicked.connect' aos botões da janela # ========================================================= self.pushButton.clicked.connect(lambda: Handler.InsertTag(u"<b></b>", True)) self.pushButton_2.clicked.connect(lambda: Handler.InsertTag(u"<i></i>", True)) self.pushButton_3.clicked.connect(lambda: Handler.InsertTag(u"<u></u>", True)) self.pushButton_4.clicked.connect(lambda: Handler.InsertTag(u"<del></del>", True)) self.pushButton_5.clicked.connect(lambda: Handler.InsertTag(u"<img class=\"img-responsive\" src=\"INSIRA O CAMINHO DA IMAGEM AQUI\" alt=\"NOME DA IMAGEM\"></img>", True)) self.pushButton_6.clicked.connect(lambda: Handler.InsertTag(u"<a href=\"INSIRA O LINK AQUI\" target=\"_blank\"></a>", True)) self.pushButton_7.clicked.connect(lambda: Handler.InsertTag(u"<p></p>", True)) self.pushButton_8.clicked.connect(lambda: Handler.InsertTag(u"<h1></h1>", True)) 
self.pushButton_9.clicked.connect(lambda: Handler.InsertTag(u"<h2></h2>", True)) self.pushButton_10.clicked.connect(lambda: Handler.InsertTag(u"<h3></h3>", True)) self.pushButton_11.clicked.connect(lambda: Handler.InsertTag(u"<center></center>", False)) self.pushButton_12.clicked.connect(lambda: Handler.InsertTag(u"<video><source src=\"INSIRA O CAMINHO DO VÍDEO AQUI\" type=\"video/mp4\"></video>", True)) self.pushButton_13.clicked.connect(lambda: Handler.InsertTag(u"<audio><source src=\"INSIRA O CAMINHO DO ÁUDIO AQUI\" type=\"audio/mpeg\"></audio>", True)) self.pushButton_14.clicked.connect(lambda: Handler.InsertTag(u"<br>", True)) self.pushButton_15.clicked.connect(lambda: Handler.InsertTag(u"<hr>", True)) # Adicionando evento 'triggered.connect' aos menus da janela # ========================================================== self.actionSair.triggered.connect(Handler.ExitProgram) self.actionSalvar_Como.triggered.connect(Handler.GetValuesAndSaveAs) self.actionFonte.triggered.connect(Handler.ChangeFont) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): MainWindow.setWindowTitle(_translate("MainWindow", "Criador de Postagens - v%s" % (GlobalVars.Version), None)) self.groupBox.setTitle(_translate("MainWindow", "Título", None)) self.groupBox_2.setTitle(_translate("MainWindow", "Subtítulo", None)) self.groupBox_3.setTitle(_translate("MainWindow", "Postagem", None)) self.groupBox_4.setTitle(_translate("MainWindow", "Ferramentas de Postagem", None)) self.pushButton.setText(_translate("MainWindow", "B", None)) self.pushButton_2.setText(_translate("MainWindow", "i", None)) self.pushButton_3.setText(_translate("MainWindow", "u", None)) self.pushButton_4.setText(_translate("MainWindow", "<del>", None)) self.pushButton_5.setText(_translate("MainWindow", "<img>", None)) self.pushButton_6.setText(_translate("MainWindow", "<a>", None)) self.pushButton_7.setText(_translate("MainWindow", "<p>", None)) self.pushButton_14.setText(_translate("MainWindow", "<br>", None)) self.pushButton_8.setText(_translate("MainWindow", "<h1>", None)) self.pushButton_9.setText(_translate("MainWindow", "<h2>", None)) self.pushButton_10.setText(_translate("MainWindow", "<h3>", None)) self.pushButton_11.setText(_translate("MainWindow", "<center>", None)) self.pushButton_12.setText(_translate("MainWindow", "<video>", None)) self.pushButton_13.setText(_translate("MainWindow", "<audio>", None)) self.pushButton_15.setText(_translate("MainWindow", "<hr>", None)) self.menuArquivo.setTitle(_translate("MainWindow", "Arquivo", None)) self.menuFormatar.setTitle(_translate("MainWindow", "Formatar", None)) self.actionSalvar_Como.setText(_translate("MainWindow", "Salvar Como...", None)) self.actionSair.setText(_translate("MainWindow", "Sair", None)) self.actionFonte.setText(_translate("MainWindow", "Fonte...", None)) if __name__ == "__main__": app = QtGui.QApplication(sys.argv) MainWindow = QtGui.QMainWindow() ui = Ui_MainWindow() # Os métodos do programa serão definidos pelo Handler # --------------------------------------------------- Handler = WindowHandler(ui) # Definindo locale do programa # ---------------------------- translator = QtCore.QTranslator() locale = QtCore.QLocale.system().name() translator.load('qt_%s' % locale, QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.TranslationsPath)) app.installTranslator(translator) ui.setupUi(MainWindow, Handler) MainWindow.show() sys.exit(app.exec_())
mit
7,671,738,107,317,394,000
49.851563
172
0.714361
false
3.332998
false
false
false
davidfischer-ch/django-imagefit
imagefit/models.py
1
3608
from __future__ import division from imagefit.conf import ext_to_format, settings from PIL import Image as PilImage import mimetypes try: import StringIO except ImportError: import io as StringIO import re import os class Image(object): """ Represents an Image file on the system. """ def __init__(self, path, cache=None, cached_name=None, *args, **kwargs): self.path = path self.pil = PilImage.open(path) self.cache = cache self.cached_name = cached_name # force RGB if self.pil.mode not in ('L', 'RGB'): self.pil = self.pil.convert('RGB') @property def mimetype(self): return mimetypes.guess_type(self.path)[0] @property def modified(self): return os.path.getmtime(self.path) @property def is_cached(self): return self.cache and self.cached_name in self.cache def resize(self, width=None, height=None): return self.pil.thumbnail( (int(width), int(height)), PilImage.ANTIALIAS) def crop(self, width=None, height=None): img_w, img_h = self.pil.size # don't crop an image than is smaller than requested size if img_w < width and img_h < height: return self.pil elif img_w < width: width = img_w elif img_h < height: height = img_h delta_w = img_w / width delta_h = img_h / height delta = delta_w if delta_w < delta_h else delta_h new_w = img_w / delta new_h = img_h / delta self.resize(new_w, new_h) box_diff = ((new_w - width) / 2, (new_h - height) / 2) box = ( int(box_diff[0]), int(box_diff[1]), int(new_w - box_diff[0]), int(new_h - box_diff[1])) self.pil = self.pil.crop(box) return self.pil def render(self): """ Renders the file content """ if self.is_cached: return self.cache.get(self.cached_name) else: image_str = StringIO.StringIO() self.pil.save(image_str, ext_to_format(self.cached_name)) return image_str.getvalue() def save(self): """ Save the image to the cache if provided and not cached yet. """ if self.cache and not self.is_cached: image_str = StringIO.StringIO() self.pil.save(image_str, ext_to_format(self.cached_name)) self.cache.set(self.cached_name, image_str.getvalue()) image_str.close() class Presets(object): """ Representation of an image format storage """ @classmethod def get_all(cls): """ Reads presets from settings """ return getattr(settings, 'IMAGEFIT_PRESETS', {}) @classmethod def get(cls, key, to_tuple=False): """ Retrieves a specific preset by its name """ preset = cls.get_all().get(key, None) return preset @classmethod def has(cls, key): """ Checks if a preset exists """ return key in cls.get_all() @classmethod def from_string(cls, string): """ Converts a <width>x<height> into a {'width': <width>, 'height': <height>} dict return dict or None """ if re.match('(\d+)x(\d+),?(\w*)', string): sizes = [x for x in re.match( '(\d+)x(\d+)(,?[c|C]?)', string).groups()] return { 'width': int(sizes[0]), 'height': int(sizes[1]), 'crop': bool(sizes[2])}
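
# --- Illustrative usage sketch (not part of the original module) ---
# A hedged example of the classes above, assuming a configured Django project
# with imagefit installed; the file path, preset string and cache name are
# placeholders.
from imagefit.models import Image, Presets

preset = Presets.from_string('300x200,C')   # -> {'width': 300, 'height': 200, 'crop': True}
img = Image('/tmp/photo.jpg', cache=None, cached_name='photo_300x200.jpg')
if preset['crop']:
    img.crop(width=preset['width'], height=preset['height'])
else:
    img.resize(width=preset['width'], height=preset['height'])
payload = img.render()   # encoded image data, e.g. for an HttpResponse body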
bsd-3-clause
6,212,976,185,050,799,000
26.968992
76
0.54296
false
3.670397
false
false
false
berth64/modded_modded_1257ad
source/process_presentations.py
1
1453
import sys sys.dont_write_bytecode = True import string from module_info import * from module_presentations import * from ID_meshes import * from process_common import * from process_operations import * # Lav's export_dir tweak export_dir = '%s/' % export_dir.replace('\\', '/').rstrip('/') def save_presentations(variable_list,variable_uses,tag_uses,quick_strings): ofile = open(export_dir + "presentations.txt","w") ofile.write("presentationsfile version 1\n") ofile.write(" %d\n"%(len(presentations))) for presentation in presentations: ofile.write("prsnt_%s %d %d "%(presentation[0], presentation[1], presentation[2])) save_simple_triggers(ofile,presentation[3], variable_list,variable_uses,tag_uses,quick_strings) ofile.write("\n") ofile.close() def save_python_header(): file = open("./ID_presentations.py","w") for i_presentation in xrange(len(presentations)): file.write("prsnt_%s = %d\n"%(presentations[i_presentation][0],i_presentation)) file.close() print "Exporting presentations..." save_python_header() variable_uses = [] variables = load_variables(export_dir,variable_uses) tag_uses = load_tag_uses(export_dir) quick_strings = load_quick_strings(export_dir) save_presentations(variables,variable_uses,tag_uses,quick_strings) save_variables(export_dir,variables,variable_uses) save_tag_uses(export_dir,tag_uses) save_quick_strings(export_dir,quick_strings)
agpl-3.0
8,558,168,947,293,909,000
32.595238
99
0.714384
false
3.193407
false
false
false
Dinnerbone/mcstatus
setup.py
1
1718
from setuptools import setup with open("requirements.txt") as f: install_requires = f.read().splitlines() with open("test-requirements.txt") as f: tests_require = f.read().splitlines() tests_require.pop(0) # remove '-r requirements.txt' line setup( name="mcstatus", version="6.4.0", author="Nathan Adams", author_email="dinnerbone@dinnerbone.com", url="https://pypi.python.org/pypi/mcstatus", packages=["mcstatus", "mcstatus.protocol", "mcstatus.scripts"], description="A library to query Minecraft Servers for their status and capabilities.", long_description=open("README.md", "r").read(), long_description_content_type="text/markdown", install_requires=install_requires, extras_require={ "tests": tests_require, }, python_requires=">=3.6", classifiers=[ "Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", "Natural Language :: English", "Operating System :: OS Independent", "Programming Language :: Python", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Topic :: Games/Entertainment", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Monitoring", ], entry_points=""" [console_scripts] mcstatus=mcstatus.scripts.mcstatus:cli """, project_urls={ "Source": "https://github.com/Dinnerbone/mcstatus", }, )
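
# --- Illustrative usage sketch (not part of setup.py) ---
# A hedged example of the library this package installs, assuming the 6.x API
# and a reachable Minecraft server; the hostname is a placeholder.
from mcstatus import MinecraftServer

server = MinecraftServer.lookup("mc.example.com:25565")
status = server.status()
print("%d players online, %sms latency" % (status.players.online, status.latency))
# The console script registered above should offer the same check from a shell,
# e.g. `mcstatus mc.example.com status`.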
apache-2.0
-681,558,182,179,929,600
34.791667
90
0.623981
false
4.023419
false
false
false