Dataset schema (one row per source file; observed ranges from the viewer summary):

| column | dtype | observed values / range |
|---|---|---|
| repo_name | string | lengths 5–92 |
| path | string | lengths 4–221 |
| copies | string | 19 values |
| size | string | lengths 4–6 |
| content | string | lengths 766–896k |
| license | string | 15 values |
| hash | int64 | -9,223,277,421,539,062,000 to 9,223,102,107B |
| line_mean | float64 | 6.51 to 99.9 |
| line_max | int64 | 32 to 997 |
| alpha_frac | float64 | 0.25 to 0.96 |
| autogenerated | bool | 1 class |
| ratio | float64 | 1.5 to 13.6 |
| config_test | bool | 2 classes |
| has_no_keywords | bool | 2 classes |
| few_assignments | bool | 1 class |
---

**repo_name:** kfcpaladin/sze-the-game · **path:** renpy/debug.py · **copies:** 1 · **size:** 1941
```python
# Copyright 2004-2017 Tom Rothamel <pytom@bishoujo.us>
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

# This file contains debugging code that isn't enabled in normal Ren'Py
# operation.

from __future__ import print_function

import renpy

import __builtin__
import threading
import datetime
import os

real_open = __builtin__.open
__builtin__.real_file = __builtin__.file


def replacement_open(*args, **kwargs):

    rv = real_open(*args, **kwargs)

    if not renpy.game.contexts:
        return rv

    if renpy.game.context().init_phase:
        return rv

    if threading.current_thread().name != "MainThread":
        return rv

    print(datetime.datetime.now().strftime("%H:%M:%S"), "In main thread: open" + repr(args))

    return rv


def init_main_thread_open():
    if not "RENPY_DEBUG_MAIN_THREAD_OPEN" in os.environ:
        return

    __builtin__.open = replacement_open
    __builtin__.file = replacement_open
```
**license:** mit · **hash:** 1,905,923,962,222,076,000 · **line_mean:** 31.898305 · **line_max:** 92 · **alpha_frac:** 0.729006 · **ratio:** 4.069182 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
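A minimal usage sketch for the hook above, assuming it runs inside a Ren'Py process where `renpy.debug` is importable; the environment variable is the one the module itself checks:

```python
# Hypothetical: enable the main-thread open() tracer before the game loads files.
import os
os.environ["RENPY_DEBUG_MAIN_THREAD_OPEN"] = "1"

import renpy.debug
renpy.debug.init_main_thread_open()

# From here on, an open() on the main thread outside of init phase prints e.g.:
#   12:34:56 In main thread: open('game/script.rpy',)
```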
---

**repo_name:** pdebuyl/cg_md_polymerization · **path:** code/epoxy_setup.py · **copies:** 1 · **size:** 3569
```python
import espressopp
import mpi4py.MPI as MPI


def get_velocity(system, n):
    """Obtain total velocity of an espressopp system."""
    total_v = espressopp.Real3D(0.)
    total_m = 0.
    for i in range(n):
        p = system.storage.getParticle(i)
        total_v += p.v * p.mass
        total_m += p.mass
    return total_v / total_m


def reset_velocity(system, n):
    """Reset the total velocity of an espressopp system."""
    excess_v = get_velocity(system, n)
    for i in range(n):
        v = system.storage.getParticle(i).v
        system.storage.modifyParticle(i, 'v', v - excess_v)


# LJ settings
sigma = 1.0
epsilon = 1.0
caprad_LJ = 0.85
rc = pow(2., 1. / 6.)

# FENE settings
K = 30.
rMax = 1.5
caprad_FENE = 1.4

# Polymer chain settings
bondlen = 0.97

# General settings
skin = 0.3


def chains_x_system(num_chains, monomers_per_chain, num_X, density=0.8, seed=None):

    num_particles = num_chains * monomers_per_chain + num_X
    L = pow(num_particles / density, 1. / 3.)
    box = (L, L, L)

    # Initialize the espressopp system
    system = espressopp.System()
    if seed is not None:
        system.rng = espressopp.esutil.RNG(seed)
    else:
        system.rng = espressopp.esutil.RNG()
    system.bc = espressopp.bc.OrthorhombicBC(system.rng, box)
    system.skin = skin
    nodeGrid = espressopp.tools.decomp.nodeGrid(MPI.COMM_WORLD.size)
    cellGrid = espressopp.tools.decomp.cellGrid(box, nodeGrid, rc, skin)
    system.storage = espressopp.storage.DomainDecomposition(system, nodeGrid, cellGrid)

    def normal_v():
        return espressopp.Real3D(system.rng.normal() * 0.5,
                                 system.rng.normal() * 0.5,
                                 system.rng.normal() * 0.5)

    # Add the chains
    chainFPL = espressopp.FixedPairList(system.storage)
    pid = 0
    for i in range(num_chains):
        chain = []
        startpos = system.bc.getRandomPos()
        positions, bonds = espressopp.tools.topology.polymerRW(pid, startpos, monomers_per_chain, bondlen)
        for k in range(monomers_per_chain):
            part = [pid + k, positions[k], normal_v()]
            chain.append(part)
        pid += monomers_per_chain
        system.storage.addParticles(chain, 'id', 'pos', 'v')
        chainFPL.addBonds(bonds)

    # Add the individual particles
    Xs = []
    for i in range(num_X):
        pos = system.bc.getRandomPos()
        v = espressopp.Real3D(system.rng.normal(), system.rng.normal(), system.rng.normal())
        Xs.append([pid, pos, v])
        pid += 1
    system.storage.addParticles(Xs, 'id', 'pos', 'v')

    # Define capped LJ potential
    verletList = espressopp.VerletList(system, cutoff=rc)
    LJCapped = espressopp.interaction.VerletListLennardJonesCapped(verletList)
    LJCapped.setPotential(type1=0, type2=0,
                          potential=espressopp.interaction.LennardJonesCapped(epsilon=epsilon, sigma=sigma,
                                                                              cutoff=rc, caprad=caprad_LJ))
    system.addInteraction(LJCapped)

    # Define capped FENE potential
    potFENE = espressopp.interaction.FENECapped(K=K, r0=0.0, rMax=rMax, caprad=caprad_FENE)
    FENECapped = espressopp.interaction.FixedPairListFENECapped(system, chainFPL, potFENE)
    system.addInteraction(FENECapped)

    # Define integrator and StochasticVelocityRescaling thermostat
    integrator = espressopp.integrator.VelocityVerlet(system)
    thermostat = espressopp.integrator.StochasticVelocityRescaling(system)
    thermostat.temperature = 1.0
    integrator.addExtension(thermostat)

    system.storage.decompose()

    return system, integrator, LJCapped, verletList, FENECapped, chainFPL, thermostat, num_particles
```
**license:** bsd-3-clause · **hash:** -3,066,971,447,088,541,000 · **line_mean:** 34.336634 · **line_max:** 155 · **alpha_frac:** 0.674979 · **ratio:** 3.042626 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
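A short driver sketch for `chains_x_system` above; `integrator.run()` is the standard espressopp stepping call, but the parameter values here are illustrative only:

```python
# Hypothetical driver: a melt of 10 chains of 20 monomers plus 50 cross-linkers.
system, integrator, LJCapped, verletList, FENECapped, chainFPL, thermostat, n = \
    chains_x_system(num_chains=10, monomers_per_chain=20, num_X=50, density=0.8, seed=42)

reset_velocity(system, n)       # remove any net center-of-mass drift before equilibration
integrator.run(1000)            # integrate 1000 MD steps (espressopp integrator API)
print(get_velocity(system, n))  # should stay near (0, 0, 0)
```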
---

**repo_name:** hpparvi/PyTransit · **path:** pytransit/lpf/tessoclttvlpf.py · **copies:** 1 · **size:** 3050
```python
#  PyTransit: fast and easy exoplanet transit modelling in Python.
#  Copyright (C) 2010-2019  Hannu Parviainen
#
#  This program is free software: you can redistribute it and/or modify
#  it under the terms of the GNU General Public License as published by
#  the Free Software Foundation, either version 3 of the License, or
#  (at your option) any later version.
#
#  This program is distributed in the hope that it will be useful,
#  but WITHOUT ANY WARRANTY; without even the implied warranty of
#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#  GNU General Public License for more details.
#
#  You should have received a copy of the GNU General Public License
#  along with this program.  If not, see <https://www.gnu.org/licenses/>.

from pathlib import Path

from astropy.table import Table
from numba import njit, prange
from numpy import atleast_2d, zeros, log, concatenate, pi, transpose, sum, compress, ones
from uncertainties import UFloat, ufloat

from .oclttvlpf import OCLTTVLPF
from ..utils.keplerlc import KeplerLC
from .baselines.legendrebaseline import LegendreBaseline


@njit(parallel=True, cache=False, fastmath=True)
def lnlike_normal_v(o, m, e):
    m = atleast_2d(m)
    npv = m.shape[0]
    npt = o.size
    lnl = zeros(npv)
    for i in prange(npv):
        lnl[i] = -npt * log(e[i, 0]) - 0.5 * log(2 * pi) - 0.5 * sum(((o - m[i, :]) / e[i, 0]) ** 2)
    return lnl


class TESSCLTTVLPF(LegendreBaseline, OCLTTVLPF):
    def __init__(self, name: str, dfile: Path, zero_epoch: float, period: float, nsamples: int = 10,
                 trdur: float = 0.125, bldur: float = 0.3, nlegendre: int = 2, ctx=None, queue=None):
        zero_epoch = zero_epoch if isinstance(zero_epoch, UFloat) else ufloat(zero_epoch, 1e-5)
        period = period if isinstance(period, UFloat) else ufloat(period, 1e-7)

        tb = Table.read(dfile)
        self.bjdrefi = tb.meta['BJDREFI']
        zero_epoch = zero_epoch - self.bjdrefi

        df = tb.to_pandas().dropna(subset=['TIME', 'SAP_FLUX', 'PDCSAP_FLUX'])
        self.lc = lc = KeplerLC(df.TIME.values, df.SAP_FLUX.values, zeros(df.shape[0]),
                                zero_epoch.n, period.n, trdur, bldur)

        LegendreBaseline.__init__(self, nlegendre)
        OCLTTVLPF.__init__(self, name, zero_epoch, period, ['TESS'],
                           times=lc.time_per_transit, fluxes=lc.normalized_flux_per_transit,
                           pbids=zeros(lc.nt, 'int'), nsamples=nsamples, exptimes=[0.00139],
                           cl_ctx=ctx, cl_queue=queue)

        self.lnlikelihood = self.lnlikelihood_nb

    def create_pv_population(self, npop=50):
        pvp = self.ps.sample_from_prior(npop)
        return pvp

    def flux_model(self, pvp):
        tmodel = transpose(self.transit_model(pvp, copy=True)).copy()
        return tmodel * self.baseline(pvp)

    def lnlikelihood_nb(self, pvp):
        fmodel = self.flux_model(pvp).astype('d')
        err = 10 ** atleast_2d(pvp)[:, self._sl_err]
        return lnlike_normal_v(self.ofluxa, fmodel, err)
```
**license:** gpl-2.0 · **hash:** -3,970,768,457,033,635,300 · **line_mean:** 40.780822 · **line_max:** 105 · **alpha_frac:** 0.65541 · **ratio:** 3.125 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
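For sanity-checking `lnlike_normal_v` above, here is a pure-NumPy reference with the same math (my addition, not part of PyTransit); it assumes `o` is 1-D with npt points, `m` is (npv, npt), and `e` is (npv, 1):

```python
import numpy as np

def lnlike_normal_ref(o, m, e):
    """Vectorised reference for the numba kernel: per-pv Gaussian log-likelihood."""
    m = np.atleast_2d(m)
    npt = o.size
    return (-npt * np.log(e[:, 0])
            - 0.5 * np.log(2 * np.pi)
            - 0.5 * np.sum(((o - m) / e[:, :1]) ** 2, axis=1))
```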
---

**repo_name:** RJRetro/mame · **path:** scripts/build/makedep.py · **copies:** 2 · **size:** 8404
```python
#!/usr/bin/python
##
## license:BSD-3-Clause
## copyright-holders:Miodrag Milanovic

from __future__ import with_statement

import sys

## to ignore include of emu.h add it always to list
files_included = ['src/emu/emu.h']

include_dirs = ['src/emu/', 'src/devices/', 'src/mame/']

mappings = dict()

deps_files_included = [ ]

deps_include_dirs = ['src/mame/']

components = [ ]

drivers = [ ]

def file_exists(root, srcfile, folder, inc_dir):
    includes = [ folder ]
    includes.extend(inc_dir)
    for line in includes:
        try:
            fp = open(root + line + srcfile, 'r')
            fp.close()
            return line + srcfile
        except IOError:
            pass
    return ''

def add_c_if_exists(root, fullname):
    try:
        fp = open(root + fullname, 'r')
        fp.close()
        deps_files_included.append(fullname)
    except IOError:
        pass

def add_rest_if_exists(root, srcfile, folder):
    t = srcfile.rsplit('/', 2)
    if t[1] == 'includes':
        t[2] = t[2].replace('.h', '.cpp')
        t[1] = 'drivers'
        add_c_if_exists(root, "/".join(t))
        parse_file_for_deps(root, "/".join(t), folder)
        t[1] = 'machine'
        add_c_if_exists(root, "/".join(t))
        parse_file_for_deps(root, "/".join(t), folder)
        t[1] = 'video'
        add_c_if_exists(root, "/".join(t))
        parse_file_for_deps(root, "/".join(t), folder)
        t[1] = 'audio'
        add_c_if_exists(root, "/".join(t))
        parse_file_for_deps(root, "/".join(t), folder)

def parse_file_for_deps(root, srcfile, folder):
    try:
        fp = open(root + srcfile, 'r')
    except IOError:
        return 1
    in_comment = 0
    linenum = 0
    for line in fp.readlines():
        content = ''
        linenum += 1
        srcptr = 0
        while srcptr < len(line):
            c = line[srcptr]
            srcptr += 1
            if ord(c) == 13 or ord(c) == 10:
                if ord(c) == 13 and ord(line[srcptr]) == 10:
                    srcptr += 1
                continue
            if c == ' ' or ord(c) == 9:
                continue
            if in_comment == 1 and c == '*' and line[srcptr] == '/':
                srcptr += 1
                in_comment = 0
                continue
            if in_comment:
                continue
            if c == '/' and line[srcptr] == '*':
                srcptr += 1
                in_comment = 1
                continue
            if c == '/' and line[srcptr] == '/':
                break
            content += c
        content = content.strip()
        if len(content) > 0:
            if content.startswith('#include'):
                name = content[8:]
                name = name.replace('"', '')
                fullname = file_exists(root, name, folder, deps_include_dirs)
                if fullname in deps_files_included:
                    continue
                if fullname != '':
                    deps_files_included.append(fullname)
                    add_c_if_exists(root, fullname.replace('.h', '.cpp'))
                    add_rest_if_exists(root, fullname, folder)
                    newfolder = fullname.rsplit('/', 1)[0] + '/'
                    parse_file_for_deps(root, fullname, newfolder)
                continue
    fp.close()
    return 0

def parse_file(root, srcfile, folder):
    try:
        fp = open(root + srcfile, 'r')
    except IOError:
        return 1
    in_comment = 0
    linenum = 0
    for line in fp.readlines():
        content = ''
        linenum += 1
        srcptr = 0
        while srcptr < len(line):
            c = line[srcptr]
            srcptr += 1
            if ord(c) == 13 or ord(c) == 10:
                if ord(c) == 13 and ord(line[srcptr]) == 10:
                    srcptr += 1
                continue
            if c == ' ' or ord(c) == 9:
                continue
            if in_comment == 1 and c == '*' and line[srcptr] == '/':
                srcptr += 1
                in_comment = 0
                continue
            if in_comment:
                continue
            if c == '/' and line[srcptr] == '*':
                srcptr += 1
                in_comment = 1
                continue
            if c == '/' and line[srcptr] == '/':
                break
            content += c
        content = content.strip()
        if len(content) > 0:
            if content.startswith('#include'):
                name = content[8:]
                name = name.replace('"', '')
                fullname = file_exists(root, name, folder, include_dirs)
                if fullname in files_included:
                    continue
                if "src/lib/netlist/" in fullname:
                    continue
                if fullname != '':
                    if fullname in mappings.keys():
                        if not (mappings[fullname] in components):
                            components.append(mappings[fullname])
                    files_included.append(fullname)
                    newfolder = fullname.rsplit('/', 1)[0] + '/'
                    parse_file(root, fullname, newfolder)
                    if (fullname.endswith('.h')):
                        parse_file(root, fullname.replace('.h', '.cpp'), newfolder)
                continue
    fp.close()
    return 0

def parse_file_for_drivers(root, srcfile):
    srcfile = srcfile.replace('\\', '/')
    if srcfile.startswith('src/mame/drivers'):
        splitname = srcfile.split('/', 4)
        drivers.append(splitname[3])
    return 0

def parse_lua_file(srcfile):
    try:
        fp = open(srcfile, 'r')
    except IOError:
        sys.stderr.write("Unable to open source file '%s'\n" % srcfile)
        return 1
    for line in fp.readlines():
        content = line.strip()
        if len(content) > 0:
            if content.startswith('--@'):
                name = content[3:]
                mappings[name.rsplit(',', 1)[0]] = name.rsplit(',', 1)[1]
    return 0

if len(sys.argv) < 5:
    print('Usage:')
    print('  makedep <root> <source.c> <type> <target>')
    sys.exit(0)

root = sys.argv[1] + '/'

parse_lua_file(root + 'scripts/src/bus.lua')
parse_lua_file(root + 'scripts/src/cpu.lua')
parse_lua_file(root + 'scripts/src/machine.lua')
parse_lua_file(root + 'scripts/src/sound.lua')
parse_lua_file(root + 'scripts/src/video.lua')

for filename in sys.argv[2].rsplit(','):
    deps_files_included.append(filename.replace('\\', '/'))
    parse_file_for_deps(root, filename, '')

for filename in deps_files_included:
    parse_file(root, filename, '')

for filename in sys.argv[2].rsplit(','):
    parse_file_for_drivers(root, filename)

# display output
if sys.argv[3] == 'drivers':
    # output the list of externs first
    for drv in sorted(drivers):
        print(drv)
    print("")

if sys.argv[3] == 'target':
    for line in components:
        sys.stdout.write("%s\n" % line)
    sys.stdout.write('\n')
    sys.stdout.write('function createProjects_mame_%s(_target, _subtarget)\n' % sys.argv[4])
    sys.stdout.write('    project ("mame_%s")\n' % sys.argv[4])
    sys.stdout.write('    targetsubdir(_target .."_" .. _subtarget)\n')
    sys.stdout.write('    kind (LIBTYPE)\n')
    sys.stdout.write('    uuid (os.uuid("drv-mame-%s"))\n' % sys.argv[4])
    sys.stdout.write('    \n')
    sys.stdout.write('    includedirs {\n')
    sys.stdout.write('        MAME_DIR .. "src/osd",\n')
    sys.stdout.write('        MAME_DIR .. "src/emu",\n')
    sys.stdout.write('        MAME_DIR .. "src/devices",\n')
    sys.stdout.write('        MAME_DIR .. "src/mame",\n')
    sys.stdout.write('        MAME_DIR .. "src/lib",\n')
    sys.stdout.write('        MAME_DIR .. "src/lib/util",\n')
    sys.stdout.write('        MAME_DIR .. "src/lib/netlist",\n')
    sys.stdout.write('        MAME_DIR .. "3rdparty",\n')
    sys.stdout.write('        GEN_DIR .. "mame/layout",\n')
    sys.stdout.write('        ext_includedir("zlib"),\n')
    sys.stdout.write('        ext_includedir("flac"),\n')
    sys.stdout.write('    }\n')
    sys.stdout.write('\n')
    sys.stdout.write('    files{\n')
    for line in deps_files_included:
        sys.stdout.write('        MAME_DIR .. "%s",\n' % line)
    sys.stdout.write('    }\n')
    sys.stdout.write('end\n')
    sys.stdout.write('\n')
    sys.stdout.write('function linkProjects_mame_%s(_target, _subtarget)\n' % sys.argv[4])
    sys.stdout.write('    links {\n')
    sys.stdout.write('        "mame_%s",\n' % sys.argv[4])
    sys.stdout.write('    }\n')
    sys.stdout.write('end\n')
```
**license:** gpl-2.0 · **hash:** 5,154,461,549,047,289,000 · **line_mean:** 32.086614 · **line_max:** 92 · **alpha_frac:** 0.510114 · **ratio:** 3.543002 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
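The `--@` lines that `parse_lua_file` harvests map a header path to the build component that provides it. A hypothetical example of the expected shape (the real entries live in MAME's scripts/src/*.lua files):

```python
# A line like this in cpu.lua (illustrative):
#   --@src/devices/cpu/z80/z80.h,CPUS["Z80"]
# is parsed into:
#   mappings['src/devices/cpu/z80/z80.h'] = 'CPUS["Z80"]'
# so that any source file including z80.h pulls CPUS["Z80"] into `components`.
```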
---

**repo_name:** pku9104038/edx-platform · **path:** common/lib/capa/capa/capa_problem.py · **copies:** 1 · **size:** 28574
```python
#
# File:   capa/capa_problem.py
#
# Nomenclature:
#
# A capa Problem is a collection of text and capa Response questions.
# Each Response may have one or more Input entry fields.
# The capa problem may include a solution.
#
"""
Main module which shows problems (of "capa" type).

This is used by capa_module.
"""

from datetime import datetime
import logging
import os.path
import re

from lxml import etree
from xml.sax.saxutils import unescape
from copy import deepcopy

from capa.correctmap import CorrectMap
import capa.inputtypes as inputtypes
import capa.customrender as customrender
import capa.responsetypes as responsetypes
from capa.util import contextualize_text, convert_files_to_filenames
import capa.xqueue_interface as xqueue_interface
from capa.safe_exec import safe_exec

from pytz import UTC

# extra things displayed after "show answers" is pressed
solution_tags = ['solution']

# these get captured as student responses
response_properties = ["codeparam", "responseparam", "answer", "openendedparam"]

# special problem tags which should be turned into innocuous HTML
html_transforms = {
    'problem': {'tag': 'div'},
    'text': {'tag': 'span'},
    'math': {'tag': 'span'},
}

# These should be removed from HTML output, including all subelements
html_problem_semantics = [
    "codeparam", "responseparam", "answer", "script", "hintgroup",
    "openendedparam", "openendedrubric"
]

log = logging.getLogger(__name__)

#-----------------------------------------------------------------------------
# main class for this module


class LoncapaSystem(object):
    """
    An encapsulation of resources needed from the outside.

    These interfaces are collected here so that a caller of LoncapaProblem
    can provide these resources however make sense for their environment, and
    this code can remain independent.

    Attributes:
        i18n: an object implementing the `gettext.Translations` interface so
            that we can use `.ugettext` to localize strings.

    See :class:`ModuleSystem` for documentation of other attributes.
    """
    def __init__(                                       # pylint: disable=invalid-name
            self,
            ajax_url,
            anonymous_student_id,
            cache,
            can_execute_unsafe_code,
            DEBUG,                                      # pylint: disable=invalid-name
            filestore,
            i18n,
            node_path,
            render_template,
            seed,      # Why do we do this if we have self.seed?
            STATIC_URL,                                 # pylint: disable=invalid-name
            xqueue,
    ):
        self.ajax_url = ajax_url
        self.anonymous_student_id = anonymous_student_id
        self.cache = cache
        self.can_execute_unsafe_code = can_execute_unsafe_code
        self.DEBUG = DEBUG                              # pylint: disable=invalid-name
        self.filestore = filestore
        self.i18n = i18n
        self.node_path = node_path
        self.render_template = render_template
        self.seed = seed      # Why do we do this if we have self.seed?
        self.STATIC_URL = STATIC_URL                    # pylint: disable=invalid-name
        self.xqueue = xqueue


class LoncapaProblem(object):
    """
    Main class for capa Problems.
    """
    def __init__(self, problem_text, id, capa_system, state=None, seed=None):
        """
        Initializes capa Problem.

        Arguments:

            problem_text (string): xml defining the problem.
            id (string): identifier for this problem, often a filename (no spaces).
            capa_system (LoncapaSystem): LoncapaSystem instance which provides OS,
                rendering, user context, and other resources.
            state (dict): containing the following keys:
                - `seed` (int) random number generator seed
                - `student_answers` (dict) maps input id to the stored answer for that input
                - `correct_map` (CorrectMap) a map of each input to their 'correctness'
                - `done` (bool) indicates whether or not this problem is considered done
                - `input_state` (dict) maps input_id to a dictionary that holds the state
                  for that input
            seed (int): random number generator seed.

        """
        ## Initialize class variables from state
        self.do_reset()
        self.problem_id = id
        self.capa_system = capa_system

        state = state or {}

        # Set seed according to the following priority:
        #
        #   1. Contained in problem's state
        #   2. Passed into capa_problem via constructor
        self.seed = state.get('seed', seed)
        assert self.seed is not None, "Seed must be provided for LoncapaProblem."

        self.student_answers = state.get('student_answers', {})
        if 'correct_map' in state:
            self.correct_map.set_dict(state['correct_map'])
        self.done = state.get('done', False)
        self.input_state = state.get('input_state', {})

        # Convert startouttext and endouttext to proper <text></text>
        problem_text = re.sub(r"startouttext\s*/", "text", problem_text)
        problem_text = re.sub(r"endouttext\s*/", "/text", problem_text)
        self.problem_text = problem_text

        # parse problem XML file into an element tree
        self.tree = etree.XML(problem_text)

        # handle any <include file="foo"> tags
        self._process_includes()

        # construct script processor context (eg for customresponse problems)
        self.context = self._extract_context(self.tree)

        # Pre-parse the XML tree: modifies it to add ID's and perform some in-place
        # transformations.  This also creates the dict (self.responders) of Response
        # instances for each question in the problem. The dict has keys = xml subtree of
        # Response, values = Response instance
        self._preprocess_problem(self.tree)

        if not self.student_answers:  # True when student_answers is an empty dict
            self.set_initial_display()

        # dictionary of InputType objects associated with this problem
        #   input_id string -> InputType object
        self.inputs = {}

        self.extracted_tree = self._extract_html(self.tree)

    def do_reset(self):
        """
        Reset internal state to unfinished, with no answers
        """
        self.student_answers = dict()
        self.correct_map = CorrectMap()
        self.done = False

    def set_initial_display(self):
        """
        Set the student's answers to the responders' initial displays, if specified.
        """
        initial_answers = dict()
        for responder in self.responders.values():
            if hasattr(responder, 'get_initial_display'):
                initial_answers.update(responder.get_initial_display())

        self.student_answers = initial_answers

    def __unicode__(self):
        return u"LoncapaProblem ({0})".format(self.problem_id)

    def get_state(self):
        """
        Stored per-user session data needed to:
            1) Recreate the problem
            2) Populate any student answers.
        """
        return {'seed': self.seed,
                'student_answers': self.student_answers,
                'correct_map': self.correct_map.get_dict(),
                'input_state': self.input_state,
                'done': self.done}

    def get_max_score(self):
        """
        Return the maximum score for this problem.
        """
        maxscore = 0
        for responder in self.responders.values():
            maxscore += responder.get_max_score()
        return maxscore

    def get_score(self):
        """
        Compute score for this problem.  The score is the number of points awarded.
        Returns a dictionary {'score': integer, from 0 to get_max_score(),
                              'total': get_max_score()}.
        """
        correct = 0
        for key in self.correct_map:
            try:
                correct += self.correct_map.get_npoints(key)
            except Exception:
                log.error('key=%s, correct_map = %s', key, self.correct_map)
                raise

        if (not self.student_answers) or len(self.student_answers) == 0:
            return {'score': 0,
                    'total': self.get_max_score()}
        else:
            return {'score': correct,
                    'total': self.get_max_score()}

    def update_score(self, score_msg, queuekey):
        """
        Deliver grading response (e.g. from async code checking) to
        the specific ResponseType that requested grading

        Returns an updated CorrectMap
        """
        cmap = CorrectMap()
        cmap.update(self.correct_map)
        for responder in self.responders.values():
            if hasattr(responder, 'update_score'):
                # Each LoncapaResponse will update its specific entries in cmap
                #   cmap is passed by reference
                responder.update_score(score_msg, cmap, queuekey)
        self.correct_map.set_dict(cmap.get_dict())
        return cmap

    def ungraded_response(self, xqueue_msg, queuekey):
        """
        Handle any responses from the xqueue that do not contain grades
        Will try to pass the queue message to all inputtypes that can handle
        ungraded responses

        Does not return any value
        """
        # check against each inputtype
        for the_input in self.inputs.values():
            # if the input type has an ungraded function, pass in the values
            if hasattr(the_input, 'ungraded_response'):
                the_input.ungraded_response(xqueue_msg, queuekey)

    def is_queued(self):
        """
        Returns True if any part of the problem has been submitted to an
        external queue (e.g. for grading.)
        """
        return any(self.correct_map.is_queued(answer_id) for answer_id in self.correct_map)

    def get_recentmost_queuetime(self):
        """
        Returns a DateTime object that represents the timestamp of the most recent
        queueing request, or None if not queued
        """
        if not self.is_queued():
            return None

        # Get a list of timestamps of all queueing requests, then convert it to a DateTime object
        queuetime_strs = [
            self.correct_map.get_queuetime_str(answer_id)
            for answer_id in self.correct_map
            if self.correct_map.is_queued(answer_id)
        ]
        queuetimes = [
            datetime.strptime(qt_str, xqueue_interface.dateformat).replace(tzinfo=UTC)
            for qt_str in queuetime_strs
        ]

        return max(queuetimes)

    def grade_answers(self, answers):
        """
        Grade student responses.  Called by capa_module.check_problem.

        `answers` is a dict of all the entries from request.POST, but with the first part
        of each key removed (the string before the first "_").

        Thus, for example, input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123

        Calls the Response for each question in this problem, to do the actual grading.
        """
        # if answers include File objects, convert them to filenames.
        self.student_answers = convert_files_to_filenames(answers)
        return self._grade_answers(answers)

    def supports_rescoring(self):
        """
        Checks that the current problem definition permits rescoring.

        More precisely, it checks that there are no response types in
        the current problem that are not fully supported (yet) for
        rescoring.

        This includes responsetypes for which the student's answer
        is not properly stored in state, i.e. file submissions.  At present,
        we have no way to know if an existing response was actually a real
        answer or merely the filename of a file submitted as an answer.

        It turns out that because rescoring is a background task, limiting
        it to responsetypes that don't support file submissions also means
        that the responsetypes are synchronous.  This is convenient as it
        permits rescoring to be complete when the rescoring call returns.
        """
        return all('filesubmission' not in responder.allowed_inputfields
                   for responder in self.responders.values())

    def rescore_existing_answers(self):
        """
        Rescore student responses.  Called by capa_module.rescore_problem.
        """
        return self._grade_answers(None)

    def _grade_answers(self, student_answers):
        """
        Internal grading call used for checking new 'student_answers' and also
        rescoring existing student_answers.

        For new student_answers being graded, `student_answers` is a dict of all the
        entries from request.POST, but with the first part of each key removed
        (the string before the first "_").  Thus, for example,
        input_ID123 -> ID123, and input_fromjs_ID123 -> fromjs_ID123.

        For rescoring, `student_answers` is None.

        Calls the Response for each question in this problem, to do the actual grading.
        """
        # old CorrectMap
        oldcmap = self.correct_map

        # start new with empty CorrectMap
        newcmap = CorrectMap()

        # Call each responsetype instance to do actual grading
        for responder in self.responders.values():
            # File objects are passed only if responsetype explicitly allows
            # for file submissions.  But we have no way of knowing if
            # student_answers contains a proper answer or the filename of
            # an earlier submission, so for now skip these entirely.
            # TODO: figure out where to get file submissions when rescoring.
            if 'filesubmission' in responder.allowed_inputfields and student_answers is None:
                _ = self.capa_system.i18n.ugettext
                raise Exception(_(u"Cannot rescore problems with possible file submissions"))

            # use 'student_answers' only if it is provided, and if it might contain a file
            # submission that would not exist in the persisted "student_answers".
            if 'filesubmission' in responder.allowed_inputfields and student_answers is not None:
                results = responder.evaluate_answers(student_answers, oldcmap)
            else:
                results = responder.evaluate_answers(self.student_answers, oldcmap)
            newcmap.update(results)

        self.correct_map = newcmap
        return newcmap

    def get_question_answers(self):
        """
        Returns a dict of answer_ids to answer values. If we cannot generate
        an answer (this sometimes happens in customresponses), that answer_id
        is not included. Called by "show answers" button JSON request
        (see capa_module)
        """
        # dict of (id, correct_answer)
        answer_map = dict()
        for response in self.responders.keys():
            results = self.responder_answers[response]
            answer_map.update(results)

        # include solutions from <solution>...</solution> stanzas
        for entry in self.tree.xpath("//" + "|//".join(solution_tags)):
            answer = etree.tostring(entry)
            if answer:
                answer_map[entry.get('id')] = contextualize_text(answer, self.context)

        log.debug('answer_map = %s', answer_map)
        return answer_map

    def get_answer_ids(self):
        """
        Return the IDs of all the responses -- these are the keys used for
        the dicts returned by grade_answers and get_question_answers. (Though
        get_question_answers may only return a subset of these.
        """
        answer_ids = []
        for response in self.responders.keys():
            results = self.responder_answers[response]
            answer_ids.append(results.keys())
        return answer_ids

    def get_html(self):
        """
        Main method called externally to get the HTML to be rendered for this capa Problem.
        """
        html = contextualize_text(etree.tostring(self._extract_html(self.tree)), self.context)
        return html

    def handle_input_ajax(self, data):
        """
        InputTypes can support specialized AJAX calls. Find the correct input and pass
        along the correct data

        Also, parse out the dispatch from the get so that it can be passed onto the
        input type nicely
        """
        # pull out the id
        input_id = data['input_id']
        if self.inputs[input_id]:
            dispatch = data['dispatch']
            return self.inputs[input_id].handle_ajax(dispatch, data)
        else:
            log.warning("Could not find matching input for id: %s", input_id)
            return {}

    # ======= Private Methods Below ========

    def _process_includes(self):
        """
        Handle any <include file="foo"> tags by reading in the specified file
        and inserting it into our XML tree.  Fail gracefully if debugging.
        """
        includes = self.tree.findall('.//include')
        for inc in includes:
            filename = inc.get('file')
            if filename is not None:
                try:
                    # open using LoncapaSystem OSFS filestore
                    ifp = self.capa_system.filestore.open(filename)
                except Exception as err:
                    log.warning(
                        'Error %s in problem xml include: %s',
                        err, etree.tostring(inc, pretty_print=True)
                    )
                    log.warning(
                        'Cannot find file %s in %s', filename, self.capa_system.filestore
                    )
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): need real error handling, display to users
                    if not self.capa_system.DEBUG:
                        raise
                    else:
                        continue
                try:
                    # read in and convert to XML
                    incxml = etree.XML(ifp.read())
                except Exception as err:
                    log.warning(
                        'Error %s in problem xml include: %s',
                        err, etree.tostring(inc, pretty_print=True)
                    )
                    log.warning('Cannot parse XML in %s', (filename))
                    # if debugging, don't fail - just log error
                    # TODO (vshnayder): same as above
                    if not self.capa_system.DEBUG:
                        raise
                    else:
                        continue

                # insert new XML into tree in place of include
                parent = inc.getparent()
                parent.insert(parent.index(inc), incxml)
                parent.remove(inc)
                log.debug('Included %s into %s' % (filename, self.problem_id))

    def _extract_system_path(self, script):
        """
        Extracts and normalizes additional paths for code execution.
        For now, there's a default path of data/course/code; this may be removed
        at some point.

        script : ?? (TODO)
        """

        DEFAULT_PATH = ['code']

        # Separate paths by :, like the system path.
        raw_path = script.get('system_path', '').split(":") + DEFAULT_PATH

        # find additional comma-separated modules search path
        path = []

        for dir in raw_path:
            if not dir:
                continue

            # path is an absolute path or a path relative to the data dir
            dir = os.path.join(self.capa_system.filestore.root_path, dir)
            # Check that we are within the filestore tree.
            reldir = os.path.relpath(dir, self.capa_system.filestore.root_path)
            if ".." in reldir:
                log.warning("Ignoring Python directory outside of course: %r", dir)
                continue

            abs_dir = os.path.normpath(dir)
            path.append(abs_dir)

        return path

    def _extract_context(self, tree):
        """
        Extract content of <script>...</script> from the problem.xml file, and exec it in the
        context of this problem.  Provides ability to randomize problems, and also set
        variables for problem answer checking.

        Problem XML goes to Python execution context. Runs everything in script tags.
        """
        context = {}
        context['seed'] = self.seed
        all_code = ''

        python_path = []

        for script in tree.findall('.//script'):

            stype = script.get('type')
            if stype:
                if 'javascript' in stype:
                    continue    # skip javascript
                if 'perl' in stype:
                    continue        # skip perl
            # TODO: evaluate only python

            for d in self._extract_system_path(script):
                if d not in python_path and os.path.exists(d):
                    python_path.append(d)

            XMLESC = {"&apos;": "'", "&quot;": '"'}
            code = unescape(script.text, XMLESC)
            all_code += code

        if all_code:
            try:
                safe_exec(
                    all_code,
                    context,
                    random_seed=self.seed,
                    python_path=python_path,
                    cache=self.capa_system.cache,
                    slug=self.problem_id,
                    unsafely=self.capa_system.can_execute_unsafe_code(),
                )
            except Exception as err:
                log.exception("Error while execing script code: " + all_code)
                msg = "Error while executing script code: %s" % str(err).replace('<', '&lt;')
                raise responsetypes.LoncapaProblemError(msg)

        # Store code source in context, along with the Python path needed to run it correctly.
        context['script_code'] = all_code
        context['python_path'] = python_path
        return context

    def _extract_html(self, problemtree):  # private
        """
        Main (private) function which converts Problem XML tree to HTML.
        Calls itself recursively.

        Returns Element tree of XHTML representation of problemtree.
        Calls render_html of Response instances to render responses into XHTML.

        Used by get_html.
        """
        if not isinstance(problemtree.tag, basestring):
            # Comment and ProcessingInstruction nodes are not Elements,
            # and we're ok leaving those behind.
            # BTW: etree gives us no good way to distinguish these things
            # other than to examine .tag to see if it's a string. :(
            return

        if (problemtree.tag == 'script' and problemtree.get('type')
                and 'javascript' in problemtree.get('type')):
            # leave javascript intact.
            return deepcopy(problemtree)

        if problemtree.tag in html_problem_semantics:
            return

        problemid = problemtree.get('id')    # my ID

        if problemtree.tag in inputtypes.registry.registered_tags():
            # If this is an inputtype subtree, let it render itself.
            status = "unsubmitted"
            msg = ''
            hint = ''
            hintmode = None
            input_id = problemtree.get('id')
            if problemid in self.correct_map:
                pid = input_id
                status = self.correct_map.get_correctness(pid)
                msg = self.correct_map.get_msg(pid)
                hint = self.correct_map.get_hint(pid)
                hintmode = self.correct_map.get_hintmode(pid)

            value = ""
            if self.student_answers and problemid in self.student_answers:
                value = self.student_answers[problemid]

            if input_id not in self.input_state:
                self.input_state[input_id] = {}

            # do the rendering
            state = {
                'value': value,
                'status': status,
                'id': input_id,
                'input_state': self.input_state[input_id],
                'feedback': {
                    'message': msg,
                    'hint': hint,
                    'hintmode': hintmode,
                }
            }

            input_type_cls = inputtypes.registry.get_class_for_tag(problemtree.tag)
            # save the input type so that we can make ajax calls on it if we need to
            self.inputs[input_id] = input_type_cls(self.capa_system, problemtree, state)
            return self.inputs[input_id].get_html()

        # let each Response render itself
        if problemtree in self.responders:
            overall_msg = self.correct_map.get_overall_message()
            return self.responders[problemtree].render_html(
                self._extract_html, response_msg=overall_msg
            )

        # let each custom renderer render itself:
        if problemtree.tag in customrender.registry.registered_tags():
            renderer_class = customrender.registry.get_class_for_tag(problemtree.tag)
            renderer = renderer_class(self.capa_system, problemtree)
            return renderer.get_html()

        # otherwise, render children recursively, and copy over attributes
        tree = etree.Element(problemtree.tag)
        for item in problemtree:
            item_xhtml = self._extract_html(item)
            if item_xhtml is not None:
                tree.append(item_xhtml)

        if tree.tag in html_transforms:
            tree.tag = html_transforms[problemtree.tag]['tag']
        else:
            # copy attributes over if not innocufying
            for (key, value) in problemtree.items():
                tree.set(key, value)

        tree.text = problemtree.text
        tree.tail = problemtree.tail

        return tree

    def _preprocess_problem(self, tree):  # private
        """
        Assign IDs to all the responses
        Assign sub-IDs to all entries (textline, schematic, etc.)
        Annotate correctness and value
        In-place transformation

        Also create capa Response instances for each responsetype and save as self.responders

        Obtain all responder answers and save as self.responder_answers dict (key = response)
        """
        response_id = 1
        self.responders = {}
        for response in tree.xpath('//' + "|//".join(responsetypes.registry.registered_tags())):
            response_id_str = self.problem_id + "_" + str(response_id)
            # create and save ID for this response
            response.set('id', response_id_str)
            response_id += 1

            answer_id = 1
            input_tags = inputtypes.registry.registered_tags()
            inputfields = tree.xpath(
                "|".join(['//' + response.tag + '[@id=$id]//' + x
                          for x in (input_tags + solution_tags)]),
                id=response_id_str
            )

            # assign one answer_id for each input type or solution type
            for entry in inputfields:
                entry.attrib['response_id'] = str(response_id)
                entry.attrib['answer_id'] = str(answer_id)
                entry.attrib['id'] = "%s_%i_%i" % (self.problem_id, response_id, answer_id)
                answer_id = answer_id + 1

            # instantiate capa Response
            responsetype_cls = responsetypes.registry.get_class_for_tag(response.tag)
            responder = responsetype_cls(response, inputfields, self.context, self.capa_system)
            # save in list in self
            self.responders[response] = responder

        # get responder answers (do this only once, since there may be a performance cost,
        # eg with externalresponse)
        self.responder_answers = {}
        for response in self.responders.keys():
            try:
                self.responder_answers[response] = self.responders[response].get_answers()
            except:
                log.debug('responder %s failed to properly return get_answers()',
                          self.responders[response])  # FIXME
                raise

        # <solution>...</solution> may not be associated with any specific response; give
        # IDs for those separately
        # TODO: We should make the namespaces consistent and unique (e.g. %s_problem_%i).
        solution_id = 1
        for solution in tree.findall('.//solution'):
            solution.attrib['id'] = "%s_solution_%i" % (self.problem_id, solution_id)
            solution_id += 1
```
**license:** agpl-3.0 · **hash:** -9,036,447,143,522,534,000 · **line_mean:** 37.718157 · **line_max:** 115 · **alpha_frac:** 0.585042 · **ratio:** 4.429391 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
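A minimal round-trip sketch using the constructor and `get_state()` above; it assumes `capa_system` is an already-configured `LoncapaSystem` (its many services are elided here) and `problem_xml` is a capa problem XML string:

```python
# Hypothetical: persist a problem's per-user state and rebuild it later.
problem = LoncapaProblem(problem_xml, 'sample_problem', capa_system, seed=1)
state = problem.get_state()   # keys: seed, student_answers, correct_map, input_state, done

# ...store `state`, then later reconstruct the same problem for the same user:
restored = LoncapaProblem(problem_xml, 'sample_problem', capa_system, state=state)
assert restored.seed == problem.seed  # the seed in state takes priority
```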
---

**repo_name:** JonathanSalwan/Triton · **path:** src/examples/python/proving_opaque_predicates.py · **copies:** 1 · **size:** 3432
```python
#!/usr/bin/env python3
## -*- coding: utf-8 -*-
##
## Example to detect opaque predicates. This example is based
## on the Tomislav Zubcic's blog post [0,1] =).
##
## Output:
##
##  $ python3 proving_opaque_predicates.py
##  xor eax, eax
##  jo 7
##  opaque predicate: never taken
##  ----------------------------------
##  xor eax, eax
##  je 7
##  opaque predicate: always taken
##  ----------------------------------
##  xor eax, ebx
##  je 7
##  not an opaque predicate
##  ----------------------------------
##  and eax, 0x3fffffff
##  and ebx, 0x3fffffff
##  xor ecx, edx
##  xor edx, edi
##  add eax, ebx
##  jo 0x16
##  opaque predicate: never taken
##  ----------------------------------
##  and eax, 0x3fffffff
##  and ebx, 0x3fffffff
##  xor ecx, edx
##  xor edx, edi
##  xor eax, ebx
##  je 0x16
##  not an opaque predicate
##  ----------------------------------
##
## [0] http://zubcic.re/blog/experimenting-with-z3-proving-opaque-predicates
## [1] https://www.reddit.com/r/ReverseEngineering/comments/4yf6tz/experimenting_with_z3_proving_opaque_predicates/
##
## -- jonathan

from __future__ import print_function
from triton import TritonContext, ARCH, Instruction

import sys

trace_1 = [
    b"\x31\xC0",                  # xor eax, eax
    b"\x0F\x80\x01\x00\x00\x00",  # jo 7
]

trace_2 = [
    b"\x31\xC0",                  # xor eax, eax
    b"\x0F\x84\x01\x00\x00\x00",  # je 7
]

trace_3 = [
    b"\x31\xD8",                  # xor eax, ebx
    b"\x0F\x84\x01\x00\x00\x00",  # je 7
]

trace_4 = [
    b"\x25\xff\xff\xff\x3f",      # and eax, 0x3fffffff
    b"\x81\xe3\xff\xff\xff\x3f",  # and ebx, 0x3fffffff
    b"\x31\xd1",                  # xor ecx, edx
    b"\x31\xfa",                  # xor edx, edi
    b"\x01\xd8",                  # add eax, ebx
    b"\x0f\x80\x10\x00\x00\x00",  # jo 27
]

trace_5 = [
    b"\x25\xff\xff\xff\x3f",      # and eax, 0x3fffffff
    b"\x81\xe3\xff\xff\xff\x3f",  # and ebx, 0x3fffffff
    b"\x31\xd1",                  # xor ecx, edx
    b"\x31\xfa",                  # xor edx, edi
    b"\x31\xD8",                  # xor eax, ebx
    b"\x0F\x84\x10\x00\x00\x00",  # je 16
]

Triton = TritonContext()


def symbolization_init():
    Triton.symbolizeRegister(Triton.registers.eax)
    Triton.symbolizeRegister(Triton.registers.ebx)
    Triton.symbolizeRegister(Triton.registers.ecx)
    Triton.symbolizeRegister(Triton.registers.edx)
    return


def test_trace(trace):
    Triton.setArchitecture(ARCH.X86)
    symbolization_init()

    astCtxt = Triton.getAstContext()

    for opcode in trace:
        instruction = Instruction()
        instruction.setOpcode(opcode)
        Triton.processing(instruction)
        print(instruction.getDisassembly())

        if instruction.isBranch():
            # Opaque Predicate AST
            op_ast = Triton.getPathPredicate()
            # Try another model
            model = Triton.getModel(astCtxt.lnot(op_ast))
            if model:
                print("not an opaque predicate")
            else:
                if instruction.isConditionTaken():
                    print("opaque predicate: always taken")
                else:
                    print("opaque predicate: never taken")

    print('----------------------------------')
    return


if __name__ == '__main__':
    test_trace(trace_1)
    test_trace(trace_2)
    test_trace(trace_3)
    test_trace(trace_4)
    test_trace(trace_5)
    sys.exit(0)
```
**license:** apache-2.0 · **hash:** -7,506,264,363,885,100,000 · **line_mean:** 26.238095 · **line_max:** 115 · **alpha_frac:** 0.540501 · **ratio:** 2.918367 · **autogenerated:** false · **config_test:** true · **has_no_keywords:** false · **few_assignments:** false
---

**repo_name:** mekkablue/Glyphs-Scripts · **path:** Kerning/kernanalysis.py · **copies:** 1 · **size:** 7373
```python
# -*- coding: utf-8 -*-
from __future__ import print_function

from GlyphsApp import Glyphs
if Glyphs.versionNumber >= 3.0:
    from GlyphsApp import LTR
from Foundation import NSNotFound

intervalList = (1, 3, 5, 10, 20)
categoryList = (
    "Letter:Uppercase",
    "Letter:Lowercase",
    "Letter:Smallcaps",
    "Punctuation",
    "Symbol:Currency",
    "Symbol:Math",
    "Symbol:Other",
    "Symbol:Arrow",
    "Number:Decimal Digit",
    "Number:Small",
    "Number:Fraction",
)

def stringToListOfGlyphsForFont(string, Font, report=True, excludeNonExporting=True, suffix=""):
    # parse string into parseList:
    parseList = []
    waitForSeparator = False
    parsedName = ""

    # cut off comment:
    if "#" in string:
        string = string[:string.find("#")].strip()

    # parse string:
    for i, x in enumerate(string):
        if x in "/ ":
            if parsedName:
                parseList.append(parsedName)
                parsedName = ""
            if x == "/":
                waitForSeparator = True
            else:
                waitForSeparator = False
        elif waitForSeparator:
            parsedName += x
            if i == len(string) - 1:
                parseList.append(parsedName)
        else:
            parsedName = ""
            parseList.append(x)

    # go through parseList and find corresponding glyph in Font:
    glyphList = []
    for parsedName in parseList:
        if parsedName.startswith("@"):
            # category and subcategory:
            if ":" in parsedName:
                category, subcategory = parsedName[1:].split(":")
            else:
                category, subcategory = parsedName[1:], None
            # TODO parse
            categoryGlyphs = listOfNamesForCategories(
                Font,
                category, subcategory,  # OK
                "latin",  # requiredScript, # need to implement still
                None,  # excludedGlyphNameParts, # need to implement still
                excludeNonExporting,  # OK
                suffix=suffix,
            )
            if categoryGlyphs:
                glyphList += categoryGlyphs
                if report:
                    print(u"Added glyphs for category %s, subcategory %s: %s" % (category, subcategory, ", ".join(categoryGlyphs)))
            elif report:
                print(u"Warning: no glyphs found for category %s, subcategory %s." % (category, subcategory))
        else:
            # actual single glyph names:
            glyph = Font.glyphForName_(parsedName + suffix)

            # actual single character:
            if not glyph and len(parsedName) == 1:
                unicodeForName = "%04X" % ord(parsedName)
                glyphInfo = Glyphs.glyphInfoForUnicode(unicodeForName)
                if glyphInfo:
                    glyphName = "%s%s" % (glyphInfo.name, suffix)
                    glyph = Font.glyphs[glyphName]

            # check if glyph exists, exports, and collect in glyphList:
            if glyph:
                if (glyph.export or not excludeNonExporting):
                    glyphList.append(glyph)
                elif report:
                    print(u"Ignoring non-exporting glyph '%s'." % (parsedName + suffix))
            elif report:
                print(u"Warning: Could not find glyph for '%s'." % (parsedName + suffix))

    return glyphList

def nameUntilFirstPeriod(glyphName):
    if not "." in glyphName:
        return glyphName
    else:
        offset = glyphName.find(".")
        return glyphName[:offset]

def effectiveKerning(leftGlyphName, rightGlyphName, thisFont, thisFontMasterID, directionSensitive=True):
    leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID]
    rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID]
    if Glyphs.versionNumber >= 3:
        direction = 0  # LTR
        if directionSensitive:
            direction = Glyphs.font.currentTab.direction
        effectiveKerning = leftLayer.nextKerningForLayer_direction_(rightLayer, direction)
    else:
        effectiveKerning = leftLayer.rightKerningForLayer_(rightLayer)
    if effectiveKerning < NSNotFound:
        return effectiveKerning
    else:
        return 0.0

# older version:
# def effectiveKerning( leftGlyphName, rightGlyphName, thisFont, thisFontMasterID ):
#     leftLayer = thisFont.glyphs[leftGlyphName].layers[thisFontMasterID]
#     rightLayer = thisFont.glyphs[rightGlyphName].layers[thisFontMasterID]
#     if Glyphs.versionNumber >= 3.0:
#         effectiveKerning = leftLayer.nextKerningForLayer_direction_( rightLayer, LTR )
#     else:
#         effectiveKerning = leftLayer.rightKerningForLayer_( rightLayer )
#     return effectiveKerning  # can be NSNotFound
#     # if effectiveKerning < NSNotFound:
#     #     return effectiveKerning
#     # else:
#     #     return 0.0

def listOfNamesForCategories(thisFont, requiredCategory, requiredSubCategory, requiredScript, excludedGlyphNameParts, excludeNonExporting, suffix=""):
    nameList = []
    for thisGlyph in thisFont.glyphs:
        thisScript = thisGlyph.script
        glyphName = thisGlyph.name
        nameIsOK = True

        if suffix:
            nameIsOK = glyphName.endswith(suffix)

        if nameIsOK and excludedGlyphNameParts:
            for thisNamePart in excludedGlyphNameParts:
                nameIsOK = nameIsOK and not thisNamePart in glyphName

        if nameIsOK and (thisGlyph.export or not excludeNonExporting):
            if thisScript == None or thisScript == requiredScript:
                if thisGlyph.category == requiredCategory:
                    if requiredSubCategory:
                        if thisGlyph.subCategory == requiredSubCategory:
                            nameList.append(glyphName)
                    else:
                        nameList.append(glyphName)

    return nameList

def splitString(string, delimiter=":", minimum=2):
    # split string into a list:
    returnList = string.split(delimiter)

    # remove trailing spaces:
    for i in range(len(returnList)):
        returnList[i] = returnList[i].strip()

    # if necessary fill up with None:
    while len(returnList) < minimum:
        returnList.append(None)

    if returnList == [""]:
        return None

    return returnList

def measureLayerAtHeightFromLeftOrRight(thisLayer, height, leftSide=True):
    try:
        if leftSide:
            measurement = thisLayer.lsbAtHeight_(height)
        else:
            measurement = thisLayer.rsbAtHeight_(height)
        if measurement < NSNotFound:
            return measurement
        else:
            return None
    except:
        return None

def isHeightInIntervals(height, ignoreIntervals):
    if ignoreIntervals:
        for interval in ignoreIntervals:
            if height <= interval[1] and height >= interval[0]:
                return True
    return False

def minDistanceBetweenTwoLayers(leftLayer, rightLayer, interval=5.0, kerning=0.0, report=False, ignoreIntervals=[]):
    # correction = leftLayer.RSB + rightLayer.LSB
    topY = min(leftLayer.bounds.origin.y + leftLayer.bounds.size.height,
               rightLayer.bounds.origin.y + rightLayer.bounds.size.height)
    bottomY = max(leftLayer.bounds.origin.y, rightLayer.bounds.origin.y)
    distance = topY - bottomY
    minDist = None
    if kerning > 10000:  # NSNotFound
        kerning = 0
    for i in range(int(distance // interval)):
        height = bottomY + i * interval
        if not isHeightInIntervals(height, ignoreIntervals) or not ignoreIntervals:
            left = measureLayerAtHeightFromLeftOrRight(leftLayer, height, leftSide=False)
            right = measureLayerAtHeightFromLeftOrRight(rightLayer, height, leftSide=True)
            try:  # avoid gaps like in i or j
                total = left + right + kerning  # + correction
                if minDist == None or minDist > total:
                    minDist = total
            except:
                pass
    return minDist

def sortedIntervalsFromString(intervals=""):
    ignoreIntervals = []
    if intervals:
        for interval in intervals.split(","):
            if interval.find(":") != -1:
                interval = interval.strip()
                try:
                    intervalTuple = tuple(sorted([
                        int(interval.split(":")[0].strip()),
                        int(interval.split(":")[1].strip()),
                    ]))
                    ignoreIntervals.append(intervalTuple)
                except:
                    print("Warning: could not convert '%s' into a number interval." % interval.strip())
                    pass
            else:
                print("Warning: '%s' is not an interval (missing colon)" % interval.strip())
    return ignoreIntervals
```
**license:** apache-2.0 · **hash:** -576,480,600,968,904,300 · **line_mean:** 29.849372 · **line_max:** 152 · **alpha_frac:** 0.714092 · **ratio:** 3.276889 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
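A small sketch of how the helpers above combine inside Glyphs' Macro window; `Glyphs.font` and the master-id lookup are standard GlyphsApp API, while the glyph pair and interval are arbitrary choices:

```python
# Hypothetical: measure the effective gap of the pair T-o in the current master.
font = Glyphs.font
master_id = font.selectedFontMaster.id

kern = effectiveKerning("T", "o", font, master_id, directionSensitive=False)
gap = minDistanceBetweenTwoLayers(
    font.glyphs["T"].layers[master_id],
    font.glyphs["o"].layers[master_id],
    kerning=kern,
    ignoreIntervals=sortedIntervalsFromString("0:100"),  # e.g. skip a zone near the baseline
)
print("effective kerning: %s, minimal distance: %s" % (kern, gap))
```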
---

**repo_name:** Pablites/W2IO · **path:** setup.py · **copies:** 1 · **size:** 1491
```python
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Project: W2IO (https://github.com/Pablites/W2IO)
# Authors: Paweł Wichary & Michał Waleszczuk
# Date: 21 July 2015
# Licence: available on github

"""
Setup of W2IO
install all files of plugin
Install directory is: ~/.starcluster/plugins
"""

import logging
import os
import shutil
import sys

from installer.src.files_copy import FilesCopier
from installer.src.folders_dependency import FoldersDependency

logging.basicConfig(filename='log/w2io_setup.log', level=logging.DEBUG)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)

logging.info('W2IO setup - started')
print 'W2IO setup - started'

if os.path.exists("/root/.starcluster/plugins/w2io"):
    logging.info('W2IO setup - removing old W2IO')
    shutil.rmtree("/root/.starcluster/plugins/w2io")

__current_path = os.path.dirname(os.path.abspath(__file__))

try:
    FoldersDependency(__current_path).run()
except OSError as e:
    logging.error(e)
    print e
    logging.info('W2IO setup - aborting')
    print 'W2IO setup - aborting'
    sys.exit("NoDirectoryFound")

FilesCopier(__current_path).run()

logging.info('W2IO setup - plugin installed at ~/.starcluster/plugins')
print 'W2IO setup - plugin installed at ~/.starcluster/plugins'
logging.info('W2IO setup - remember to read configuration file ~/.starcluster/plugins/w2io/w2io_config')
print 'W2IO setup - remember to read configuration file ~/.starcluster/plugins/w2io/w2io_config'
```
**license:** lgpl-3.0 · **hash:** -7,020,134,948,091,910,000 · **line_mean:** 30.020833 · **line_max:** 104 · **alpha_frac:** 0.733378 · **ratio:** 3.229935 · **autogenerated:** false · **config_test:** true · **has_no_keywords:** true · **few_assignments:** false
---

**repo_name:** m3h0w/jigsaw_friend · **path:** trackbar.py · **copies:** 1 · **size:** 1497
""" usage: threshold_custom = tb.SimpleTrackbar(img, "ImgThresh") """ import cv2 import numpy as np def empty_function(*arg): pass def SimpleTrackbar(img, win_name): trackbar_name = win_name + "Trackbar" cv2.namedWindow(win_name) cv2.createTrackbar(trackbar_name, win_name, 0, 255, empty_function) while True: trackbar_pos = cv2.getTrackbarPos(trackbar_name, win_name) _, img_th = cv2.threshold(img, trackbar_pos, 255, cv2.THRESH_BINARY) cv2.imshow(win_name, img_th) key = cv2.waitKey(1) & 0xFF if key == ord("c"): break cv2.destroyAllWindows() return trackbar_pos def CannyTrackbar(img, win_name): trackbar_name = win_name + "Trackbar" cv2.namedWindow(win_name) cv2.resizeWindow(win_name, 500,100) cv2.createTrackbar("first", win_name, 0, 255, empty_function) cv2.createTrackbar("second", win_name, 0, 255, empty_function) cv2.createTrackbar("third", win_name, 0, 255, empty_function) while True: trackbar_pos1 = cv2.getTrackbarPos("first", win_name) trackbar_pos2 = cv2.getTrackbarPos("second", win_name) trackbar_pos3 = cv2.getTrackbarPos("third", win_name) img_blurred = cv2.GaussianBlur(img.copy(), (7,7), 2) canny = cv2.Canny(img_blurred, trackbar_pos1, trackbar_pos2) cv2.imshow(win_name, canny) key = cv2.waitKey(1) & 0xFF if key == ord("c"): break cv2.destroyAllWindows() return canny
**license:** mit · **hash:** -5,651,194,603,384,155,000 · **line_mean:** 28.372549 · **line_max:** 76 · **alpha_frac:** 0.639947 · **ratio:** 2.970238 · **autogenerated:** false · **config_test:** false · **has_no_keywords:** false · **few_assignments:** false
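Expanding the usage hint in the docstring above; the image path is illustrative:

```python
# Hypothetical session: tune a binary threshold, then preview Canny edges.
import cv2

img = cv2.imread("puzzle.png", cv2.IMREAD_GRAYSCALE)  # path is a placeholder
threshold = SimpleTrackbar(img, "ImgThresh")  # drag the slider, press 'c' to accept
edges = CannyTrackbar(img, "Canny")           # returns the last Canny image shown
print("chosen threshold:", threshold)
```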
---

**repo_name:** eteq/ginga · **path:** ginga/Bindings.py · **copies:** 1 · **size:** 66446
# # Bindings.py -- Bindings classes for Ginga FITS viewer. # # Eric Jeschke (eric@naoj.org) # # Copyright (c) Eric R. Jeschke. All rights reserved. # This is open-source software licensed under a BSD license. # Please see the file LICENSE.txt for details. import math from ginga.misc import Bunch, Settings, Callback from ginga import AutoCuts, trcalc from ginga import cmap, imap class ImageViewBindings(object): """ Mouse Operation and Bindings """ def __init__(self, logger, settings=None): super(ImageViewBindings, self).__init__() self.logger = logger self.canpan = False self.canzoom = False self._ispanning = False self.cancut = False self.cancmap = False self.canflip = False self.canrotate = False # For panning self._pantype = 1 self._start_x = None self._start_y = None self._start_panx = 0 self._start_pany = 0 self._start_scale_x = 0 self._start_scale_y = 0 self._start_rot = 0 if settings is None: # No settings passed. Set up defaults. settings = Settings.SettingGroup(name='bindings', logger=self.logger) self.initialize_settings(settings) self.settings = settings self.autocuts = AutoCuts.ZScale(self.logger) self.features = dict( # name, attr pairs pan='canpan', zoom='canzoom', cuts='cancut', cmap='cancmap', flip='canflip', rotate='canrotate') def initialize_settings(self, settings): settings.addSettings( # You should rarely have to change these. btn_nobtn = 0x0, btn_left = 0x1, btn_middle= 0x2, btn_right = 0x4, # Set up our standard modifiers mod_shift = ['shift_l', 'shift_r'], mod_ctrl = ['control_l', 'control_r'], mod_meta = ['meta_right'], # Define our modes dmod_draw = ['space', None, None], dmod_cmap = ['y', None, None], dmod_cuts = ['s', None, None], dmod_dist = ['d', None, None], dmod_contrast = ['t', None, None], dmod_rotate = ['r', None, None], dmod_pan = ['q', None, None], dmod_freepan = ['w', None, None], # KEYBOARD kp_zoom_in = ['+', '='], kp_zoom_out = ['-', '_'], kp_zoom = ['1', '2', '3', '4', '5', '6', '7', '8', '9', '0'], kp_zoom_inv = ['!', '@', '#', '$', '%', '^', '&', '*', '(', ')'], kp_zoom_fit = ['backquote'], kp_autozoom_toggle = ['doublequote'], kp_autozoom_override = ['singlequote'], kp_dist_reset = ['D'], kp_pan_set = ['p'], kp_center = ['c'], kp_cut_255 = ['A'], kp_cut_minmax = ['S'], kp_cut_auto = ['a'], kp_autocuts_toggle = [':'], kp_autocuts_override = [';'], kp_autocenter_toggle = ['?'], kp_autocenter_override = ['/'], kp_contrast_restore = ['T'], kp_cmap_reset = ['Y'], kp_imap_reset = [], kp_flip_x = ['[', '{'], kp_flip_y = [']', '}'], kp_swap_xy = ['backslash', '|'], kp_rotate_reset = ['R'], kp_rotate_inc90 = ['.'], kp_rotate_dec90 = [','], kp_orient_lh = ['o'], kp_orient_rh = ['O'], kp_poly_add = ['v', 'draw+v'], kp_poly_del = ['z', 'draw+z'], kp_edit_del = ['draw+x'], kp_reset = ['escape'], kp_lock = ['l'], # SCROLLING/WHEEL sc_pan = [], sc_pan_fine = [], sc_pan_coarse = [], sc_zoom = ['scroll'], sc_zoom_fine = ['shift+scroll'], sc_zoom_coarse = ['ctrl+scroll'], sc_cuts_fine = ['cuts+scroll'], sc_cuts_coarse = [], sc_dist = ['dist+scroll'], sc_cmap = ['cmap+scroll'], sc_imap = [], #sc_draw = ['draw+scroll'], scroll_pan_acceleration = 1.0, # 1.0 is appropriate for a mouse, 0.1 for most trackpads scroll_zoom_acceleration = 1.0, #scroll_zoom_acceleration = 0.1, mouse_zoom_acceleration = 1.085, mouse_rotate_acceleration = 0.75, pan_reverse = False, pan_multiplier = 1.0, zoom_scroll_reverse = False, # MOUSE/BUTTON ms_none = ['nobtn'], ms_cursor = ['left'], ms_wheel = [], ms_draw = ['draw+left', 'meta+left', 'right'], ms_rotate = ['rotate+left'], 
ms_rotate_reset = ['rotate+right'], ms_contrast = ['contrast+left', 'ctrl+right'], ms_contrast_restore = ['contrast+right', 'ctrl+middle'], ms_pan = ['pan+left', 'ctrl+left'], ms_zoom = ['pan+right'], ms_freepan = ['freepan+left', 'middle'], ms_zoom_in = ['freepan+middle'], ms_zoom_out = ['freepan+right'], ## ms_cutlo = ['cutlo+left'], ## ms_cuthi = ['cuthi+left'], ms_cutall = ['cuts+left'], ms_cut_auto = ['cuts+right'], ms_panset = ['pan+middle', 'shift+left'], # GESTURES (some backends only) gs_pinch = [], # Rotate gesture usually doesn't work so well on most platforms # so don't enable by default #gs_rotate = [], gs_pan = [], gs_swipe = [], gs_tap = [], pinch_actions = [], pinch_zoom_acceleration = 1.4, pinch_rotate_acceleration = 1.0, ) def get_settings(self): return self.settings def window_map(self, viewer): self.to_default_mode(viewer) def set_bindings(self, viewer): viewer.add_callback('map', self.window_map) bindmap = viewer.get_bindmap() bindmap.clear_button_map() bindmap.clear_event_map() # Set up bindings self.setup_settings_events(viewer, bindmap) def set_mode(self, viewer, name, mode_type='oneshot'): bindmap = viewer.get_bindmap() bindmap.set_mode(name, mode_type=mode_type) def parse_combo(self, combo, modes_set, modifiers_set): mode, mods, trigger = None, set([]), combo if '+' in combo: if combo.endswith('+'): # special case: probably contains the keystroke '+' trigger, combo = '+', combo[:-1] if '+' in combo: items = set(combo.split('+')) else: items = set(combo) else: # trigger is always specified last items = combo.split('+') trigger, items = items[-1], set(items[:-1]) mods = items.intersection(modifiers_set) mode = items.intersection(modes_set) if len(mode) == 0: mode = None else: mode = mode.pop() return (mode, mods, trigger) def setup_settings_events(self, viewer, bindmap): d = self.settings.getDict() if len(d) == 0: self.initialize_settings(self.settings) d = self.settings.getDict() # First scan settings for buttons and modes bindmap.clear_modifier_map() bindmap.clear_mode_map() for name, value in d.items(): if name.startswith('mod_'): modname = name[4:] for combo in value: # NOTE: for now no chorded combinations keyname = combo bindmap.add_modifier(keyname, modname) elif name.startswith('btn_'): btnname = name[4:] bindmap.map_button(value, btnname) elif name.startswith('dmod_'): mode_name = name[5:] keyname, mode_type, msg = value bindmap.add_mode(keyname, mode_name, mode_type=mode_type, msg=msg) modes_set = bindmap.get_modes() modifiers_set = bindmap.get_modifiers() # Add events for name, value in d.items(): if len(name) <= 3: continue pfx = name[:3] if not pfx in ('kp_', 'ms_', 'sc_', 'gs_'): continue evname = name[3:] for combo in value: mode, modifiers, trigger = self.parse_combo(combo, modes_set, modifiers_set) bindmap.map_event(mode, modifiers, trigger, evname) # Register for this symbolic event if we have a handler for it try: cb_method = getattr(self, name) except AttributeError: self.logger.warn("No method found matching '%s'" % (name)) cb_method = None if pfx == 'kp_': # keyboard event event = 'keydown-%s' % (evname) viewer.enable_callback(event) if cb_method: viewer.add_callback(event, cb_method) elif pfx == 'ms_': # mouse/button event for action in ('down', 'move', 'up'): event = '%s-%s' % (evname, action) viewer.enable_callback(event) if cb_method: viewer.add_callback(event, cb_method) elif pfx == 'sc_': # scrolling event event = '%s-scroll' % evname viewer.enable_callback(event) if cb_method: viewer.add_callback(event, cb_method) elif pfx == 'gs_': 
viewer.set_callback(evname, cb_method)

    def reset(self, viewer):
        bindmap = viewer.get_bindmap()
        bindmap.reset_mode(viewer)
        self.pan_stop(viewer)
        viewer.onscreen_message(None)

    #####  ENABLERS  #####
    # These methods are a quick way to enable or disable certain user
    # interface features in a ImageView window

    def enable_pan(self, tf):
        """Enable the image to be panned interactively (True/False)."""
        self.canpan = tf

    def enable_zoom(self, tf):
        """Enable the image to be zoomed interactively (True/False)."""
        self.canzoom = tf

    def enable_cuts(self, tf):
        """Enable the cuts levels to be set interactively (True/False)."""
        self.cancut = tf

    def enable_cmap(self, tf):
        """Enable the color map to be warped interactively (True/False)."""
        self.cancmap = tf

    def enable_flip(self, tf):
        """Enable the image to be flipped interactively (True/False)."""
        self.canflip = tf

    def enable_rotate(self, tf):
        """Enable the image to be rotated interactively (True/False)."""
        self.canrotate = tf

    def enable(self, **kwdargs):
        """
        General enable function encompassing all user interface features.
        Usage (e.g.): viewer.enable(rotate=False, flip=True)
        """
        # Iterate over the (key, value) pairs; iterating over the dict
        # directly yields only the keys and made the unpacking below fail.
        for feat, value in kwdargs.items():
            feat = feat.lower()
            if not feat in self.features:
                raise ValueError("'%s' is not a feature. Must be one of %s" % (
                    feat, str(self.features)))

            attr = self.features[feat]
            setattr(self, attr, bool(value))

    def enable_all(self, tf):
        for feat, attr in self.features.items():
            setattr(self, attr, bool(tf))

    #####  Help methods  #####
    # Methods used by the callbacks to do actions.

    def get_new_pan(self, viewer, win_x, win_y, ptype=1):

        if ptype == 1:
            # This is a "free pan", similar to dragging the "lens"
            # over the canvas.
            dat_wd, dat_ht = viewer.get_data_size()
            win_wd, win_ht = viewer.get_window_size()

            if (win_x >= win_wd):
                win_x = win_wd - 1
            if (win_y >= win_ht):
                win_y = win_ht - 1

            # Figure out data x,y based on percentage of X axis
            # and Y axis
            off_x, off_y = viewer.window_to_offset(win_x, win_y)
            max_x, max_y = viewer.window_to_offset(win_wd, win_ht)
            wd_x = abs(max_x) * 2.0
            ht_y = abs(max_y) * 2.0
            panx = (off_x + abs(max_x)) / float(wd_x)
            pany = (off_y + abs(max_y)) / float(ht_y)

            # Account for user preference
            if self.settings.get('pan_reverse', False):
                panx = 1.0 - panx
                pany = 1.0 - pany

            data_x, data_y = panx * dat_wd, pany * dat_ht
            return data_x, data_y

        elif ptype == 2:
            # This is a "drag pan", similar to dragging the canvas
            # under the "lens" or "viewport".
            if self._start_x is None:
                # user has not held the mouse button yet
                # return current pan values
                return (self._start_panx, self._start_pany)

            scale_x, scale_y = viewer.get_scale_xy()
            multiplier = self.settings.get('pan_multiplier', 1.0)
            off_x, off_y = viewer.window_to_offset(win_x, win_y)
            delta_x = float(self._start_x - off_x) / scale_x * multiplier
            delta_y = float(self._start_y - off_y) / scale_y * multiplier

            data_x = self._start_panx + delta_x
            data_y = self._start_pany + delta_y

            return (data_x, data_y)

    def _panset(self, viewer, data_x, data_y, msg=True):
        try:
            msg = self.settings.get('msg_panset', msg)
            if msg:
                viewer.onscreen_message("Pan position set", delay=0.4)

            res = viewer.panset_xy(data_x, data_y)
            return res

        except Exception as e:
            viewer.onscreen_message("Pan position set error; see log",
                                    delay=2.0)
            # most likely image does not have a valid wcs
            self.logger.error("Error setting pan position: %s" % (
                str(e)))

    def get_direction(self, direction, rev=False):
        """
        Translate a direction in compass degrees into 'up' or 'down'.
""" if (direction < 90.0) or (direction > 270.0): if not rev: return 'up' else: return 'down' elif (90.0 < direction < 270.0): if not rev: return 'down' else: return 'up' else: return 'none' def _tweak_colormap(self, viewer, x, y, mode): win_wd, win_ht = viewer.get_window_size() # translate Y cursor position as a percentage of the window # height into a scaling factor y_pct = (win_ht - y) / float(win_ht) # I tried to mimic ds9's exponential scale feel along the Y-axis def exp_scale(i): return (1.0/(i**3))*0.0002 + (1.0/i)*0.085 scale_pct = exp_scale(1.0 - y_pct) # translate X cursor position as a percentage of the window # width into a shifting factor shift_pct = x / float(win_wd) - 0.5 viewer.scale_and_shift_cmap(scale_pct, shift_pct) def _cutlow_pct(self, viewer, pct, msg=True): msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() loval = loval + (pct * spread) if msg: viewer.onscreen_message("Cut low: %.4f" % (loval)) viewer.cut_levels(loval, hival) def _cutlow_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_cuts', msg) win_wd, win_ht = viewer.get_window_size() pct = float(x) / float(win_wd) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() loval = minval + (pct * spread) if msg: viewer.onscreen_message("Cut low: %.4f" % (loval)) viewer.cut_levels(loval, hival) def _cuthigh_pct(self, viewer, pct, msg=True): msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() hival = hival - (pct * spread) if msg: viewer.onscreen_message("Cut high: %.4f" % (hival)) viewer.cut_levels(loval, hival) def _cuthigh_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_cuts', msg) win_wd, win_ht = viewer.get_window_size() pct = 1.0 - (float(x) / float(win_wd)) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() hival = maxval - (pct * spread) if msg: viewer.onscreen_message("Cut high: %.4f" % (hival)) viewer.cut_levels(loval, hival) def _cutboth_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_cuts', msg) win_wd, win_ht = viewer.get_window_size() xpct = 1.0 - (float(x) / float(win_wd)) #ypct = 1.0 - (float(y) / float(win_ht)) ypct = (float(win_ht - y) / float(win_ht)) spread = self._hival - self._loval hival = self._hival - (xpct * spread) loval = self._loval + (ypct * spread) if msg: viewer.onscreen_message("Cut low: %.4f high: %.4f" % ( loval, hival)) viewer.cut_levels(loval, hival) def _cut_pct(self, viewer, pct, msg=True): msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() minval, maxval = image.get_minmax() spread = maxval - minval loval, hival = viewer.get_cut_levels() loval = loval + (pct * spread) hival = hival - (pct * spread) if msg: viewer.onscreen_message("Cut low: %.4f high: %.4f" % ( loval, hival), delay=1.0) viewer.cut_levels(loval, hival) def _adjust_cuts(self, viewer, direction, pct, msg=True): direction = self.get_direction(direction) if direction == 'up': self._cut_pct(viewer, pct, msg=msg) elif direction == 'down': self._cut_pct(viewer, -pct, msg=msg) def _scale_image(self, viewer, direction, factor, msg=True): msg = self.settings.get('msg_zoom', msg) rev = self.settings.get('zoom_scroll_reverse', False) scale_x, scale_y = viewer.get_scale_xy() direction = 
self.get_direction(direction, rev=rev) if direction == 'up': mult = 1.0 + factor elif direction == 'down': mult = 1.0 - factor scale_x, scale_y = scale_x * mult, scale_y * mult viewer.scale_to(scale_x, scale_y) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=0.4) def _zoom_xy(self, viewer, x, y, msg=True): win_wd, win_ht = viewer.get_window_size() delta = float(x - self._start_x) factor = math.fabs(self.settings.get('mouse_zoom_acceleration', 1.085) - 1.0) direction = 0.0 if delta < 0.0: direction = 180.0 #print("factor=%f direction=%f" % (factor, direction)) self._start_x = x self._scale_image(viewer, direction, factor, msg=msg) def _cycle_dist(self, viewer, msg, direction='down'): if self.cancmap: msg = self.settings.get('msg_dist', msg) rgbmap = viewer.get_rgbmap() algs = rgbmap.get_hash_algorithms() algname = rgbmap.get_hash_algorithm() idx = algs.index(algname) if direction == 'down': idx = (idx + 1) % len(algs) else: idx = idx - 1 if idx < 0: idx = len(algs) - 1 algname = algs[idx] rgbmap.set_hash_algorithm(algname) if msg: viewer.onscreen_message("Color dist: %s" % (algname), delay=1.0) def _reset_dist(self, viewer, msg): if self.cancmap: msg = self.settings.get('msg_dist', msg) rgbmap = viewer.get_rgbmap() algname = 'linear' rgbmap.set_hash_algorithm(algname) if msg: viewer.onscreen_message("Color dist: %s" % (algname), delay=1.0) def _cycle_cmap(self, viewer, msg, direction='down'): if self.cancmap: msg = self.settings.get('msg_cmap', msg) rgbmap = viewer.get_rgbmap() cm = rgbmap.get_cmap() cmapname = cm.name cmapnames = cmap.get_names() idx = cmapnames.index(cmapname) if direction == 'down': idx = (idx + 1) % len(cmapnames) else: idx = idx - 1 if idx < 0: idx = len(cmapnames) - 1 cmapname = cmapnames[idx] rgbmap.set_cmap(cmap.get_cmap(cmapname)) if msg: viewer.onscreen_message("Color map: %s" % (cmapname), delay=1.0) def _reset_cmap(self, viewer, msg): if self.cancmap: msg = self.settings.get('msg_cmap', msg) rgbmap = viewer.get_rgbmap() # default cmapname = 'gray' rgbmap.set_cmap(cmap.get_cmap(cmapname)) if msg: viewer.onscreen_message("Color map: %s" % (cmapname), delay=1.0) def _cycle_imap(self, viewer, msg, direction='down'): if self.cancmap: msg = self.settings.get('msg_imap', msg) rgbmap = viewer.get_rgbmap() im = rgbmap.get_imap() imapname = im.name imapnames = imap.get_names() idx = imapnames.index(imapname) if direction == 'down': idx = (idx + 1) % len(imapnames) else: idx = idx - 1 if idx < 0: idx = len(imapnames) - 1 imapname = imapnames[idx] rgbmap.set_imap(imap.get_imap(imapname)) if msg: viewer.onscreen_message("Intensity map: %s" % (imapname), delay=1.0) def _reset_imap(self, viewer, msg): if self.cancmap: msg = self.settings.get('msg_imap', msg) rgbmap = viewer.get_rgbmap() # default imapname = 'ramp' rgbmap.set_imap(imap.get_imap(imapname)) if msg: viewer.onscreen_message("Intensity map: %s" % (imapname), delay=1.0) def _get_pct_xy(self, viewer, x, y): win_wd, win_ht = viewer.get_window_size() x_pct = float(x - self._start_x) / win_wd y_pct = float(y - self._start_y) / win_ht return (x_pct, y_pct) def _rotate_xy(self, viewer, x, y, msg=True): msg = self.settings.get('msg_rotate', msg) x_pct, y_pct = self._get_pct_xy(viewer, x, y) delta_deg = x_pct * 360.0 factor = self.settings.get('mouse_rotate_acceleration', 0.75) deg = math.fmod(self._start_rot + delta_deg * factor, 360.0) if msg: viewer.onscreen_message("Rotate: %.2f" % (deg)) viewer.rotate(deg) def _rotate_inc(self, viewer, inc_deg, msg=True): msg = self.settings.get('msg_rotate_inc', msg) 
cur_rot_deg = viewer.get_rotation() rot_deg = math.fmod(cur_rot_deg + inc_deg, 360.0) viewer.rotate(rot_deg) if msg: viewer.onscreen_message("Rotate Inc: (%.2f) %.2f" % ( inc_deg, rot_deg), delay=1.0) def _orient(self, viewer, righthand=False, msg=True): msg = self.settings.get('msg_orient', msg) image = viewer.get_image() (x, y, xn, yn, xe, ye) = image.calc_compass_center() degn = math.degrees(math.atan2(xn - x, yn - y)) self.logger.info("degn=%f xe=%f ye=%f" % ( degn, xe, ye)) # rotate east point also by degn xe2, ye2 = trcalc.rotate_pt(xe, ye, degn, xoff=x, yoff=y) dege = math.degrees(math.atan2(xe2 - x, ye2 - y)) self.logger.info("dege=%f xe2=%f ye2=%f" % ( dege, xe2, ye2)) # if right-hand image, flip it to make left hand xflip = righthand if dege > 0.0: xflip = not xflip if xflip: degn = - degn viewer.transform(xflip, False, False) viewer.rotate(degn) if msg: viewer.onscreen_message("Orient: rot=%.2f flipx=%s" % ( degn, str(xflip)), delay=1.0) def to_default_mode(self, viewer): self._ispanning = False viewer.switch_cursor('pick') def pan_start(self, viewer, ptype=1): # If already panning then ignore multiple keystrokes if self._ispanning: return self._pantype = ptype viewer.switch_cursor('pan') self._ispanning = True def pan_set_origin(self, viewer, win_x, win_y, data_x, data_y): self._start_x, self._start_y = viewer.window_to_offset(win_x, win_y) self._start_panx, self._start_pany = viewer.get_pan() def pan_stop(self, viewer): self._ispanning = False self._start_x = None self._pantype = 1 self.to_default_mode(viewer) def restore_colormap(self, viewer, msg=True): msg = self.settings.get('msg_cmap', msg) rgbmap = viewer.get_rgbmap() rgbmap.reset_sarr() if msg: viewer.onscreen_message("Restored color map", delay=0.5) return True ##### KEYBOARD ACTION CALLBACKS ##### def kp_pan_set(self, viewer, event, data_x, data_y, msg=True): if self.canpan: self._panset(viewer, data_x, data_y, msg=msg) return True def kp_center(self, viewer, event, data_x, data_y): if self.canpan: viewer.center_image() return True def kp_zoom_out(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_in(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_in() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) keylist = self.settings.get('kp_zoom') zoomval = (keylist.index(event.key) + 1) viewer.zoom_to(zoomval) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_inv(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) keylist = self.settings.get('kp_zoom_inv') zoomval = - (keylist.index(event.key) + 1) viewer.zoom_to(zoomval) if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_zoom_fit(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.zoom_fit() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def kp_autozoom_toggle(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) val = viewer.get_settings().get('autozoom') if val == 'off': val = 'on' else: val = 'off' 
viewer.enable_autozoom(val) if msg: viewer.onscreen_message('Autozoom %s' % val, delay=1.0) return True def kp_autozoom_override(self, viewer, event, data_x, data_y, msg=True): if self.canzoom: msg = self.settings.get('msg_zoom', msg) viewer.enable_autozoom('override') if msg: viewer.onscreen_message('Autozoom Override', delay=1.0) return True def kp_cut_255(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) viewer.cut_levels(0.0, 255.0, no_reset=True) return True def kp_cut_minmax(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) image = viewer.get_image() mn, mx = image.get_minmax(noinf=True) viewer.cut_levels(mn, mx, no_reset=True) return True def kp_cut_auto(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) if msg: viewer.onscreen_message("Auto cut levels", delay=1.0) viewer.auto_levels() return True def kp_autocuts_toggle(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) val = viewer.get_settings().get('autocuts') if val == 'off': val = 'on' else: val = 'off' viewer.enable_autocuts(val) if msg: viewer.onscreen_message('Autocuts %s' % val, delay=1.0) return True def kp_autocuts_override(self, viewer, event, data_x, data_y, msg=True): if self.cancut: msg = self.settings.get('msg_cuts', msg) viewer.enable_autocuts('override') if msg: viewer.onscreen_message('Autocuts Override', delay=1.0) return True def kp_autocenter_toggle(self, viewer, event, data_x, data_y, msg=True): if self.canpan: msg = self.settings.get('msg_pan', msg) val = viewer.get_settings().get('autocenter') if val == 'off': val = 'on' else: val = 'off' viewer.set_autocenter(val) if msg: viewer.onscreen_message('Autocenter %s' % val, delay=1.0) return True def kp_autocenter_override(self, viewer, event, data_x, data_y, msg=True): if self.canpan: msg = self.settings.get('msg_pan', msg) viewer.set_autocenter('override') if msg: viewer.onscreen_message('Autocenter Override', delay=1.0) return True def kp_contrast_restore(self, viewer, event, data_x, data_y, msg=True): if self.cancmap: msg = self.settings.get('msg_cmap', msg) self.restore_colormap(viewer, msg=msg) return True def kp_flip_x(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == '[': flipx = not flipX else: flipx = False viewer.transform(flipx, flipY, swapXY) if msg: viewer.onscreen_message("Flip X=%s" % flipx, delay=1.0) return True def kp_flip_y(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == ']': flipy = not flipY else: flipy = False viewer.transform(flipX, flipy, swapXY) if msg: viewer.onscreen_message("Flip Y=%s" % flipy, delay=1.0) return True def kp_swap_xy(self, viewer, event, data_x, data_y, msg=True): if self.canflip: msg = self.settings.get('msg_transform', msg) flipX, flipY, swapXY = viewer.get_transforms() if event.key == 'backslash': swapxy = not swapXY else: swapxy = False viewer.transform(flipX, flipY, swapxy) if msg: viewer.onscreen_message("Swap XY=%s" % swapxy, delay=1.0) return True def kp_dist(self, viewer, event, data_x, data_y, msg=True): self._cycle_dist(viewer, msg) return True def kp_dist_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_dist(viewer, msg) return True 
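    # --- Editor's illustrative note (not part of the original source) ---
    # The kp_*/ms_*/sc_* handlers in this class are looked up by name in
    # setup_settings_events(): a 'kp_<evname>' setting binds keys to the
    # method kp_<evname>.  A hedged sketch of customizing a handler by
    # subclassing (the subclass and its behavior are hypothetical):
    #
    #     class MyBindings(ImageViewBindings):
    #         def kp_zoom_fit(self, viewer, event, data_x, data_y, msg=True):
    #             # override the stock handler: zoom to fit, no message
    #             viewer.zoom_fit()
    #             return True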
def kp_cmap_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_cmap(viewer, msg) return True def kp_imap_reset(self, viewer, event, data_x, data_y, msg=True): self._reset_imap(viewer, msg) return True def kp_rotate_reset(self, viewer, event, data_x, data_y): if self.canrotate: viewer.rotate(0.0) # also reset all transforms viewer.transform(False, False, False) return True def kp_rotate_inc90(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._rotate_inc(viewer, 90.0, msg=msg) return True def kp_rotate_dec90(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._rotate_inc(viewer, -90.0, msg=msg) return True def kp_orient_lh(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._orient(viewer, righthand=False, msg=msg) return True def kp_orient_rh(self, viewer, event, data_x, data_y, msg=True): if self.canrotate: self._orient(viewer, righthand=True, msg=msg) return True def kp_reset(self, viewer, event, data_x, data_y): self.reset(viewer) return True def kp_lock(self, viewer, event, data_x, data_y): bm = viewer.get_bindmap() # toggle default mode type to locked/oneshot dfl_modetype = bm.get_default_mode_type() # get current mode mode_name, cur_modetype = bm.current_mode() if dfl_modetype == 'locked': mode_type = 'oneshot' bm.set_default_mode_type(mode_type) # turning off lock also resets the mode bm.reset_mode(viewer) else: mode_type = 'locked' bm.set_default_mode_type(mode_type) bm.set_mode(mode_name, mode_type=mode_type) return True ##### MOUSE ACTION CALLBACKS ##### ## def ms_none(self, viewer, event, data_x, data_y): ## return False ## def ms_cursor(self, viewer, event, data_x, data_y): ## return False ## def ms_wheel(self, viewer, event, data_x, data_y): ## return False ## def ms_draw(self, viewer, event, data_x, data_y): ## return False def ms_zoom(self, viewer, event, data_x, data_y, msg=True): """Zoom the image by dragging the cursor left or right. """ if not self.canzoom: return True msg = self.settings.get('msg_zoom', msg) x, y = viewer.get_last_win_xy() if event.state == 'move': self._zoom_xy(viewer, x, y) elif event.state == 'down': if msg: viewer.onscreen_message("Zoom (drag mouse L-R)", delay=1.0) self._start_x, self._start_y = x, y else: viewer.onscreen_message(None) return True def ms_zoom_in(self, viewer, event, data_x, data_y, msg=True): """Zoom in one level by a mouse click. """ if not self.canzoom: return True if event.state == 'down': viewer.panset_xy(data_x, data_y) viewer.zoom_in() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def ms_zoom_out(self, viewer, event, data_x, data_y, msg=True): """Zoom out one level by a mouse click. """ if not self.canzoom: return True if event.state == 'down': viewer.panset_xy(data_x, data_y) viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=1.0) return True def ms_rotate(self, viewer, event, data_x, data_y, msg=True): """Rotate the image by dragging the cursor left or right. 
""" if not self.canrotate: return True msg = self.settings.get('msg_rotate', msg) x, y = viewer.get_last_win_xy() if event.state == 'move': self._rotate_xy(viewer, x, y) elif event.state == 'down': if msg: viewer.onscreen_message("Rotate (drag mouse L-R)", delay=1.0) self._start_x, self._start_y = x, y self._start_rot = viewer.get_rotation() else: viewer.onscreen_message(None) return True def ms_rotate_reset(self, viewer, event, data_x, data_y, msg=True): if not self.canrotate: return True msg = self.settings.get('msg_rotate', msg) if event.state == 'down': viewer.rotate(0.0) viewer.onscreen_message("Rotation reset", delay=0.5) return True def ms_contrast(self, viewer, event, data_x, data_y, msg=True): """Shift the colormap by dragging the cursor left or right. Stretch the colormap by dragging the cursor up or down. """ if not self.cancmap: return True msg = self.settings.get('msg_contrast', msg) x, y = viewer.get_last_win_xy() if not viewer._originUpper: y = viewer._imgwin_ht - y if event.state == 'move': self._tweak_colormap(viewer, x, y, 'preview') elif event.state == 'down': self._start_x, self._start_y = x, y if msg: viewer.onscreen_message("Shift and stretch colormap (drag mouse)", delay=1.0) else: viewer.onscreen_message(None) return True def ms_contrast_restore(self, viewer, event, data_x, data_y, msg=True): """An interactive way to restore the colormap settings after a warp operation. """ if self.cancmap and (event.state == 'down'): self.restore_colormap(viewer, msg=msg) return True def ms_pan(self, viewer, event, data_x, data_y): """A 'drag' or proportional pan, where the image is panned by 'dragging the canvas' up or down. The amount of the pan is proportionate to the length of the drag. """ if not self.canpan: return True x, y = viewer.get_last_win_xy() if event.state == 'move': data_x, data_y = self.get_new_pan(viewer, x, y, ptype=self._pantype) viewer.panset_xy(data_x, data_y) elif event.state == 'down': self.pan_set_origin(viewer, x, y, data_x, data_y) self.pan_start(viewer, ptype=2) else: self.pan_stop(viewer) return True def ms_freepan(self, viewer, event, data_x, data_y): """A 'free' pan, where the image is panned by dragging the cursor towards the area you want to see in the image. The entire image is pannable by dragging towards each corner of the window. """ if not self.canpan: return True x, y = viewer.get_last_win_xy() if event.state == 'move': data_x, data_y = self.get_new_pan(viewer, x, y, ptype=self._pantype) viewer.panset_xy(data_x, data_y) elif event.state == 'down': self.pan_start(viewer, ptype=1) else: self.pan_stop(viewer) return True def ms_cutlo(self, viewer, event, data_x, data_y): """An interactive way to set the low cut level. """ if not self.cancut: return True x, y = viewer.get_last_win_xy() if event.state == 'move': self._cutlow_xy(viewer, x, y) elif event.state == 'down': self._start_x, self._start_y = x, y self._loval, self._hival = viewer.get_cut_levels() else: viewer.onscreen_message(None) return True def ms_cuthi(self, viewer, event, data_x, data_y): """An interactive way to set the high cut level. """ if not self.cancut: return True x, y = viewer.get_last_win_xy() if event.state == 'move': self._cuthigh_xy(viewer, x, y) elif event.state == 'down': self._start_x, self._start_y = x, y self._loval, self._hival = viewer.get_cut_levels() else: viewer.onscreen_message(None) return True def ms_cutall(self, viewer, event, data_x, data_y): """An interactive way to set the low AND high cut levels. 
""" if not self.cancut: return True x, y = viewer.get_last_win_xy() if not viewer._originUpper: y = viewer._imgwin_ht - y if event.state == 'move': self._cutboth_xy(viewer, x, y) elif event.state == 'down': self._start_x, self._start_y = x, y image = viewer.get_image() #self._loval, self._hival = viewer.get_cut_levels() self._loval, self._hival = self.autocuts.calc_cut_levels(image) else: viewer.onscreen_message(None) return True def ms_cut_auto(self, viewer, event, data_x, data_y, msg=True): return self.kp_cut_auto(viewer, event, data_x, data_y, msg=msg) def ms_panset(self, viewer, event, data_x, data_y, msg=True): """An interactive way to set the pan position. The location (data_x, data_y) will be centered in the window. """ if self.canpan and (event.state == 'down'): self._panset(viewer, data_x, data_y, msg=msg) return True ##### SCROLL ACTION CALLBACKS ##### def sc_cuts_coarse(self, viewer, event, msg=True): """Adjust cuts interactively by setting the low AND high cut levels. This function adjusts it coarsely. """ if self.cancut: self._adjust_cuts(viewer, event.direction, 0.01, msg=msg) return True def sc_cuts_fine(self, viewer, event, msg=True): """Adjust cuts interactively by setting the low AND high cut levels. This function adjusts it finely. """ if self.cancut: self._adjust_cuts(viewer, event.direction, 0.001, msg=msg) return True def sc_zoom(self, viewer, event, msg=True): """Interactively zoom the image by scrolling motion. This zooms by the zoom steps configured under Preferences. """ if self.canzoom: msg = self.settings.get('msg_zoom', msg) rev = self.settings.get('zoom_scroll_reverse', False) direction = self.get_direction(event.direction, rev=rev) if direction == 'up': viewer.zoom_in() elif direction == 'down': viewer.zoom_out() if msg: viewer.onscreen_message(viewer.get_scale_text(), delay=0.4) return True def sc_zoom_new(self, viewer, event, msg=True): return self.sc_zoom_coarse(viewer, event, msg=msg) def sc_zoom_coarse(self, viewer, event, msg=True): """Interactively zoom the image by scrolling motion. This zooms by adjusting the scale in x and y coarsely. """ if self.canzoom: zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0) amount = zoom_accel * 0.20 self._scale_image(viewer, event.direction, amount, msg=msg) return True def sc_zoom_fine(self, viewer, event, msg=True): """Interactively zoom the image by scrolling motion. This zooms by adjusting the scale in x and y coarsely. """ if self.canzoom: zoom_accel = self.settings.get('scroll_zoom_acceleration', 1.0) amount = zoom_accel * 0.08 self._scale_image(viewer, event.direction, 0.08, msg=msg) return True def sc_pan(self, viewer, event, msg=True): """Interactively pan the image by scrolling motion. """ if not self.canpan: return True # User has "Pan Reverse" preference set? 
rev = self.settings.get('pan_reverse', False) direction = event.direction if rev: direction = math.fmod(direction + 180.0, 360.0) pan_accel = self.settings.get('scroll_pan_acceleration', 1.0) num_degrees = event.amount * pan_accel ang_rad = math.radians(90.0 - direction) # Calculate distance of pan amount, based on current scale wd, ht = viewer.get_data_size() # pageSize = min(wd, ht) ((x0, y0), (x1, y1), (x2, y2), (x3, y3)) = viewer.get_pan_rect() page_size = min(abs(x2 - x0), abs(y2 - y0)) distance = (num_degrees / 360.0) * page_size self.logger.debug("angle=%f ang_rad=%f distance=%f" % ( direction, ang_rad, distance)) # Calculate new pan position pan_x, pan_y = viewer.get_pan() new_x = pan_x + math.cos(ang_rad) * distance new_y = pan_y + math.sin(ang_rad) * distance # cap pan position new_x = min(max(new_x, 0.0), wd) new_y = min(max(new_y, 0.0), ht) # Because pan position is reported +0.5 #new_x, new_y = new_x - 0.5, new_y - 0.5 #print "data x,y=%f,%f new x, y=%f,%f" % (pan_x, pan_y, new_x, new_y) viewer.panset_xy(new_x, new_y) # For checking result #pan_x, pan_y = viewer.get_pan() #print "new pan x,y=%f, %f" % (pan_x, pan_y) return True def sc_pan_coarse(self, viewer, event, msg=True): event.amount = event.amount / 2.0 return self.sc_pan(viewer, event, msg=msg) def sc_pan_fine(self, viewer, event, msg=True): event.amount = event.amount / 5.0 return self.sc_pan(viewer, event, msg=msg) def sc_dist(self, viewer, event, msg=True): direction = self.get_direction(event.direction) self._cycle_dist(viewer, msg, direction=direction) return True def sc_cmap(self, viewer, event, msg=True): direction = self.get_direction(event.direction) self._cycle_cmap(viewer, msg, direction=direction) return True def sc_imap(self, viewer, event, msg=True): direction = self.get_direction(event.direction) self._cycle_imap(viewer, msg, direction=direction) return True ##### GESTURE ACTION CALLBACKS ##### def gs_pinch(self, viewer, state, rot_deg, scale, msg=True): pinch_actions = self.settings.get('pinch_actions', []) if state == 'start': self._start_scale_x, self._start_scale_y = viewer.get_scale_xy() self._start_rot = viewer.get_rotation() else: msg_str = None if self.canzoom and ('zoom' in pinch_actions): scale_accel = self.settings.get('pinch_zoom_acceleration', 1.0) scale = scale * scale_accel scale_x, scale_y = (self._start_scale_x * scale, self._start_scale_y * scale) viewer.scale_to(scale_x, scale_y) msg_str = viewer.get_scale_text() msg = self.settings.get('msg_zoom', True) if self.canrotate and ('rotate' in pinch_actions): deg = self._start_rot - rot_deg rotate_accel = self.settings.get('pinch_rotate_acceleration', 1.0) deg = rotate_accel * deg viewer.rotate(deg) if msg_str is None: msg_str = "Rotate: %.2f" % (deg) msg = self.settings.get('msg_rotate', msg) if msg and (msg_str is not None): viewer.onscreen_message(msg_str, delay=0.4) return True def gs_pan(self, viewer, state, dx, dy): if not self.canpan: return True if state == 'move': scale_x, scale_y = viewer.get_scale_xy() delta_x = float(dx) / scale_x delta_y = float(dy) / scale_y data_x = self._start_panx + delta_x data_y = self._start_pany + delta_y viewer.panset_xy(data_x, data_y) elif state == 'start': self._start_panx, self._start_pany = viewer.get_pan() self.pan_start(viewer, ptype=2) else: self.pan_stop(viewer) return True def gs_rotate(self, viewer, state, rot_deg, msg=True): if state == 'start': self._start_rot = viewer.get_rotation() else: msg_str = None if self.canrotate: deg = self._start_rot - rot_deg rotate_accel = 
self.settings.get('pinch_rotate_acceleration', 1.0)
                deg = rotate_accel * deg
                viewer.rotate(deg)
                if msg_str is None:
                    msg_str = "Rotate: %.2f" % (deg)
                    msg = self.settings.get('msg_rotate', msg)

            if msg and (msg_str is not None):
                viewer.onscreen_message(msg_str, delay=0.4)
        return True


class UIEvent(object):
    pass

class KeyEvent(UIEvent):
    def __init__(self, key=None, state=None, mode=None, modifiers=None,
                 data_x=None, data_y=None, viewer=None):
        super(KeyEvent, self).__init__()
        self.key = key
        self.state = state
        self.mode = mode
        self.modifiers = modifiers
        self.data_x = data_x
        self.data_y = data_y
        self.viewer = viewer

class PointEvent(UIEvent):
    def __init__(self, button=None, state=None, mode=None, modifiers=None,
                 data_x=None, data_y=None, viewer=None):
        super(PointEvent, self).__init__()
        self.button = button
        self.state = state
        self.mode = mode
        self.modifiers = modifiers
        self.data_x = data_x
        self.data_y = data_y
        self.viewer = viewer

class ScrollEvent(UIEvent):
    def __init__(self, button=None, state=None, mode=None, modifiers=None,
                 direction=None, amount=None, data_x=None, data_y=None,
                 viewer=None):
        super(ScrollEvent, self).__init__()
        self.button = button
        self.state = state
        self.mode = mode
        self.modifiers = modifiers
        self.direction = direction
        self.amount = amount
        self.data_x = data_x
        self.data_y = data_y
        self.viewer = viewer

class BindingMapError(Exception):
    pass

class BindingMapper(Callback.Callbacks):
    """The BindingMapper class maps physical events (key presses, button
    clicks, mouse movement, etc) into logical events.  By registering for
    logical events, plugins and other event handling code doesn't need to
    care about the physical controls bindings.  The bindings can be changed
    and everything continues to work.
    """

    def __init__(self, logger, btnmap=None, mode_map=None, modifier_map=None):
        Callback.Callbacks.__init__(self)

        self.logger = logger

        # For event mapping
        self.eventmap = {}

        self._kbdmode = None
        self._kbdmode_types = ('held', 'oneshot', 'locked')
        self._kbdmode_type = 'held'
        self._kbdmode_type_default = 'oneshot'
        self._delayed_reset = False
        self._modifiers = frozenset([])

        # Set up button mapping
        if btnmap is None:
            btnmap = { 0x1: 'cursor', 0x2: 'wheel', 0x4: 'draw' }
        self.btnmap = btnmap
        self._button = 0

        # Set up modifier mapping
        if modifier_map is None:
            self.modifier_map = {}
            for keyname in ('shift_l', 'shift_r'):
                self.add_modifier(keyname, 'shift')
            for keyname in ('control_l', 'control_r'):
                self.add_modifier(keyname, 'ctrl')
            for keyname in ('meta_right',):
                self.add_modifier(keyname, 'meta')
        else:
            # store the map that was passed in (the original assigned
            # mode_map here by mistake, clobbering the modifier table)
            self.modifier_map = modifier_map

        # Set up mode mapping
        if mode_map is None:
            self.mode_map = {}
        else:
            self.mode_map = mode_map

        self._empty_set = frozenset([])

        # For callbacks
        for name in ('mode-set', ):
            self.enable_callback(name)

    def add_modifier(self, keyname, modname):
        bnch = Bunch.Bunch(name=modname)
        self.modifier_map[keyname] = bnch
        self.modifier_map['mod_%s' % modname] = bnch

    def get_modifiers(self):
        return set([bnch.name for keyname, bnch in self.modifier_map.items()])

    def clear_modifier_map(self):
        self.modifier_map = {}

    def set_mode_map(self, mode_map):
        self.mode_map = mode_map

    def clear_mode_map(self):
        self.mode_map = {}

    def current_mode(self):
        return (self._kbdmode, self._kbdmode_type)

    def get_modes(self):
        return set([bnch.name for keyname, bnch in self.mode_map.items()])

    def add_mode(self, keyname, mode_name, mode_type='held', msg=None):
        if mode_type is not None:
            assert mode_type in self._kbdmode_types, \
                   ValueError("Bad mode type '%s': must be one of %s" % (
                mode_type, self._kbdmode_types))

        bnch
= Bunch.Bunch(name=mode_name, type=mode_type, msg=msg) self.mode_map[keyname] = bnch self.mode_map['mode_%s' % mode_name] = bnch def set_mode(self, name, mode_type=None): if mode_type == None: mode_type = self._kbdmode_type_default assert mode_type in self._kbdmode_types, \ ValueError("Bad mode type '%s': must be one of %s" % ( mode_type, self._kbdmode_types)) self._kbdmode = name if name is None: # like a reset_mode() mode_type = 'held' self._delayed_reset = False self._kbdmode_type = mode_type self.logger.info("set keyboard mode to '%s' type=%s" % (name, mode_type)) self.make_callback('mode-set', self._kbdmode, self._kbdmode_type) def set_default_mode_type(self, mode_type): assert mode_type in self._kbdmode_types, \ ValueError("Bad mode type '%s': must be one of %s" % ( mode_type, self._kbdmode_types)) self._kbdmode_type_default = mode_type def get_default_mode_type(self): return self._kbdmode_type_default def reset_mode(self, viewer): try: bnch = self.mode_map['mode_%s' % self._kbdmode] except: bnch = None self._kbdmode = None self._kbdmode_type = 'held' self._delayed_reset = False self.logger.info("set keyboard mode reset") # clear onscreen message, if any if (bnch is not None) and (bnch.msg is not None): viewer.onscreen_message(None) self.make_callback('mode-set', self._kbdmode, self._kbdmode_type) def clear_button_map(self): self.btnmap = {} def map_button(self, btncode, alias): """For remapping the buttons to different names. 'btncode' is a fixed button code and 'alias' is a logical name. """ self.btnmap[btncode] = alias def get_buttons(self): return set([alias for keyname, alias in self.btnmap.items()]) def clear_event_map(self): self.eventmap = {} def map_event(self, mode, modifiers, alias, eventname): self.eventmap[(mode, frozenset(tuple(modifiers)), alias)] = Bunch.Bunch(name=eventname) def register_for_events(self, viewer): # Add callbacks for interesting events viewer.add_callback('motion', self.window_motion) viewer.add_callback('button-press', self.window_button_press) viewer.add_callback('button-release', self.window_button_release) viewer.add_callback('key-press', self.window_key_press) viewer.add_callback('key-release', self.window_key_release) ## viewer.add_callback('drag-drop', self.window_drag_drop) viewer.add_callback('scroll', self.window_scroll) ## viewer.add_callback('map', self.window_map) ## viewer.add_callback('focus', self.window_focus) ## viewer.add_callback('enter', self.window_enter) ## viewer.add_callback('leave', self.window_leave) def window_map(self, viewer): pass def window_focus(self, viewer, hasFocus): return True def window_enter(self, viewer): return True def window_leave(self, viewer): return True def window_key_press(self, viewer, keyname): self.logger.debug("keyname=%s" % (keyname)) # Is this a modifer key? if keyname in self.modifier_map: bnch = self.modifier_map[keyname] self._modifiers = self._modifiers.union(set([bnch.name])) return True # Is this a mode key? 
elif keyname in self.mode_map: bnch = self.mode_map[keyname] if self._kbdmode_type == 'locked': if bnch.name == self._kbdmode: self.reset_mode(viewer) return True if self._delayed_reset: if bnch.name == self._kbdmode: self._delayed_reset = False return False # if there is not a mode active now, # activate this one if self._kbdmode is None: mode_type = bnch.type if mode_type == None: mode_type = self._kbdmode_type_default self.set_mode(bnch.name, mode_type) if bnch.msg is not None: viewer.onscreen_message(bnch.msg) return True try: # TEMP: hack to get around the issue of how keynames # are generated. if keyname == 'escape': idx = (None, self._empty_set, keyname) else: idx = (self._kbdmode, self._modifiers, keyname) emap = self.eventmap[idx] except KeyError: try: idx = (None, self._empty_set, keyname) emap = self.eventmap[idx] except KeyError: return False self.logger.debug("idx=%s" % (str(idx))) cbname = 'keydown-%s' % (emap.name) last_x, last_y = viewer.get_last_data_xy() event = KeyEvent(key=keyname, state='down', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=last_x, data_y=last_y) return viewer.make_ui_callback(cbname, event, last_x, last_y) def window_key_release(self, viewer, keyname): self.logger.debug("keyname=%s" % (keyname)) # Is this a modifer key? if keyname in self.modifier_map: bnch = self.modifier_map[keyname] self._modifiers = self._modifiers.difference(set([bnch.name])) return True try: idx = (self._kbdmode, self._modifiers, keyname) emap = self.eventmap[idx] except KeyError: try: idx = (None, self._empty_set, keyname) emap = self.eventmap[idx] except KeyError: emap = None # Is this a mode key? if keyname in self.mode_map: bnch = self.mode_map[keyname] if self._kbdmode == bnch.name: # <-- the current mode key is being released if bnch.type == 'held': if self._button == 0: # if no button is being held, then reset mode self.reset_mode(viewer) else: self._delayed_reset = True return True # release mode if this is a oneshot mode ## if self._kbdmode_type == 'oneshot': ## self.reset_mode(viewer) if emap is None: return False cbname = 'keyup-%s' % (emap.name) last_x, last_y = viewer.get_last_data_xy() event = KeyEvent(key=keyname, state='up', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=last_x, data_y=last_y) return viewer.make_ui_callback(cbname, event, last_x, last_y) def window_button_press(self, viewer, btncode, data_x, data_y): self.logger.debug("x,y=%d,%d btncode=%s" % (data_x, data_y, hex(btncode))) self._button |= btncode button = self.btnmap[btncode] try: idx = (self._kbdmode, self._modifiers, button) emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, button) emap = self.eventmap[idx] except KeyError: #self.logger.warn("No button map binding for %s" % (str(btncode))) return False self.logger.debug("Event map for %s" % (str(idx))) cbname = '%s-down' % (emap.name) self.logger.debug("making callback for %s (mode=%s)" % ( cbname, self._kbdmode)) event = PointEvent(button=button, state='down', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event, data_x, data_y) def window_motion(self, viewer, btncode, data_x, data_y): button = self.btnmap[btncode] try: idx = (self._kbdmode, self._modifiers, button) emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, button) emap = self.eventmap[idx] except KeyError: 
return False self.logger.debug("Event map for %s" % (str(idx))) cbname = '%s-move' % (emap.name) event = PointEvent(button=button, state='move', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event, data_x, data_y) def window_button_release(self, viewer, btncode, data_x, data_y): self.logger.debug("x,y=%d,%d button=%s" % (data_x, data_y, hex(btncode))) self._button &= ~btncode button = self.btnmap[btncode] try: idx = (self._kbdmode, self._modifiers, button) # release mode if this is a oneshot mode if (self._kbdmode_type == 'oneshot') or (self._delayed_reset): self.reset_mode(viewer) emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, button) emap = self.eventmap[idx] except KeyError: #self.logger.warn("No button map binding for %s" % (str(btncode))) return False self.logger.debug("Event map for %s" % (str(idx))) cbname = '%s-up' % (emap.name) event = PointEvent(button=button, state='up', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event, data_x, data_y) def window_scroll(self, viewer, direction, amount, data_x, data_y): try: idx = (self._kbdmode, self._modifiers, 'scroll') emap = self.eventmap[idx] except KeyError: # no entry for this mode, try unmodified entry try: idx = (None, self._empty_set, 'scroll') emap = self.eventmap[idx] except KeyError: return False cbname = '%s-scroll' % (emap.name) event = ScrollEvent(button='scroll', state='scroll', mode=self._kbdmode, modifiers=self._modifiers, viewer=viewer, direction=direction, amount=amount, data_x=data_x, data_y=data_y) return viewer.make_ui_callback(cbname, event) #END
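# --- Editor's addition: hedged usage sketch, not part of ginga -----------
# A minimal illustration of how BindingMapper and ImageViewBindings fit
# together.  `viewer` is assumed to be a concrete ginga ImageView backend;
# the attribute assignment used to attach the bindmap is an assumption for
# this sketch (backends differ), and wire_default_bindings is an invented
# name, not a ginga API.

def wire_default_bindings(viewer, logger):
    bindmap = BindingMapper(logger)       # physical -> logical events
    viewer.bindmap = bindmap              # assumed attachment point
    bindmap.register_for_events(viewer)   # hook raw UI callbacks (see above)

    bindings = ImageViewBindings(logger)  # logical events -> actions
    bindings.set_bindings(viewer)         # reads the settings table above
    bindings.enable_all(True)             # allow pan/zoom/cuts/cmap/etc.
    return bindings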
bsd-3-clause
6,027,979,708,236,508,000
34.646996
82
0.523658
false
3.671861
false
false
false
yotamfr/prot2vec
src/python/pssm3go_model.py
1
11287
import numpy as np import torch import torch.nn as nn from torch.autograd import Variable import torch.nn.functional as F USE_CUDA = False KERN_SIZE = 3 def set_cuda(val): global USE_CUDA USE_CUDA = val def sequence_mask(sequence_length, max_len=None): if max_len is None: max_len = sequence_length.data.max() batch_size = sequence_length.size(0) seq_range = torch.arange(0, max_len).long() seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len) seq_range_expand = Variable(seq_range_expand) # if sequence_length.is_cuda: if USE_CUDA: seq_range_expand = seq_range_expand.cuda() seq_length_expand = (sequence_length.unsqueeze(1) .expand_as(seq_range_expand)) return seq_range_expand < seq_length_expand def masked_cross_entropy(logits, target, length, gamma=0, eps=1e-7): length = Variable(torch.LongTensor(length)) if USE_CUDA: length = length.cuda() """ Args: logits: A Variable containing a FloatTensor of size (batch, max_len, num_classes) which contains the unnormalized probability for each class. target: A Variable containing a LongTensor of size (batch, max_len) which contains the index of the true class for each corresponding step. length: A Variable containing a LongTensor of size (batch,) which contains the length of each data in a batch. Returns: loss: An average loss value masked by the length. """ # logits_flat: (batch * max_len, num_classes) logits_flat = logits.view(-1, logits.size(-1)) # target_flat: (batch * max_len, 1) target_flat = target.view(-1, 1) # probs_flat: (batch * max_len, 1) probs_flat = torch.gather(F.softmax(logits_flat), dim=1, index=target_flat) probs_flat = probs_flat.clamp(eps, 1. - eps) # prob: [0, 1] -> [eps, 1 - eps] # losses_flat: (batch * max_len, 1) losses_flat = -torch.log(probs_flat) * (1 - probs_flat) ** gamma # focal loss # losses: (batch, max_len) losses = losses_flat.view(*target.size()) # mask: (batch, max_len) mask = sequence_mask(sequence_length=length, max_len=target.size(1)) losses = losses * mask.float() loss = losses.sum() / length.float().sum() return loss class EncoderRNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1): super(EncoderRNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.dropout = dropout self.gru = nn.GRU(input_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True) def forward(self, input_seqs, input_lengths, hidden=None): # Note: we run this all at once (over multiple batches of multiple sequences) packed = torch.nn.utils.rnn.pack_padded_sequence(input_seqs, input_lengths) outputs, hidden = self.gru(packed, hidden) outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded) outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs return outputs, hidden class CNN(nn.Module): def __init__(self, input_size): super(CNN, self).__init__() inp_size = input_size self.features = nn.Sequential( nn.Conv2d(1, 10, kernel_size=(KERN_SIZE, inp_size)), nn.BatchNorm2d(10), nn.ReLU(inplace=True), nn.Conv2d(10, 10, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(10), nn.ReLU(inplace=True), nn.MaxPool2d((2, 1)), nn.Conv2d(10, 20, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(20), nn.ReLU(inplace=True), nn.Conv2d(20, 20, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(20), nn.ReLU(inplace=True), nn.MaxPool2d((2, 1)), nn.Conv2d(20, 40, kernel_size=(KERN_SIZE, 1)), nn.BatchNorm2d(40), nn.ReLU(inplace=True), nn.Conv2d(40, 40, kernel_size=(KERN_SIZE, 1)), 
nn.BatchNorm2d(40), nn.ReLU(inplace=True), nn.MaxPool2d((2, 1)), ) self.n_pool_layers = 3 def forward(self, x): out = self.features(x) out = out.view(out.size(2), out.size(0), out.size(1) * out.size(3)) return out class EncoderCNN(nn.Module): def __init__(self, input_size, hidden_size, n_layers=1, dropout=0.1): super(EncoderCNN, self).__init__() self.input_size = input_size self.hidden_size = hidden_size self.n_layers = n_layers self.dropout = dropout self.cnn = CNN(input_size) self.gru = nn.GRU(input_size, hidden_size, n_layers, dropout=self.dropout, bidirectional=True) def forward(self, input_seqs, input_lengths, hidden=None): input_features = self.cnn(input_seqs.transpose(0, 1).unsqueeze(1)) features_length = [(l//(2 ** self.cnn.n_pool_layers)) for l in input_lengths] # features_length = input_lengths # print(input_features.size()) # print(features_length) # Note: we run this all at once (over multiple batches of multiple sequences) packed = torch.nn.utils.rnn.pack_padded_sequence(input_features, features_length) outputs, hidden = self.gru(packed, hidden) outputs, output_lengths = torch.nn.utils.rnn.pad_packed_sequence(outputs) # unpack (back to padded) outputs = outputs[:, :, :self.hidden_size] + outputs[:, :, self.hidden_size:] # Sum bidirectional outputs return outputs, hidden class Attn(nn.Module): def __init__(self, method, hidden_size): super(Attn, self).__init__() self.method = method self.hidden_size = hidden_size if self.method == 'general': self.attn = nn.Linear(self.hidden_size, hidden_size) elif self.method == 'concat': self.attn = nn.Linear(self.hidden_size * 2, hidden_size) self.v = nn.Parameter(torch.FloatTensor(1, hidden_size)) def forward(self, hidden, encoder_outputs): max_len = encoder_outputs.size(0) this_batch_size = encoder_outputs.size(1) # Create variable to store attention energies attn_energies = Variable(torch.zeros(this_batch_size, max_len)) # B x S if USE_CUDA: attn_energies = attn_energies.cuda() # For each batch of encoder outputs for b in range(this_batch_size): # Calculate energy for each encoder output for i in range(max_len): attn_energies[b, i] = self.score(hidden[:, b], encoder_outputs[i, b].unsqueeze(0)) # Normalize energies to weights in range 0 to 1, resize to 1 x B x S return F.softmax(attn_energies).unsqueeze(1) def score(self, hidden, encoder_output): if self.method == 'dot': energy = torch.dot(hidden.view(-1), encoder_output.view(-1)) return energy elif self.method == 'general': energy = self.attn(encoder_output) energy = torch.dot(hidden.view(-1), energy.view(-1)) return energy elif self.method == 'concat': energy = self.attn(torch.cat((hidden, encoder_output), 1)) energy = self.v.dot(energy) return energy class LuongAttnDecoderRNN(nn.Module): def __init__(self, attn_model, hidden_size, output_size, n_layers=1, prior_size=0, dropout=0.1, embedding=None): super(LuongAttnDecoderRNN, self).__init__() # Keep for reference self.attn_model = attn_model self.hidden_size = hidden_size self.output_size = output_size self.prior_size = prior_size self.n_layers = n_layers self.dropout = dropout # Define layers if np.any(embedding): self.embedding_size = embedding_size = embedding.shape[1] self.embedding = nn.Embedding(output_size, embedding_size) self.embedding.weight = nn.Parameter(torch.from_numpy(embedding).float()) self.embedding.requires_grad = True else: self.embedding_size = embedding_size = hidden_size self.embedding = nn.Embedding(output_size, embedding_size) self.embedding_dropout = nn.Dropout(dropout) self.gru = nn.GRU(embedding_size, 
hidden_size, n_layers, dropout=dropout) self.concat = nn.Linear(hidden_size * 2, hidden_size) self.out = nn.Linear(hidden_size + prior_size, output_size) # Choose attention model if attn_model != 'none': self.attn = Attn(attn_model, hidden_size) def forward(self, input_seq, last_hidden, encoder_outputs, prior): # Note: we run this one step at a time # Get the embedding of the current input word (last output word) batch_size = input_seq.size(0) embedded = self.embedding(input_seq) embedded = self.embedding_dropout(embedded) embedded = embedded.view(1, batch_size, -1) # S=1 x B x N # Get current hidden state from input word and last hidden state rnn_output, hidden = self.gru(embedded, last_hidden) # Calculate attention from current RNN state and all encoder outputs; # apply to encoder outputs to get weighted average attn_weights = self.attn(rnn_output, encoder_outputs) context = attn_weights.bmm(encoder_outputs.transpose(0, 1)) # B x S=1 x N # Attentional vector using the RNN hidden state and context vector # concatenated together (Luong eq. 5) rnn_output = rnn_output.squeeze(0) # S=1 x B x N -> B x N context = context.squeeze(1) # B x S=1 x N -> B x N concat_input = torch.cat((rnn_output, context), 1) concat_output = F.tanh(self.concat(concat_input)) # Finally predict next token (Luong eq. 6, without softmax) if prior is None: output = self.out(concat_output) else: output = self.out(torch.cat((concat_output, prior), 1)) # Return final output, hidden state, and attention weights (for visualization) return output, hidden, attn_weights # https://github.com/DingKe/pytorch_workplace/blob/master/focalloss/loss.py def one_hot(index, classes): size = index.size() + (classes,) view = index.size() + (1,) mask = torch.Tensor(*size).fill_(0) index = index.view(*view) ones = 1. if isinstance(index, Variable): ones = Variable(torch.Tensor(index.size()).fill_(1)) mask = Variable(mask, volatile=index.volatile) return mask.scatter_(1, index, ones) class FocalLoss(nn.Module): def __init__(self, gamma=0, eps=1e-7): super(FocalLoss, self).__init__() self.gamma = gamma self.eps = eps def forward(self, input, target): y = one_hot(target, input.size(-1)) logit = F.softmax(input) logit = logit.clamp(self.eps, 1. - self.eps) loss = -1 * y * torch.log(logit) # cross entropy loss = loss * (1 - logit) ** self.gamma # focal loss return loss.sum()
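# --- Editor's addition: hedged smoke test for masked_cross_entropy -------
# The tensor shapes are the only assumptions (batch=2, max_len=5,
# num_classes=4); it uses the same Variable-era pytorch API the module
# itself targets.  The second sequence is padded after step 3, so those
# steps must not contribute to the loss.

def _smoke_test_masked_cross_entropy():
    batch, max_len, num_classes = 2, 5, 4
    logits = Variable(torch.randn(batch, max_len, num_classes))
    target = Variable(torch.LongTensor(batch, max_len).random_(0, num_classes))
    lengths = [5, 3]  # true lengths; positions beyond them are masked out
    loss = masked_cross_entropy(logits, target, lengths)
    return loss  # scalar Variable, averaged over the 8 unmasked steps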
mit
-5,608,129,983,196,193,000
35.409677
116
0.611234
false
3.52829
false
false
false
eneldoserrata/marcos_openerp
addons/point_of_sale/report/pos_receipt.py
1
3036
# -*- coding: utf-8 -*-
##############################################################################
#
#    OpenERP, Open Source Management Solution
#    Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
#    This program is free software: you can redistribute it and/or modify
#    it under the terms of the GNU Affero General Public License as
#    published by the Free Software Foundation, either version 3 of the
#    License, or (at your option) any later version.
#
#    This program is distributed in the hope that it will be useful,
#    but WITHOUT ANY WARRANTY; without even the implied warranty of
#    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
#    GNU Affero General Public License for more details.
#
#    You should have received a copy of the GNU Affero General Public License
#    along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################

import time

from openerp.report import report_sxw
from openerp import pooler


def titlize(journal_name):
    # Strip the trailing words of the journal name up to and including the
    # word 'journal' (e.g. "Cash journal" -> "Cash").  The `words and`
    # guard avoids an IndexError when 'journal' is not present.
    words = journal_name.split()
    while words and words.pop() != 'journal':
        continue
    return ' '.join(words)


class order(report_sxw.rml_parse):

    def __init__(self, cr, uid, name, context):
        super(order, self).__init__(cr, uid, name, context=context)
        user = pooler.get_pool(cr.dbname).get('res.users').browse(cr, uid, uid, context=context)
        partner = user.company_id.partner_id
        self.localcontext.update({
            'time': time,
            'disc': self.discount,
            'net': self.netamount,
            'get_journal_amt': self._get_journal_amt,
            'address': partner or False,
            'titlize': titlize
        })

    def netamount(self, order_line_id):
        sql = 'select (qty*price_unit) as net_price from pos_order_line where id = %s'
        self.cr.execute(sql, (order_line_id,))
        res = self.cr.fetchone()
        # NOTE: an 18% tax rate is hard-coded here; the gross line price
        # is divided by 1.18 to obtain the net amount.
        return res[0] / 1.18

    def discount(self, order_id):
        sql = 'select discount, price_unit, qty from pos_order_line where order_id = %s'
        self.cr.execute(sql, (order_id,))
        res = self.cr.fetchall()
        dsum = 0
        for line in res:
            if line[0] != 0:
                dsum = dsum + (line[2] * (line[0] * line[1] / 100))
        return dsum

    def _get_journal_amt(self, order_id):
        # Use a parameterized query (the original interpolated order_id
        # into the SQL string with %d, which is unsafe and unidiomatic).
        sql = """select aj.name, absl.amount as amt
                   from account_bank_statement as abs
                   LEFT JOIN account_bank_statement_line as absl ON abs.id = absl.statement_id
                   LEFT JOIN account_journal as aj ON aj.id = abs.journal_id
                  WHERE absl.pos_statement_id = %s"""
        self.cr.execute(sql, (order_id,))
        return self.cr.dictfetchall()


report_sxw.report_sxw('report.pos.receipt', 'pos.order', 'addons/point_of_sale/report/pos_receipt.rml', parser=order, header=False)

# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
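# --- Editor's addition: worked example (hedged) ---------------------------
# discount(): a line with discount=10(%), price_unit=100.0, qty=2 adds
#     2 * (10 * 100.0 / 100) = 20.0
# to the accumulated discount for the order.
# netamount(): divides the gross line price by the hard-coded 1.18, i.e.
#     (2 * 100.0) / 1.18 ~= 169.49
# which strips what appears to be an 18% tax; that it is a tax rate is an
# inference, not documented anywhere in this module.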
agpl-3.0
873,737,431,624,882,400
38.428571
131
0.591897
false
3.790262
false
false
false
chrysante87/pyterpol
pyterpol_test/test_hjd/test.hjd.py
1
1829
import numpy as np
import pyterpol


def load_observations(f):
    """
    Load a list of observed spectra and their HJDs.
    :param f: file with two columns - spectrum filename and HJD
    :return: (ObservedList, array of filenames, list of HJDs)
    """
    # load the observations
    flist = np.loadtxt(f, usecols=[0], unpack=True, dtype=str)
    hjd = np.loadtxt(f, usecols=[1], unpack=True).tolist()
    # the first HJD is set to None (presumably to exercise that code path)
    hjd[0] = None

    # create list of observations
    obs = []
    for i, sf in enumerate(flist):
        # wrap the spectrum into observed spectrum class
        # o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=0))
        # o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=0), hjd=hjd[i])
        o = pyterpol.ObservedSpectrum(filename=sf, group=dict(rv=i), hjd=hjd[i])

        # estimate uncertainty from continuum
        o.get_sigma_from_continuum(6620., 6630.)
        obs.append(o)

    # create ObservedList
    ol = pyterpol.ObservedList()
    ol.add_observations(obs)

    return ol, flist, hjd


def main():
    """
    Build a one-component interface over three regions and check that it
    survives a save/load/save round trip.
    :return: None
    """
    # parameters (niter is currently unused)
    niter = 2

    # 1) generate regions
    rl = pyterpol.RegionList()
    rl.add_region(wmin=6337., wmax=6410.0, groups=dict(lr=0))
    rl.add_region(wmin=6530., wmax=6600.0, groups=dict(lr=0))
    rl.add_region(wmin=6660., wmax=6690.0, groups=dict(lr=0))

    # 2) load observed data
    ol = load_observations('prekor.lst')[0]

    # 3) generate components
    sl = pyterpol.StarList()
    sl.add_component('primary', teff=16000., logg=4.285, lr=1.0, vrot=90., z=1.0)

    # 4) construct the interface
    itf = pyterpol.Interface(sl=sl, rl=rl, ol=ol)
    itf.set_grid_properties(order=4, step=0.05)
    itf.setup()
    print itf

    # 5) write RVs
    itf.save('test.sav')
    itf.write_rvs('test.rv.dat')

    # 6) try to load it
    itf.load('test.sav')

    # 7) and save it again
    itf.save('test2.sav')

if __name__ == '__main__':
    main()
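# --- Editor's addition: hedged note on the expected input -----------------
# load_observations() reads a two-column ASCII list (spectrum file, HJD);
# the file names below are illustrative only:
#
#     spec_01.asc   2456001.2345
#     spec_02.asc   2456002.3456
#
# Column 0 is parsed as str and column 1 as float, after which the first
# HJD is overwritten with None (see hjd[0] = None above).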
gpl-2.0
-9,011,860,902,559,890,000
24.402778
87
0.600328
false
2.95
false
false
false
upcFrost/SDR2_translate
GuiFuncs.py
1
6614
import os, ConfigParser, PIL from PIL import Image, ImageTk, ImageDraw, ImageFont from PakFile import * from GimFile import * from Common import * from clt import * def showSprite(self, GameDataLoc, pars): fn = os.path.join(GameDataLoc,'all','cg', 'bustup_%02d_%02d.gim' % (pars[1][1], pars[2][1])) GimImage = GimFile() GimImage.openGim(fn) GimImage.getImage() pilImage = PIL.Image.new("RGBA", (GimImage.width, GimImage.height)) pilImage.putdata(GimImage.image) self.scene.sprite = ImageTk.PhotoImage(pilImage) POS_X = (2*SCREEN_W - GimImage.width)/2 POS_Y = (2*SCREEN_H - GimImage.height)/2 imagesprite = self._ScreenView.create_image(POS_X,POS_Y,image=self.scene.sprite, tag = 'sprite') pass def showBGD(self, GameDataLoc, pars): fn = os.path.join(GameDataLoc,'all','cg', 'bgd_%03d.gim' % (pars[0][1])) # Show image if (pars[1][1] == 1): GimImage = GimFile() GimImage.openGim(fn) GimImage.getImage() pilImage = PIL.Image.new("RGBA", (GimImage.width, GimImage.height)) pilImage.putdata(GimImage.image) self.scene.bgd = ImageTk.PhotoImage(pilImage) POS_X = (2*SCREEN_W - GimImage.width)/2 POS_Y = (2*SCREEN_H - GimImage.height)/2 imagebgd = self._ScreenView.create_image(POS_X,POS_Y,image=self.scene.bgd, tag = 'bgd') else: self.scene.bgd = []; self._ScreenView.delete('bgd') pass def showFlash(self, GameDataLoc, pars): # Flash types: # If id < 1000, then it's a flash event. # if id >= 1000, then it's ammo # if id >= 1500, then it's an ammo update # if id >= 2000, then it's a present # If id >= 3000, it's a cutin. id = pars[0][1] # Flash ID added_Y = SCREEN_H/2 # Additional display height readfile = True # Flag that we're reading file, not dataarray # Check if that really is a flash if id >= 3000: # Cutin root = os.path.join(GameDataLoc,'all','cg','cutin') fn_tmp = 'cutin_ico_%03d.gim' id = id - 3000 elif id >= 2000: # Present root = os.path.join(GameDataLoc,'all','cg','present') fn_tmp = 'present_ico_%03d.gim' id = id - 2000 elif id >= 1500: # Ammo root = os.path.join(GameDataLoc,'all','cg','kotodama') fn_tmp = 'kotodama_ico_%03d.gim' id = id - 1500 elif id >= 1000: # Also ammo root = os.path.join(GameDataLoc,'all','cg','kotodama') fn_tmp = 'kotodama_ico_%03d.gim' id = id - 1000 # A flash event. 
    else:
        added_Y = 0 # Don't need an additional height here
        root = os.path.join(GameDataLoc,'all','flash')
        fn_tmp = 'fla_%03d.pak'

    file = os.path.join(root, fn_tmp % id)
    # Check dir because we have 2 of those
    if not os.path.isfile(file):
        root = os.path.join(GameDataLoc,'jp','flash')
        file = os.path.join(root, fn_tmp % id)
    # Check for file
    if not os.path.isfile(file):
        return -1

    # Get extension
    _, ext = os.path.splitext(file)
    if ext not in ['.pak', '.gmo', '.gim']:
        return -1

    if ext == '.pak':
        Pak = PakFile(file)
        Pak.getFiles()
        # FIXME: need to check its number
        idx = pars[-1][1]
        if idx == 255:
            # Erase everything from the screen
            self._ScreenView.delete(ALL)
            self.scene.flash = []
            return 0
        # Else - check for image, numeration starts with 1
        # Note that i'm using the SAME variable for unification
        file = Pak.files[idx - 1][1]
        _, ext = os.path.splitext(Pak.files[idx - 1][0])
        # Set flag that we're reading data
        readfile = False
        # Check extension
        if ext not in ['.gmo', '.gim']:
            return -1

    if ext == '.gmo':
        GmoImage = GmoFile()
        if readfile:
            GmoImage.openGmo(file)
        else:
            GmoImage.fromData(file)
        GmoImage.extractGim()
        GimImage = GmoImage.gim
    if ext == '.gim':
        GimImage = GimFile()
        if readfile:
            GimImage.openGim(file)
        else:
            GimImage.fromData(file)

    # Now working with gim image
    GimImage.getImage()
    pilImage = PIL.Image.new("RGBA", (GimImage.width, GimImage.height))
    pilImage.putdata(GimImage.image)
    self.scene.flash.append(ImageTk.PhotoImage(pilImage))
    POS_X = (2*SCREEN_W - GimImage.width)/2
    POS_Y = (2*SCREEN_H - GimImage.height)/2 - added_Y
    imagesprite = self._ScreenView.create_image(POS_X,POS_Y,image=self.scene.flash[-1])
    # Text should be kept on the top
    self._ScreenView.tag_raise('text')
    return 0

def printLine(self):
    # First delete the old line
    try:
        self._ScreenView.delete(self.scene.text_idx)
    except Exception:
        print "No old line present on the screen"
    # I'm using images here because of the following things: positioning, alpha and font
    pilImage = PIL.Image.new("RGBA", (SCREEN_W, TEXT_H), (32,32,32,192))
    draw = PIL.ImageDraw.Draw(pilImage)
    font = PIL.ImageFont.truetype("Meiryo.ttf", 20)
    # First - draw the speaker name at (20,0)
    draw.text((20,0), self.scene.speaker, (255,255,255), font=font)
    # Default highlighting
    clt = 0
    color = CLT_STYLES[clt].top_color
    # Regex for finding highlighted regions
    clt_marker = re.compile(r"\<CLT (\d+)\>(.*?)\<CLT\>", re.DOTALL)
    clt_counter = 0
    # The text is split into a list like [CLT0_TEXT, CLT_NUM, CLT_TEXT, CLT0_TEXT]
    text = re.split(clt_marker, self.scene.text)
    # Draw lines with the fixed line spacing
    attSpacing = 20
    x = 20 # Margin
    y = 20 # Initial y
    partNum = 0
    for part in text:
        # Reset text color
        if partNum % 3 == 0:
            clt = 0
            color = CLT_STYLES[clt].top_color
        # Every first out of 3 - CLT number (look at the list form once again)
        if partNum % 3 == 1:
            clt = int(part)
            color = CLT_STYLES[clt].top_color
        # Dealing with a string
        else:
            # Draw text with the color we need
            for line in part.splitlines():
                draw.text( (x,y), line, color, font=font)
                y = y + attSpacing
        # Next part
        partNum += 1
    # Draw the text on canvas
    self.scene.text_img = ImageTk.PhotoImage(pilImage)
    self.scene.text_idx = self._ScreenView.create_image(SCREEN_W/2, SCREEN_H - TEXT_H/2,image=self.scene.text_img, tag = 'text')
    pass
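A note on printLine's CLT handling: re.split with a capturing-group pattern interleaves the captured groups into the result, which is exactly the [plain, clt_number, highlighted, plain, ...] shape the partNum % 3 logic relies on. A standalone sketch (stdlib only; the sample string is made up):

import re

clt_marker = re.compile(r"\<CLT (\d+)\>(.*?)\<CLT\>", re.DOTALL)
sample = u"plain <CLT 3>highlighted<CLT> more plain"
print re.split(clt_marker, sample)
# -> [u'plain ', u'3', u'highlighted', u' more plain']
# index % 3 == 1 -> CLT number, index % 3 == 2 -> highlighted run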
gpl-3.0
-2,265,219,950,478,194,700
34.180851
128
0.579226
false
3.164593
false
false
false
lokeshsaini94/DaysCalculator
src/days_calculator.py
1
2817
# Calculates number of days between two dates.
# Enter your birth date and current date. This program will calculate the number of days


def date_error_check(month1, day1, month2, day2):
    # Checks if dates are correct.
    if month1 > 12 or month2 > 12 or day1 > 31 or day2 > 31:
        return False
    return True


def date_is_before(year1, month1, day1, year2, month2, day2):
    # Checks if birth date is less than current date
    if year1 < year2:
        return True
    if year1 == year2:
        if month1 < month2:
            return True
        if month1 == month2:
            if day1 < day2:
                return True
    return False


def is_leap_year(year1):
    # Checks if the year is a leap year or not
    if (year1 % 400 == 0):
        return True
    if (year1 % 100 == 0):
        return False
    if (year1 % 4 == 0):
        return True
    return False


def days_in_month(year1, month1):
    # Returns the number of days in the given month and year
    if month1 == 1 or month1 == 3 or month1 == 5 or month1 == 7 or month1 == 8 or month1 == 10 or month1 == 12:
        return 31
    if month1 == 2:
        if is_leap_year(year1):
            return 29
        return 28
    return 30


def next_day(year1, month1, day1):
    # Returns the date of the next day
    if day1 < days_in_month(year1, month1):
        return year1, month1, day1+1
    else:
        if month1 < 12:
            return year1, month1+1, 1
        else:
            return year1+1, 1, 1


def days_calculator(year1, month1, day1, year2, month2, day2):
    # Calculates the days b/w birth date and current date
    days = 0
    if not date_error_check(month1, day1, month2, day2):
        return False  # "Wrong date format! try again"
    if not date_is_before(year1, month1, day1, year2, month2, day2):
        return False  # "No Time travelers allowed"
    while date_is_before(year1, month1, day1, year2, month2, day2):
        year1, month1, day1 = next_day(year1, month1, day1)
        days = days + 1
    return days

# Getting user input and printing results
print "Enter Birth date (yyyy-mm-dd)"
year1 = input("Enter year 1: ")
month1 = input("Enter month 1: ")
day1 = input("Enter day 1: ")
print "Enter current date (yyyy-mm-dd)"
year2 = input("Enter year 2: ")
month2 = input("Enter month 2: ")
day2 = input("Enter day 2: ")

# Call days_calculator once and keep the result: it returns False on bad
# input, and an integer (possibly 0 for identical dates) otherwise, so the
# error check must distinguish False from 0.
days = days_calculator(year1, month1, day1, year2, month2, day2)
if days is False:
    print "Wrong Date! Try again"
else:
    print "Number of days:", days
    print "Number of hours:", days * 24
    print "Number of minutes:", days * 24 * 60
    print "Number of seconds:", days * 24 * 60 * 60
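A quick, non-interactive sanity check of the helpers above against the standard library, which already knows about leap years; purely illustrative, the dates are arbitrary:

import datetime

# 2016 was a leap year, 1900 was not (divisible by 100 but not 400).
assert is_leap_year(2016) and not is_leap_year(1900)
# Two steps across the leap day: Feb 28 -> Feb 29 -> Mar 1.
assert days_calculator(2016, 2, 28, 2016, 3, 1) == 2
# Cross-check an arbitrary span against datetime.date subtraction.
d1, d2 = datetime.date(1994, 5, 17), datetime.date(2016, 1, 1)
assert days_calculator(1994, 5, 17, 2016, 1, 1) == (d2 - d1).days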
apache-2.0
6,824,768,713,936,740,000
34.658228
117
0.629393
false
3.260417
false
false
false
moshthepitt/product.co.ke
links/migrations/0001_initial.py
1
1633
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-05-02 10:06
from __future__ import unicode_literals

from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Link',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Created on')),
                ('updated_on', models.DateTimeField(auto_now=True, verbose_name='Updated on')),
                ('title', models.CharField(max_length=255, verbose_name='Title')),
                ('link', models.URLField(max_length=2083, unique=True, verbose_name='Link')),
                ('description', models.TextField(default='', help_text='A short description. Please limit to 300 characters.', verbose_name='Description')),
                ('active', models.BooleanField(default=True, verbose_name='Active')),
                ('ghost', models.BooleanField(default=False, verbose_name='Ghost')),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL, verbose_name='User')),
            ],
            options={
                'ordering': ['-created_on'],
                'verbose_name': 'Link',
                'verbose_name_plural': 'Links',
            },
        ),
    ]
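For reference, the model implied by this initial migration would look roughly like the following; a hedged reconstruction, since only the migration is shown and the real models.py may carry extra methods or managers:

from django.conf import settings
from django.db import models


class Link(models.Model):
    # Hypothetical model matching the 0001_initial migration above.
    created_on = models.DateTimeField('Created on', auto_now_add=True)
    updated_on = models.DateTimeField('Updated on', auto_now=True)
    title = models.CharField('Title', max_length=255)
    link = models.URLField('Link', max_length=2083, unique=True)
    description = models.TextField(
        'Description', default='',
        help_text='A short description. Please limit to 300 characters.')
    active = models.BooleanField('Active', default=True)
    ghost = models.BooleanField('Ghost', default=False)
    user = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='User',
                             on_delete=models.PROTECT)

    class Meta:
        ordering = ['-created_on']
        verbose_name = 'Link'
        verbose_name_plural = 'Links'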
mit
4,329,832,834,615,764,500
41.973684
157
0.59951
false
4.219638
false
false
false
akretion/l10n-brazil
l10n_br_base/tests/test_other_ie.py
1
3687
# -*- coding: utf-8 -*-
# @ 2018 Akretion - www.akretion.com.br -
#   Magno Costa <magno.costa@akretion.com.br>
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl.html).

from odoo.tests.common import TransactionCase

import logging

_logger = logging.getLogger(__name__)


class OtherIETest(TransactionCase):

    def setUp(self):
        super(OtherIETest, self).setUp()
        self.company_model = self.env['res.company']

        self.company = self.company_model.create({
            'name': 'Akretion Sao Paulo',
            'legal_name': 'Akretion Sao Paulo',
            'cnpj_cpf': '26.905.703/0001-52',
            'inscr_est': '932.446.119.086',
            'street': 'Rua Paulo Dias',
            'number': '586',
            'district': 'Alumínio',
            'state_id': self.ref('base.state_br_sp'),
            'l10n_br_city_id': self.ref('l10n_br_base.city_3501152'),
            'country_id': self.ref('base.br'),
            'city': 'Alumínio',
            'zip': '18125-000',
            'phone': '+55 (21) 3010 9965',
            'email': 'contact@companytest.com.br',
            'website': 'www.companytest.com.br'
        })

    def test_included_valid_ie_in_company(self):
        result = self.company.write({
            'other_inscr_est_lines': [(0, 0, {
                'state_id': self.ref('base.state_br_ba'),
                'inscr_est': 41902653,
            })]
        })
        self.assertTrue(result, "Error including valid IE.")

        # Initialize before the loop so a match on any line is kept.
        result = False
        for line in self.company.partner_id.other_inscr_est_lines:
            if line.inscr_est == '41902653':
                result = True
        self.assertTrue(
            result, "Error in method to update other IE(s) on partner.")

        try:
            result = self.company.write({
                'other_inscr_est_lines': [(0, 0, {
                    'state_id': self.ref('base.state_br_ba'),
                    'inscr_est': 67729139,
                })]
            })
        except Exception:
            result = False

        self.assertFalse(
            result, "Error: accepted a second IE"
                    " for a state already informed.")

    def test_included_invalid_ie(self):
        try:
            result = self.company.write({
                'other_inscr_est_lines': [(0, 0, {
                    'state_id': self.ref('base.state_br_ba'),
                    'inscr_est': 41902652,
                })]
            })
        except Exception:
            result = False

        self.assertFalse(result, "Error: accepted an invalid IE.")

    def test_included_other_valid_ie_to_same_state_of_company(self):
        try:
            result = self.company.write({
                'other_inscr_est_lines': [(0, 0, {
                    'state_id': self.ref('base.state_br_sp'),
                    'inscr_est': 692015742119,
                })]
            })
        except Exception:
            result = False

        self.assertFalse(
            result, "Error: accepted another valid IE"
                    " for the company's own state.")

    def test_included_valid_ie_on_partner(self):
        result = self.company.partner_id.write({
            'other_inscr_est_lines': [(0, 0, {
                'state_id': self.ref('base.state_br_ba'),
                'inscr_est': 41902653,
            })]
        })
        self.assertTrue(result, "Error including valid IE.")

        # Initialize before the loop so a match on any line is kept.
        result = False
        for line in self.company.other_inscr_est_lines:
            if line.inscr_est == '41902653':
                result = True
        self.assertTrue(
            result, "Error in method to update other IE(s) on Company.")
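The (0, 0, {...}) triples written to other_inscr_est_lines are Odoo's generic x2many write commands; only creation is exercised in these tests, but the command set is worth spelling out. A short sketch with a hypothetical helper:

# Odoo x2many command tuples, as used in the writes above:
#   (0, 0, vals)  -> create a new line from vals
#   (1, id, vals) -> update the existing line id with vals
#   (2, id, 0)    -> remove and delete the line id
#   (6, 0, ids)   -> replace the whole set with ids
def add_other_ie(record, state_ref, inscr_est):
    # Hypothetical helper mirroring the writes in the tests above.
    return record.write({
        'other_inscr_est_lines': [(0, 0, {
            'state_id': state_ref,
            'inscr_est': inscr_est,
        })],
    })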
agpl-3.0
-2,175,972,573,397,180,000
34.095238
76
0.504206
false
3.641304
true
false
false
DedMemez/ODS-August-2017
toon/DistributedNPCLaffRestock.py
1
2504
# toontown.toon.DistributedNPCLaffRestock
from otp.nametag.NametagConstants import CFSpeech, CFTimeout
from toontown.toonbase import TTLocalizer, ToontownGlobals
from toontown.toon import NPCToons
from DistributedNPCToonBase import DistributedNPCToonBase
import LaffRestockGlobals, LaffShopGui, time

class DistributedNPCLaffRestock(DistributedNPCToonBase):

    def __init__(self, cr):
        DistributedNPCToonBase.__init__(self, cr)
        self.lastCollision = 0
        self.laffDialog = None
        return

    def disable(self):
        self.ignoreAll()
        self.destroyDialog()
        DistributedNPCToonBase.disable(self)

    def destroyDialog(self):
        self.clearChat()
        if self.laffDialog:
            self.laffDialog.destroy()
            self.laffDialog = None
        return

    def postToonStateInit(self):
        self.putOnSuit(ToontownGlobals.cogHQZoneId2deptIndex(self.zoneId), rental=True)

    def getCollSphereRadius(self):
        return 1.25

    def handleCollisionSphereEnter(self, collEntry):
        if self.lastCollision > time.time():
            return
        self.lastCollision = time.time() + ToontownGlobals.NPCCollisionDelay
        if base.localAvatar.getHp() >= base.localAvatar.getMaxHp():
            self.setChatAbsolute(TTLocalizer.RestockFullLaffMessage, CFSpeech | CFTimeout)
            return
        base.cr.playGame.getPlace().fsm.request('stopped')
        base.setCellsAvailable(base.bottomCells, 0)
        self.destroyDialog()
        self.acceptOnce('laffShopDone', self.__laffShopDone)
        self.laffDialog = LaffShopGui.LaffShopGui()

    def freeAvatar(self):
        base.cr.playGame.getPlace().fsm.request('walk')
        base.setCellsAvailable(base.bottomCells, 1)

    def __laffShopDone(self, state, laff):
        self.freeAvatar()
        if state == LaffRestockGlobals.TIMER_END:
            self.setChatAbsolute(TTLocalizer.STOREOWNER_TOOKTOOLONG, CFSpeech | CFTimeout)
        elif state == LaffRestockGlobals.USER_CANCEL:
            self.setChatAbsolute(TTLocalizer.STOREOWNER_GOODBYE, CFSpeech | CFTimeout)
        elif state == LaffRestockGlobals.RESTOCK:
            self.sendUpdate('restock', [laff])

    def restockResult(self, state):
        if state in LaffRestockGlobals.RestockMessages:
            self.setChatAbsolute(LaffRestockGlobals.RestockMessages[state], CFSpeech | CFTimeout)
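handleCollisionSphereEnter's lastCollision bookkeeping is a plain cooldown timer. A generic, self-contained sketch of the same pattern (the delay value stands in for whatever ToontownGlobals.NPCCollisionDelay is):

import time

class Cooldown:
    # Minimal sketch of the collision-delay pattern used above.
    def __init__(self, delaySeconds):
        self.delay = delaySeconds
        self.nextAllowed = 0

    def fire(self):
        # Ignore events until the cooldown window has elapsed.
        if self.nextAllowed > time.time():
            return False
        self.nextAllowed = time.time() + self.delay
        return True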
apache-2.0
3,289,223,680,876,199,400
38.419355
98
0.680911
false
3.602878
false
false
false
cerrno/neurokernel
neurokernel/tools/gpu.py
1
7561
#!/usr/bin/env python import numbers import numpy as np import pycuda.driver as drv import pycuda.elementwise as elementwise import pycuda.gpuarray as gpuarray from pycuda.tools import dtype_to_ctype # List of available numerical types provided by numpy: # XXX This try/except is an ugly hack to prevent the doc build on # ReadTheDocs from failing: try: num_types = [np.typeDict[t] for t in \ np.typecodes['AllInteger']+np.typecodes['AllFloat']] except TypeError: num_types = [] # Numbers of bytes occupied by each numerical type: num_nbytes = dict((np.dtype(t), t(1).nbytes) for t in num_types) def set_realloc(x_gpu, data): """ Transfer data into a GPUArray instance. Copies the contents of a numpy array into a GPUArray instance. If the array has a different type or dimensions than the instance, the GPU memory used by the instance is reallocated and the instance updated appropriately. Parameters ---------- x_gpu : pycuda.gpuarray.GPUArray GPUArray instance to modify. data : numpy.ndarray Array of data to transfer to the GPU. Examples -------- >>> import pycuda.gpuarray as gpuarray >>> import pycuda.autoinit >>> import numpy as np >>> import misc >>> x = np.asarray(np.random.rand(5), np.float32) >>> x_gpu = gpuarray.to_gpu(x) >>> x = np.asarray(np.random.rand(10, 1), np.float64) >>> set_realloc(x_gpu, x) >>> np.allclose(x, x_gpu.get()) True """ # Only reallocate if absolutely necessary: if x_gpu.shape != data.shape or x_gpu.size != data.size or \ x_gpu.strides != data.strides or x_gpu.dtype != data.dtype: # Free old memory: x_gpu.gpudata.free() # Allocate new memory: nbytes = num_nbytes[data.dtype] x_gpu.gpudata = drv.mem_alloc(nbytes*data.size) # Set array attributes: x_gpu.shape = data.shape x_gpu.size = data.size x_gpu.strides = data.strides x_gpu.dtype = data.dtype # Update the GPU memory: x_gpu.set(data) def bufint(a): """ Return buffer interface to GPU array. Parameters ---------- a : pycuda.gpuarray.GPUArray GPU array. Returns ------- b : buffer Buffer interface to array. Returns None if `a` has a length of 0. """ assert isinstance(a, gpuarray.GPUArray) if a.size: return a.gpudata.as_buffer(a.nbytes) else: return None def set_by_inds(dest_gpu, ind, src_gpu, ind_which='dest'): """ Set values in a GPUArray by index. Parameters ---------- dest_gpu : pycuda.gpuarray.GPUArray GPUArray instance to modify. ind : pycuda.gpuarray.GPUArray or numpy.ndarray 1D array of element indices to set. Must have an integer dtype. src_gpu : pycuda.gpuarray.GPUArray GPUArray instance from which to set values. ind_which : str If set to 'dest', set the elements in `dest_gpu` with indices `ind` to the successive values in `src_gpu`; the lengths of `ind` and `src_gpu` must be equal. If set to 'src', set the successive values in `dest_gpu` to the values in `src_gpu` with indices `ind`; the lengths of `ind` and `dest_gpu` must be equal. 
    Examples
    --------
    >>> import pycuda.gpuarray as gpuarray
    >>> import pycuda.autoinit
    >>> import numpy as np
    >>> import misc
    >>> dest_gpu = gpuarray.to_gpu(np.arange(5, dtype=np.float32))
    >>> ind = gpuarray.to_gpu(np.array([0, 2, 4]))
    >>> src_gpu = gpuarray.to_gpu(np.array([1, 1, 1], dtype=np.float32))
    >>> misc.set_by_inds(dest_gpu, ind, src_gpu, 'dest')
    >>> np.allclose(dest_gpu.get(), np.array([1, 1, 1, 3, 1], dtype=np.float32))
    True
    >>> dest_gpu = gpuarray.to_gpu(np.zeros(3, dtype=np.float32))
    >>> ind = gpuarray.to_gpu(np.array([0, 2, 4]))
    >>> src_gpu = gpuarray.to_gpu(np.arange(5, dtype=np.float32))
    >>> misc.set_by_inds(dest_gpu, ind, src_gpu, 'src')
    >>> np.allclose(dest_gpu.get(), np.array([0, 2, 4], dtype=np.float32))
    True

    Notes
    -----
    Only supports 1D index arrays.

    May not be efficient for certain index patterns because of the
    inability to coalesce memory operations.
    """

    # Only support 1D index arrays:
    assert len(np.shape(ind)) == 1
    assert dest_gpu.dtype == src_gpu.dtype
    assert issubclass(ind.dtype.type, numbers.Integral)
    N = len(ind)

    # Manually handle empty index array because it will cause the kernel to
    # fail if processed:
    if N == 0:
        return
    if ind_which == 'dest':
        assert N == len(src_gpu)
    elif ind_which == 'src':
        assert N == len(dest_gpu)
    else:
        raise ValueError('invalid value for `ind_which`')
    if not isinstance(ind, gpuarray.GPUArray):
        ind = gpuarray.to_gpu(ind)
    try:
        func = set_by_inds.cache[(dest_gpu.dtype, ind.dtype, ind_which)]
    except KeyError:
        data_ctype = dtype_to_ctype(dest_gpu.dtype)
        ind_ctype = dtype_to_ctype(ind.dtype)
        v = "{data_ctype} *dest, {ind_ctype} *ind, {data_ctype} *src".format(data_ctype=data_ctype, ind_ctype=ind_ctype)
        if ind_which == 'dest':
            func = elementwise.ElementwiseKernel(v, "dest[ind[i]] = src[i]")
        else:
            func = elementwise.ElementwiseKernel(v, "dest[i] = src[ind[i]]")
        set_by_inds.cache[(dest_gpu.dtype, ind.dtype, ind_which)] = func
    func(dest_gpu, ind, src_gpu, range=slice(0, N, 1))
set_by_inds.cache = {}


def set_by_inds_from_inds(dest_gpu, ind_dest, src_gpu, ind_src):
    """
    Set values in a GPUArray by index from indexed values in another GPUArray.

    Parameters
    ----------
    dest_gpu : pycuda.gpuarray.GPUArray
        GPUArray instance to modify.
    ind_dest : pycuda.gpuarray.GPUArray or numpy.ndarray
        1D array of element indices in `dest_gpu` to set. Must have an
        integer dtype.
    src_gpu : pycuda.gpuarray.GPUArray
        GPUArray instance from which to set values.
    ind_src : pycuda.gpuarray.GPUArray or numpy.ndarray
        1D array of element indices in `src_gpu` to copy. Must have an
        integer dtype.
""" assert len(np.shape(ind_dest)) == 1 assert len(np.shape(ind_src)) == 1 assert dest_gpu.dtype == src_gpu.dtype assert ind_dest.dtype == ind_src.dtype assert issubclass(ind_dest.dtype.type, numbers.Integral) assert issubclass(ind_src.dtype.type, numbers.Integral) N = len(ind_src) # Manually handle empty index array because it will cause the kernel to # fail if processed: if N == 0: return assert N == len(ind_dest) if not isinstance(ind_dest, gpuarray.GPUArray): ind_dest = gpuarray.to_gpu(ind_dest) if not isinstance(ind_src, gpuarray.GPUArray): ind_src = gpuarray.to_gpu(ind_src) try: func = set_by_inds_from_inds.cache[(dest_gpu.dtype, ind_dest.dtype)] except KeyError: data_ctype = dtype_to_ctype(dest_gpu.dtype) ind_ctype = dtype_to_ctype(ind_dest.dtype) v = "{data_ctype} *dest, {ind_ctype} *ind_dest,"\ "{data_ctype} *src, {ind_ctype} *ind_src".format(data_ctype=data_ctype, ind_ctype=ind_ctype) func = elementwise.ElementwiseKernel(v, "dest[ind_dest[i]] = src[ind_src[i]]") set_by_inds_from_inds.cache[(dest_gpu.dtype, ind_dest.dtype)] = func func(dest_gpu, ind_dest, src_gpu, ind_src, range=slice(0, N, 1)) set_by_inds_from_inds.cache = {}
bsd-3-clause
-1,037,744,287,174,244,400
33.683486
120
0.622404
false
3.384512
false
false
false
TravelModellingGroup/TMGToolbox
TMGToolbox/src/XTMF_internal/delete_scenario.py
1
2229
''' Copyright 2016 Travel Modelling Group, Department of Civil Engineering, University of Toronto This file is part of the TMG Toolbox. The TMG Toolbox is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. The TMG Toolbox is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with the TMG Toolbox. If not, see <http://www.gnu.org/licenses/>. ''' #---METADATA--------------------- ''' Delete Scenario Authors: JamesVaughan Latest revision by: JamesVaughan This tool will allow XTMF to be able to delete a scenario within an EMME Databank. ''' #---VERSION HISTORY ''' 0.0.1 Created on 2016-03-24 by JamesVaughan ''' import inro.modeller as _m import traceback as _traceback _MODELLER = _m.Modeller() #Instantiate Modeller once. class DeleteScenario(_m.Tool()): version = '0.0.1' Scenario = _m.Attribute(int) def page(self): pb = _m.ToolPageBuilder(self, title="Delete Scenario", runnable=False, description="Cannot be called from Modeller.", branding_text="XTMF") return pb.render() def run(self): pass def __call__(self, Scenario): try: self._execute(Scenario) except Exception as e: raise Exception(_traceback.format_exc()) def _execute(self, Scenario): project = _MODELLER.emmebank scenario = project.scenario(str(Scenario)) if scenario is None: print "A delete was requested for scenario " + str(Scenario) + " but the scenario does not exist." return if scenario.delete_protected == True: scenario.delete_protected = False project.delete_scenario(scenario.id)
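When driven by XTMF, the tool is invoked through __call__ rather than its Modeller page; a hedged sketch of calling it from another Modeller script (the tool namespace is assumed from the file's location and may differ between Emme projects):

delete_tool = _MODELLER.tool('tmg.XTMF_internal.delete_scenario')
delete_tool(Scenario=3)  # deletes scenario 3, clearing delete protection first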
gpl-3.0
-351,590,633,516,950,140
30.408451
110
0.638852
false
4.237643
false
false
false
google/nerfactor
third_party/xiuminglib/xiuminglib/imprt.py
1
2311
from importlib import import_module from .log import get_logger logger = get_logger() # For < Python 3.6 try: ModuleNotFoundError except NameError: ModuleNotFoundError = ImportError def preset_import(name, assert_success=False): """A unified importer for both regular and ``google3`` modules, according to specified presets/profiles (e.g., ignoring ``ModuleNotFoundError``). """ if name in ('cv2', 'opencv'): try: # BUILD dep: # "//third_party/py/cvx2", from cvx2 import latest as mod # Or # BUILD dep: # "//third_party/OpenCVX:cvx2", # from google3.third_party.OpenCVX import cvx2 as cv2 except ModuleNotFoundError: mod = import_module_404ok('cv2') elif name in ('tf', 'tensorflow'): mod = import_module_404ok('tensorflow') elif name == 'gfile': # BUILD deps: # "//pyglib:gfile", # "//file/colossus/cns", mod = import_module_404ok('google3.pyglib.gfile') elif name == 'video_api': # BUILD deps: # "//learning/deepmind/video/python:video_api", mod = import_module_404ok( 'google3.learning.deepmind.video.python.video_api') elif name in ('bpy', 'bmesh', 'OpenEXR', 'Imath'): # BUILD deps: # "//third_party/py/Imath", # "//third_party/py/OpenEXR", mod = import_module_404ok(name) elif name in ('Vector', 'Matrix', 'Quaternion'): mod = import_module_404ok('mathutils') mod = _get_module_class(mod, name) elif name == 'BVHTree': mod = import_module_404ok('mathutils.bvhtree') mod = _get_module_class(mod, name) else: raise NotImplementedError(name) if assert_success: assert mod is not None, "Failed in importing '%s'" % name return mod def import_module_404ok(*args, **kwargs): """Returns ``None`` (instead of failing) in the case of ``ModuleNotFoundError``. """ try: mod = import_module(*args, **kwargs) except (ModuleNotFoundError, ImportError) as e: mod = None logger.debug("Ignored: %s", str(e)) return mod def _get_module_class(mod, clsname): if mod is None: return None return getattr(mod, clsname)
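Typical call sites just test the returned module for None, or use assert_success to fail fast; a short sketch limited to the presets handled above:

cv2 = preset_import('cv2')
if cv2 is not None:
    pass  # safe to use OpenCV here

# Insist on TensorFlow being importable, or raise an AssertionError:
tf = preset_import('tf', assert_success=True)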
apache-2.0
-7,016,370,503,272,079,000
27.182927
77
0.590221
false
3.668254
false
false
false
CrankWheel/grit-i18n
grit/tool/xmb.py
1
11671
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""The 'grit xmb' tool.
"""

import getopt
import os

from xml.sax import saxutils

from grit import grd_reader
from grit import lazy_re
from grit import tclib
from grit import util
from grit.tool import interface


# Used to collapse presentable content to determine if
# xml:space="preserve" is needed.
_WHITESPACES_REGEX = lazy_re.compile(ur'\s\s*')


# See XmlEscape below.
_XML_QUOTE_ESCAPES = {
    u"'": u'&apos;',
    u'"': u'&quot;',
}
_XML_BAD_CHAR_REGEX = lazy_re.compile(u'[^\u0009\u000A\u000D'
                                      u'\u0020-\uD7FF\uE000-\uFFFD]')


def _XmlEscape(s):
  """Returns text escaped for XML in a way compatible with Google's internal
  Translation Console tool.  May be used for attributes as well as for
  contents.
  """
  if not type(s) == unicode:
    s = unicode(s)
  result = saxutils.escape(s, _XML_QUOTE_ESCAPES)
  return _XML_BAD_CHAR_REGEX.sub(u'', result).encode('utf-8')


def _WriteAttribute(file, name, value):
  """Writes an XML attribute to the specified file.

  Args:
    file: file to write to
    name: name of the attribute
    value: (unescaped) value of the attribute
  """
  if value:
    file.write(' %s="%s"' % (name, _XmlEscape(value)))


def _WriteMessage(file, message):
  presentable_content = message.GetPresentableContent()
  assert (type(presentable_content) == unicode or
          (len(message.parts) == 1 and
           type(message.parts[0]) == tclib.Placeholder))

  preserve_space = presentable_content != _WHITESPACES_REGEX.sub(
      u' ', presentable_content.strip())

  file.write('<msg')
  _WriteAttribute(file, 'desc', message.GetDescription())
  _WriteAttribute(file, 'id', message.GetId())
  _WriteAttribute(file, 'meaning', message.GetMeaning())
  if preserve_space:
    _WriteAttribute(file, 'xml:space', 'preserve')
  file.write('>')
  if not preserve_space:
    file.write('\n  ')

  parts = message.GetContent()
  for part in parts:
    if isinstance(part, tclib.Placeholder):
      file.write('<ph')
      _WriteAttribute(file, 'name', part.GetPresentation())
      file.write('><ex>')
      file.write(_XmlEscape(part.GetExample()))
      file.write('</ex>')
      file.write(_XmlEscape(part.GetOriginal()))
      file.write('</ph>')
    else:
      file.write(_XmlEscape(part))
  if not preserve_space:
    file.write('\n')
  file.write('</msg>\n')


def WriteXmbFile(file, messages):
  """Writes the given grit.tclib.Message items to the specified open
  file-like object in the XMB format.
""" file.write("""<?xml version="1.0" encoding="UTF-8"?> <!DOCTYPE messagebundle [ <!ELEMENT messagebundle (msg)*> <!ATTLIST messagebundle class CDATA #IMPLIED> <!ELEMENT msg (#PCDATA|ph|source)*> <!ATTLIST msg id CDATA #IMPLIED> <!ATTLIST msg seq CDATA #IMPLIED> <!ATTLIST msg name CDATA #IMPLIED> <!ATTLIST msg desc CDATA #IMPLIED> <!ATTLIST msg meaning CDATA #IMPLIED> <!ATTLIST msg obsolete (obsolete) #IMPLIED> <!ATTLIST msg xml:space (default|preserve) "default"> <!ATTLIST msg is_hidden CDATA #IMPLIED> <!ELEMENT source (#PCDATA)> <!ELEMENT ph (#PCDATA|ex)*> <!ATTLIST ph name CDATA #REQUIRED> <!ELEMENT ex (#PCDATA)> ]> <messagebundle> """) for message in messages: _WriteMessage(file, message) file.write('</messagebundle>') # Good resource on POT format: http://pology.nedohodnik.net/doc/user/en_US/ch-poformat.html def WritePotFile(file, cliques, lang='', include_translation=False): def WriteAttribute(prefix, value): if value: file.write('%s%s\n' % (prefix, value)) def WriteExamples(): parts = message.GetContent() for part in parts: if isinstance(part, tclib.Placeholder): if part.GetExample(): file.write(u'#. - placeholder %s, example: %s\n' % (part.GetPresentation(), part.GetExample())) else: file.write(u'#. - placeholder %s, literally replaced with: %s\n' % (part.GetPresentation(), part.GetOriginal())) def PotEscape(text): return text.replace(u'\\', u'\\\\').replace(u'\n', u'\\n').replace(u'\t', u'\\t').replace(u'%', u'\%').encode('utf-8') for clique in cliques: message = clique.GetMessage() WriteAttribute(u'#. - description:', message.GetDescription()) WriteExamples() WriteAttribute(u'#: id: ', message.GetId()) meaning = message.GetMeaning() if meaning: file.write(u'msgctxt "%s"\n' % PotEscape(meaning)) def WriteMessagePart(key, msg): file.write(u'%s "' % key) parts = msg.GetContent() for part in parts: if isinstance(part, tclib.Placeholder): file.write(u'%%{%s}' % part.GetPresentation()) else: file.write(PotEscape(part)) file.write(u'"\n') WriteMessagePart(u'msgid', message) if not include_translation: file.write(u'msgstr ""\n') else: WriteMessagePart(u'msgstr', clique.MessageForLanguage(lang, pseudo_if_no_match=False, fallback_to_english=False)) file.write(u'\n') class OutputXmb(interface.Tool): """Outputs all translateable messages in the .grd input file to an .xmb file, which is the format used to give source messages to Google's internal Translation Console tool. The format could easily be used for other systems. Usage: grit xmb [-i|-h] [-l LIMITFILE] OUTPUTPATH OUTPUTPATH is the path you want to output the .xmb file to. The -l option can be used to output only some of the resources to the .xmb file. LIMITFILE is the path to a file that is used to limit the items output to the xmb file. If the filename extension is .grd, the file must be a .grd file and the tool only output the contents of nodes from the input file that also exist in the limit file (as compared on the 'name' attribute). Otherwise it must contain a list of the IDs that output should be limited to, one ID per line, and the tool will only output nodes with 'name' attributes that match one of the IDs. The -i option causes 'grit xmb' to output an "IDs only" file instead of an XMB file. The "IDs only" file contains the message ID of each message that would normally be output to the XMB file, one message ID per line. It is designed for use with the 'grit transl2tc' tool's -l option. 
Other options: -D NAME[=VAL] Specify a C-preprocessor-like define NAME with optional value VAL (defaults to 1) which will be used to control conditional inclusion of resources. -E NAME=VALUE Set environment variable NAME to VALUE (within grit). """ # The different output formats supported by this tool FORMAT_XMB = 0 FORMAT_IDS_ONLY = 1 FORMAT_POT = 2 def __init__(self, defines=None): super(OutputXmb, self).__init__() self.format = self.FORMAT_XMB self.defines = defines or {} def ShortDescription(self): return 'Exports all translateable messages into an XMB file.' def Run(self, opts, args): self.SetOptions(opts) limit_file = None limit_is_grd = False limit_file_dir = None own_opts, args = getopt.getopt(args, 'l:D:ihp') for key, val in own_opts: if key == '-l': limit_file = open(val, 'r') limit_file_dir = util.dirname(val) if not len(limit_file_dir): limit_file_dir = '.' limit_is_grd = os.path.splitext(val)[1] == '.grd' elif key == '-i': self.format = self.FORMAT_IDS_ONLY elif key == '-p': self.format = self.FORMAT_POT elif key == '-D': name, val = util.ParseDefine(val) self.defines[name] = val elif key == '-E': (env_name, env_value) = val.split('=', 1) os.environ[env_name] = env_value if not len(args) == 1: print ('grit xmb takes exactly one argument, the path to the XMB file ' 'to output.') return 2 xmb_path = args[0] res_tree = grd_reader.Parse(opts.input, debug=opts.extra_verbose) res_tree.SetOutputLanguage('en') res_tree.SetDefines(self.defines) res_tree.OnlyTheseTranslations([]) res_tree.RunGatherers() with open(xmb_path, 'wb') as output_file: self.Process( res_tree, output_file, limit_file, limit_is_grd, limit_file_dir) if limit_file: limit_file.close() print "Wrote %s" % xmb_path def Process(self, res_tree, output_file, limit_file=None, limit_is_grd=False, dir=None): """Writes a document with the contents of res_tree into output_file, limiting output to the IDs specified in limit_file, which is a GRD file if limit_is_grd is true, otherwise a file with one ID per line. The format of the output document depends on this object's format attribute. It can be FORMAT_XMB or FORMAT_IDS_ONLY. The FORMAT_IDS_ONLY format causes this function to write just a list of the IDs of all messages that would have been added to the XMB file, one ID per line. The FORMAT_XMB format causes this function to output the (default) XMB format. Args: res_tree: base.Node() output_file: file open for writing limit_file: None or file open for reading limit_is_grd: True | False dir: Directory of the limit file """ if limit_file: if limit_is_grd: limit_list = [] limit_tree = grd_reader.Parse(limit_file, dir=dir, debug=self.o.extra_verbose) for node in limit_tree: if 'name' in node.attrs: limit_list.append(node.attrs['name']) else: # Not a GRD file, so it's just a file with one ID per line limit_list = [item.strip() for item in limit_file.read().split('\n')] ids_already_done = {} cliques = [] for node in res_tree: if (limit_file and not ('name' in node.attrs and node.attrs['name'] in limit_list)): continue if not node.IsTranslateable(): continue for clique in node.GetCliques(): if not clique.IsTranslateable(): continue if not clique.GetMessage().GetRealContent(): continue # Some explanation is in order here. Note that we can have # many messages with the same ID. # # The way we work around this is to maintain a list of cliques # per message ID (in the UberClique) and select the "best" one # (the first one that has a description, or an arbitrary one # if there is no description) for inclusion in the XMB file. 
# The translations are all going to be the same for messages # with the same ID, although the way we replace placeholders # might be slightly different. id = clique.GetMessage().GetId() if id in ids_already_done: continue ids_already_done[id] = 1 clique = node.UberClique().BestClique(id) cliques += [clique] # Ensure a stable order of messages, to help regression testing. cliques.sort(key=lambda x:x.GetMessage().GetId()) messages = [c.GetMessage() for c in cliques] if self.format == self.FORMAT_IDS_ONLY: # We just print the list of IDs to the output file. for msg in messages: output_file.write(msg.GetId()) output_file.write('\n') elif self.format == self.FORMAT_POT: WritePotFile(output_file, cliques) else: assert self.format == self.FORMAT_XMB WriteXmbFile(output_file, messages)
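WriteXmbFile and WritePotFile only need message/clique objects and a writable handle. A hedged sketch that builds one message by hand with grit.tclib (real runs obtain them from a parsed .grd tree, as Process() does above); the Placeholder arguments follow tclib's (presentation, original, example) order:

from grit import tclib

msg = tclib.Message(text='Hello USERNAME',
                    placeholders=[tclib.Placeholder('USERNAME', '%s', 'Joi')],
                    description='A greeting shown at startup')
with open('out.xmb', 'wb') as f:
  WriteXmbFile(f, [msg])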
bsd-2-clause
5,850,516,025,989,369,000
32.927326
122
0.639877
false
3.564753
false
false
false
zjuchenyuan/BioWeb
Lib/Bio/Blast/NCBIStandalone.py
1
74989
# Copyright 1999-2000 by Jeffrey Chang. All rights reserved. # This code is part of the Biopython distribution and governed by its # license. Please see the LICENSE file that should have been included # as part of this package. # Patches by Mike Poidinger to support multiple databases. # Updated by Peter Cock in 2007 to do a better job on BLAST 2.2.15 """Code for calling standalone BLAST and parsing plain text output (DEPRECATED). Rather than parsing the human readable plain text BLAST output (which seems to change with every update to BLAST), we and the NBCI recommend you parse the XML output instead. The plain text parser in this module still works at the time of writing, but is considered obsolete and updating it to cope with the latest versions of BLAST is not a priority for us. This module also provides code to work with the "legacy" standalone version of NCBI BLAST, tools blastall, rpsblast and blastpgp via three helper functions of the same name. These functions are very limited for dealing with the output as files rather than handles, for which the wrappers in Bio.Blast.Applications are preferred. Furthermore, the NCBI themselves regard these command line tools as "legacy", and encourage using the new BLAST+ tools instead. Biopython has wrappers for these under Bio.Blast.Applications (see the tutorial). """ from __future__ import print_function import sys import re from Bio._py3k import StringIO from Bio.ParserSupport import AbstractParser, AbstractConsumer from Bio.ParserSupport import read_and_call, read_and_call_until from Bio.ParserSupport import read_and_call_while, attempt_read_and_call from Bio.ParserSupport import is_blank_line, safe_peekline, safe_readline from Bio import File from Bio.Blast import Record from Bio import BiopythonDeprecationWarning import warnings warnings.warn("This module has been deprecated. Consider Bio.SearchIO for " "parsing BLAST output instead.", BiopythonDeprecationWarning) _score_e_re = re.compile(r'Score +E') class LowQualityBlastError(Exception): """Error caused by running a low quality sequence through BLAST. When low quality sequences (like GenBank entries containing only stretches of a single nucleotide) are BLASTed, they will result in BLAST generating an error and not being able to perform the BLAST. search. This error should be raised for the BLAST reports produced in this case. """ pass class ShortQueryBlastError(Exception): """Error caused by running a short query sequence through BLAST. If the query sequence is too short, BLAST outputs warnings and errors:: Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch failed. [blastall] ERROR: [000.000] AT1G08320: Blast: [blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at least wordsize done This exception is raised when that condition is detected. """ pass class _Scanner(object): """Scan BLAST output from blastall or blastpgp. Tested with blastall and blastpgp v2.0.10, v2.0.11 Methods: - feed Feed data into the scanner. """ def feed(self, handle, consumer): """S.feed(handle, consumer) Feed in a BLAST report for scanning. handle is a file-like object that contains the BLAST report. consumer is a Consumer object that will receive events as the report is scanned. """ if isinstance(handle, File.UndoHandle): uhandle = handle else: uhandle = File.UndoHandle(handle) # Try to fast-forward to the beginning of the blast report. read_and_call_until(uhandle, consumer.noevent, contains='BLAST') # Now scan the BLAST report. 
self._scan_header(uhandle, consumer) self._scan_rounds(uhandle, consumer) self._scan_database_report(uhandle, consumer) self._scan_parameters(uhandle, consumer) def _scan_header(self, uhandle, consumer): # BLASTP 2.0.10 [Aug-26-1999] # # # Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Schaf # Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman (1997), # "Gapped BLAST and PSI-BLAST: a new generation of protein database sea # programs", Nucleic Acids Res. 25:3389-3402. # # Query= test # (140 letters) # # Database: sdqib40-1.35.seg.fa # 1323 sequences; 223,339 total letters # # ======================================================== # This next example is from the online version of Blast, # note there are TWO references, an RID line, and also # the database is BEFORE the query line. # Note there possibleuse of non-ASCII in the author names. # ======================================================== # # BLASTP 2.2.15 [Oct-15-2006] # Reference: Altschul, Stephen F., Thomas L. Madden, Alejandro A. Sch??ffer, # Jinghui Zhang, Zheng Zhang, Webb Miller, and David J. Lipman # (1997), "Gapped BLAST and PSI-BLAST: a new generation of # protein database search programs", Nucleic Acids Res. 25:3389-3402. # # Reference: Sch??ffer, Alejandro A., L. Aravind, Thomas L. Madden, Sergei # Shavirin, John L. Spouge, Yuri I. Wolf, Eugene V. Koonin, and # Stephen F. Altschul (2001), "Improving the accuracy of PSI-BLAST # protein database searches with composition-based statistics # and other refinements", Nucleic Acids Res. 29:2994-3005. # # RID: 1166022616-19998-65316425856.BLASTQ1 # # # Database: All non-redundant GenBank CDS # translations+PDB+SwissProt+PIR+PRF excluding environmental samples # 4,254,166 sequences; 1,462,033,012 total letters # Query= gi:16127998 # Length=428 # consumer.start_header() read_and_call(uhandle, consumer.version, contains='BLAST') read_and_call_while(uhandle, consumer.noevent, blank=1) # There might be a <pre> line, for qblast output. attempt_read_and_call(uhandle, consumer.noevent, start="<pre>") # Read the reference(s) while attempt_read_and_call(uhandle, consumer.reference, start='Reference'): # References are normally multiline terminated by a blank line # (or, based on the old code, the RID line) while True: line = uhandle.readline() if is_blank_line(line): consumer.noevent(line) break elif line.startswith("RID"): break else: # More of the reference consumer.reference(line) # Deal with the optional RID: ... read_and_call_while(uhandle, consumer.noevent, blank=1) attempt_read_and_call(uhandle, consumer.reference, start="RID:") read_and_call_while(uhandle, consumer.noevent, blank=1) # blastpgp may have a reference for compositional score matrix # adjustment (see Bug 2502): if attempt_read_and_call( uhandle, consumer.reference, start="Reference"): read_and_call_until(uhandle, consumer.reference, blank=1) read_and_call_while(uhandle, consumer.noevent, blank=1) # blastpgp has a Reference for composition-based statistics. if attempt_read_and_call( uhandle, consumer.reference, start="Reference"): read_and_call_until(uhandle, consumer.reference, blank=1) read_and_call_while(uhandle, consumer.noevent, blank=1) line = uhandle.peekline() assert line.strip() != "" assert not line.startswith("RID:") if line.startswith("Query="): # This is an old style query then database... # Read the Query lines and the following blank line. 
read_and_call(uhandle, consumer.query_info, start='Query=') read_and_call_until(uhandle, consumer.query_info, blank=1) read_and_call_while(uhandle, consumer.noevent, blank=1) # Read the database lines and the following blank line. read_and_call_until(uhandle, consumer.database_info, end='total letters') read_and_call(uhandle, consumer.database_info, contains='sequences') read_and_call_while(uhandle, consumer.noevent, blank=1) elif line.startswith("Database:"): # This is a new style database then query... read_and_call_until(uhandle, consumer.database_info, end='total letters') read_and_call(uhandle, consumer.database_info, contains='sequences') read_and_call_while(uhandle, consumer.noevent, blank=1) # Read the Query lines and the following blank line. # Or, on BLAST 2.2.22+ there is no blank link - need to spot # the "... Score E" line instead. read_and_call(uhandle, consumer.query_info, start='Query=') # BLAST 2.2.25+ has a blank line before Length= read_and_call_until(uhandle, consumer.query_info, start='Length=') while True: line = uhandle.peekline() if not line.strip() or _score_e_re.search(line) is not None: break # It is more of the query (and its length) read_and_call(uhandle, consumer.query_info) read_and_call_while(uhandle, consumer.noevent, blank=1) else: raise ValueError("Invalid header?") consumer.end_header() def _scan_rounds(self, uhandle, consumer): # Scan a bunch of rounds. # Each round begins with either a "Searching......" line # or a 'Score E' line followed by descriptions and alignments. # The email server doesn't give the "Searching....." line. # If there is no 'Searching.....' line then you'll first see a # 'Results from round' line while not self._eof(uhandle): line = safe_peekline(uhandle) if not line.startswith('Searching') and \ not line.startswith('Results from round') and \ _score_e_re.search(line) is None and \ 'No hits found' not in line: break self._scan_descriptions(uhandle, consumer) self._scan_alignments(uhandle, consumer) def _scan_descriptions(self, uhandle, consumer): # Searching..................................................done # Results from round 2 # # # Sc # Sequences producing significant alignments: (b # Sequences used in model and found again: # # d1tde_2 3.4.1.4.4 (119-244) Thioredoxin reductase [Escherichia ... # d1tcob_ 1.31.1.5.16 Calcineurin regulatory subunit (B-chain) [B... # d1symb_ 1.31.1.2.2 Calcyclin (S100) [RAT (RATTUS NORVEGICUS)] # # Sequences not found previously or not previously below threshold: # # d1osa__ 1.31.1.5.11 Calmodulin [Paramecium tetraurelia] # d1aoza3 2.5.1.3.3 (339-552) Ascorbate oxidase [zucchini (Cucurb... # # If PSI-BLAST, may also have: # # CONVERGED! consumer.start_descriptions() # Read 'Searching' # This line seems to be missing in BLASTN 2.1.2 (others?) attempt_read_and_call(uhandle, consumer.noevent, start='Searching') # blastpgp 2.0.10 from NCBI 9/19/99 for Solaris sometimes crashes here. # If this happens, the handle will yield no more information. if not uhandle.peekline(): raise ValueError("Unexpected end of blast report. " + "Looks suspiciously like a PSI-BLAST crash.") # BLASTN 2.2.3 sometimes spews a bunch of warnings and errors here: # Searching[blastall] WARNING: [000.000] AT1G08320: SetUpBlastSearch # [blastall] ERROR: [000.000] AT1G08320: Blast: # [blastall] ERROR: [000.000] AT1G08320: Blast: Query must be at leas # done # Reported by David Weisman. # Check for these error lines and ignore them for now. Let # the BlastErrorParser deal with them. 
line = uhandle.peekline() if "ERROR:" in line or line.startswith("done"): read_and_call_while(uhandle, consumer.noevent, contains="ERROR:") read_and_call(uhandle, consumer.noevent, start="done") # Check to see if this is PSI-BLAST. # If it is, the 'Searching' line will be followed by: # (version 2.0.10) # Searching............................. # Results from round 2 # or (version 2.0.11) # Searching............................. # # # Results from round 2 # Skip a bunch of blank lines. read_and_call_while(uhandle, consumer.noevent, blank=1) # Check for the results line if it's there. if attempt_read_and_call(uhandle, consumer.round, start='Results'): read_and_call_while(uhandle, consumer.noevent, blank=1) # Three things can happen here: # 1. line contains 'Score E' # 2. line contains "No hits found" # 3. no descriptions # The first one begins a bunch of descriptions. The last two # indicates that no descriptions follow, and we should go straight # to the alignments. if not attempt_read_and_call( uhandle, consumer.description_header, has_re=_score_e_re): # Either case 2 or 3. Look for "No hits found". attempt_read_and_call(uhandle, consumer.no_hits, contains='No hits found') try: read_and_call_while(uhandle, consumer.noevent, blank=1) except ValueError as err: if str(err) != "Unexpected end of stream.": raise err consumer.end_descriptions() # Stop processing. return # Read the score header lines read_and_call(uhandle, consumer.description_header, start='Sequences producing') # If PSI-BLAST, read the 'Sequences used in model' line. attempt_read_and_call(uhandle, consumer.model_sequences, start='Sequences used in model') read_and_call_while(uhandle, consumer.noevent, blank=1) # In BLAT, rather than a "No hits found" line, we just # get no descriptions (and no alignments). This can be # spotted because the next line is the database block: if safe_peekline(uhandle).startswith(" Database:"): consumer.end_descriptions() # Stop processing. return # Read the descriptions and the following blank lines, making # sure that there are descriptions. if not uhandle.peekline().startswith('Sequences not found'): read_and_call_until(uhandle, consumer.description, blank=1) read_and_call_while(uhandle, consumer.noevent, blank=1) # If PSI-BLAST, read the 'Sequences not found' line followed # by more descriptions. However, I need to watch out for the # case where there were no sequences not found previously, in # which case there will be no more descriptions. if attempt_read_and_call(uhandle, consumer.nonmodel_sequences, start='Sequences not found'): # Read the descriptions and the following blank lines. read_and_call_while(uhandle, consumer.noevent, blank=1) l = safe_peekline(uhandle) # Brad -- added check for QUERY. On some PSI-BLAST outputs # there will be a 'Sequences not found' line followed by no # descriptions. Check for this case since the first thing you'll # get is a blank line and then 'QUERY' if not l.startswith('CONVERGED') and l[0] != '>' \ and not l.startswith('QUERY'): read_and_call_until(uhandle, consumer.description, blank=1) read_and_call_while(uhandle, consumer.noevent, blank=1) attempt_read_and_call(uhandle, consumer.converged, start='CONVERGED') read_and_call_while(uhandle, consumer.noevent, blank=1) consumer.end_descriptions() def _scan_alignments(self, uhandle, consumer): if self._eof(uhandle): return # qblast inserts a helpful line here. attempt_read_and_call(uhandle, consumer.noevent, start="ALIGNMENTS") # First, check to see if I'm at the database report. 
line = safe_peekline(uhandle) if not line: # EOF return elif line.startswith(' Database') or line.startswith("Lambda"): return elif line[0] == '>': # XXX make a better check here between pairwise and masterslave self._scan_pairwise_alignments(uhandle, consumer) elif line.startswith('Effective'): return else: # XXX put in a check to make sure I'm in a masterslave alignment self._scan_masterslave_alignment(uhandle, consumer) def _scan_pairwise_alignments(self, uhandle, consumer): while not self._eof(uhandle): line = safe_peekline(uhandle) if line[0] != '>': break self._scan_one_pairwise_alignment(uhandle, consumer) def _scan_one_pairwise_alignment(self, uhandle, consumer): if self._eof(uhandle): return consumer.start_alignment() self._scan_alignment_header(uhandle, consumer) # Scan a bunch of score/alignment pairs. while True: if self._eof(uhandle): # Shouldn't have issued that _scan_alignment_header event... break line = safe_peekline(uhandle) if not line.startswith(' Score'): break self._scan_hsp(uhandle, consumer) consumer.end_alignment() def _scan_alignment_header(self, uhandle, consumer): # >d1rip__ 2.24.7.1.1 Ribosomal S17 protein [Bacillus # stearothermophilus] # Length = 81 # # Or, more recently with different white space: # # >gi|15799684|ref|NP_285696.1| threonine synthase ... # gi|15829258|ref|NP_308031.1| threonine synthase # ... # Length=428 read_and_call(uhandle, consumer.title, start='>') while True: line = safe_readline(uhandle) if line.lstrip().startswith(('Length =', 'Length=')): consumer.length(line) break elif is_blank_line(line): # Check to make sure I haven't missed the Length line raise ValueError("I missed the Length in an alignment header") consumer.title(line) # Older versions of BLAST will have a line with some spaces. # Version 2.0.14 (maybe 2.0.13?) and above print a true blank line. if not attempt_read_and_call(uhandle, consumer.noevent, start=' '): read_and_call(uhandle, consumer.noevent, blank=1) def _scan_hsp(self, uhandle, consumer): consumer.start_hsp() self._scan_hsp_header(uhandle, consumer) self._scan_hsp_alignment(uhandle, consumer) consumer.end_hsp() def _scan_hsp_header(self, uhandle, consumer): # Score = 22.7 bits (47), Expect = 2.5 # Identities = 10/36 (27%), Positives = 18/36 (49%) # Strand = Plus / Plus # Frame = +3 # read_and_call(uhandle, consumer.score, start=' Score') read_and_call(uhandle, consumer.identities, start=' Identities') # BLASTN attempt_read_and_call(uhandle, consumer.strand, start=' Strand') # BLASTX, TBLASTN, TBLASTX attempt_read_and_call(uhandle, consumer.frame, start=' Frame') read_and_call(uhandle, consumer.noevent, blank=1) def _scan_hsp_alignment(self, uhandle, consumer): # Query: 11 GRGVSACA-------TCDGFFYRNQKVAVIGGGNTAVEEALYLSNIASEVHLIHRRDGF # GRGVS+ TC Y + + V GGG+ + EE L + I R+ # Sbjct: 12 GRGVSSVVRRCIHKPTCKE--YAVKIIDVTGGGSFSAEEVQELREATLKEVDILRKVSG # # Query: 64 AEKILIKR 71 # I +K # Sbjct: 70 PNIIQLKD 77 # while True: # Blastn adds an extra line filled with spaces before Query attempt_read_and_call(uhandle, consumer.noevent, start=' ') read_and_call(uhandle, consumer.query, start='Query') read_and_call(uhandle, consumer.align, start=' ') read_and_call(uhandle, consumer.sbjct, start='Sbjct') try: read_and_call_while(uhandle, consumer.noevent, blank=1) except ValueError as err: if str(err) != "Unexpected end of stream.": raise err # End of File (well, it looks like it with recent versions # of BLAST for multiple queries after the Iterator class # has broken up the whole file into chunks). 
break line = safe_peekline(uhandle) # Alignment continues if I see a 'Query' or the spaces for Blastn. if not (line.startswith('Query') or line.startswith(' ')): break def _scan_masterslave_alignment(self, uhandle, consumer): consumer.start_alignment() while True: line = safe_readline(uhandle) # Check to see whether I'm finished reading the alignment. # This is indicated by 1) database section, 2) next psi-blast # round, which can also be a 'Results from round' if no # searching line is present # patch by chapmanb if line.startswith('Searching') or \ line.startswith('Results from round'): uhandle.saveline(line) break elif line.startswith(' Database'): uhandle.saveline(line) break elif is_blank_line(line): consumer.noevent(line) else: consumer.multalign(line) read_and_call_while(uhandle, consumer.noevent, blank=1) consumer.end_alignment() def _eof(self, uhandle): try: line = safe_peekline(uhandle) except ValueError as err: if str(err) != "Unexpected end of stream.": raise err line = "" return not line def _scan_database_report(self, uhandle, consumer): # Database: sdqib40-1.35.seg.fa # Posted date: Nov 1, 1999 4:25 PM # Number of letters in database: 223,339 # Number of sequences in database: 1323 # # Lambda K H # 0.322 0.133 0.369 # # Gapped # Lambda K H # 0.270 0.0470 0.230 # ########################################## # Or, more recently Blast 2.2.15 gives less blank lines ########################################## # Database: All non-redundant GenBank CDS translations+PDB+SwissProt+PIR+PRF excluding # environmental samples # Posted date: Dec 12, 2006 5:51 PM # Number of letters in database: 667,088,753 # Number of sequences in database: 2,094,974 # Lambda K H # 0.319 0.136 0.395 # Gapped # Lambda K H # 0.267 0.0410 0.140 if self._eof(uhandle): return consumer.start_database_report() # Subset of the database(s) listed below # Number of letters searched: 562,618,960 # Number of sequences searched: 228,924 if attempt_read_and_call(uhandle, consumer.noevent, start=" Subset"): read_and_call(uhandle, consumer.noevent, contains="letters") read_and_call(uhandle, consumer.noevent, contains="sequences") read_and_call(uhandle, consumer.noevent, start=" ") # Sameet Mehta reported seeing output from BLASTN 2.2.9 that # was missing the "Database" stanza completely. while attempt_read_and_call(uhandle, consumer.database, start=' Database'): # BLAT output ends abruptly here, without any of the other # information. Check to see if this is the case. If so, # then end the database report here gracefully. if not uhandle.peekline().strip() or \ uhandle.peekline().startswith("BLAST"): consumer.end_database_report() return # Database can span multiple lines. read_and_call_until(uhandle, consumer.database, start=' Posted') read_and_call(uhandle, consumer.posted_date, start=' Posted') read_and_call(uhandle, consumer.num_letters_in_database, start=' Number of letters') read_and_call(uhandle, consumer.num_sequences_in_database, start=' Number of sequences') # There may not be a line starting with spaces... attempt_read_and_call(uhandle, consumer.noevent, start=' ') line = safe_readline(uhandle) uhandle.saveline(line) if 'Lambda' in line: break try: read_and_call(uhandle, consumer.noevent, start='Lambda') read_and_call(uhandle, consumer.ka_params) except Exception: # TODO: ValueError, AttributeError? 
pass # This blank line is optional: attempt_read_and_call(uhandle, consumer.noevent, blank=1) # not BLASTP attempt_read_and_call(uhandle, consumer.gapped, start='Gapped') # not TBLASTX if attempt_read_and_call(uhandle, consumer.noevent, start='Lambda'): read_and_call(uhandle, consumer.ka_params_gap) # Blast 2.2.4 can sometimes skip the whole parameter section. # Thus, I need to be careful not to read past the end of the # file. try: read_and_call_while(uhandle, consumer.noevent, blank=1) except ValueError as x: if str(x) != "Unexpected end of stream.": raise consumer.end_database_report() def _scan_parameters(self, uhandle, consumer): # Matrix: BLOSUM62 # Gap Penalties: Existence: 11, Extension: 1 # Number of Hits to DB: 50604 # Number of Sequences: 1323 # Number of extensions: 1526 # Number of successful extensions: 6 # Number of sequences better than 10.0: 5 # Number of HSP's better than 10.0 without gapping: 5 # Number of HSP's successfully gapped in prelim test: 0 # Number of HSP's that attempted gapping in prelim test: 1 # Number of HSP's gapped (non-prelim): 5 # length of query: 140 # length of database: 223,339 # effective HSP length: 39 # effective length of query: 101 # effective length of database: 171,742 # effective search space: 17345942 # effective search space used: 17345942 # T: 11 # A: 40 # X1: 16 ( 7.4 bits) # X2: 38 (14.8 bits) # X3: 64 (24.9 bits) # S1: 41 (21.9 bits) # S2: 42 (20.8 bits) ########################################## # Or, more recently Blast(x) 2.2.15 gives ########################################## # Matrix: BLOSUM62 # Gap Penalties: Existence: 11, Extension: 1 # Number of Sequences: 4535438 # Number of Hits to DB: 2,588,844,100 # Number of extensions: 60427286 # Number of successful extensions: 126433 # Number of sequences better than 2.0: 30 # Number of HSP's gapped: 126387 # Number of HSP's successfully gapped: 35 # Length of query: 291 # Length of database: 1,573,298,872 # Length adjustment: 130 # Effective length of query: 161 # Effective length of database: 983,691,932 # Effective search space: 158374401052 # Effective search space used: 158374401052 # Neighboring words threshold: 12 # Window for multiple hits: 40 # X1: 16 ( 7.3 bits) # X2: 38 (14.6 bits) # X3: 64 (24.7 bits) # S1: 41 (21.7 bits) # S2: 32 (16.9 bits) # Blast 2.2.4 can sometimes skip the whole parameter section. # BLAT also skips the whole parameter section. # Thus, check to make sure that the parameter section really # exists. if not uhandle.peekline().strip(): return # BLASTN 2.2.9 looks like it reverses the "Number of Hits" and # "Number of Sequences" lines. 
        consumer.start_parameters()

        # Matrix line may be missing in BLASTN 2.2.9
        attempt_read_and_call(uhandle, consumer.matrix,
                              start='Matrix')
        # not TBLASTX
        attempt_read_and_call(uhandle, consumer.gap_penalties, start='Gap')

        attempt_read_and_call(uhandle, consumer.num_sequences,
                              start='Number of Sequences')
        attempt_read_and_call(uhandle, consumer.num_hits,
                              start='Number of Hits')
        attempt_read_and_call(uhandle, consumer.num_sequences,
                              start='Number of Sequences')
        attempt_read_and_call(uhandle, consumer.num_extends,
                              start='Number of extensions')
        attempt_read_and_call(uhandle, consumer.num_good_extends,
                              start='Number of successful')

        attempt_read_and_call(uhandle, consumer.num_seqs_better_e,
                              start='Number of sequences')

        # not BLASTN, TBLASTX
        if attempt_read_and_call(uhandle, consumer.hsps_no_gap,
                                 start="Number of HSP's better"):
            # BLASTN 2.2.9
            if attempt_read_and_call(uhandle, consumer.noevent,
                                     start="Number of HSP's gapped:"):
                read_and_call(uhandle, consumer.noevent,
                              start="Number of HSP's successfully")
                # This is omitted in 2.2.15
                attempt_read_and_call(uhandle, consumer.noevent,
                                      start="Number of extra gapped extensions")
            else:
                read_and_call(uhandle, consumer.hsps_prelim_gapped,
                              start="Number of HSP's successfully")
                read_and_call(uhandle, consumer.hsps_prelim_gapped_attempted,
                              start="Number of HSP's that")
                read_and_call(uhandle, consumer.hsps_gapped,
                              start="Number of HSP's gapped")
        # e.g. BLASTX 2.2.15 where the "better" line is missing
        elif attempt_read_and_call(uhandle, consumer.noevent,
                                   start="Number of HSP's gapped"):
            read_and_call(uhandle, consumer.noevent,
                          start="Number of HSP's successfully")

        # not in blastx 2.2.1
        attempt_read_and_call(uhandle, consumer.query_length,
                              has_re=re.compile(r"[Ll]ength of query"))
        # Not in BLASTX 2.2.22+
        attempt_read_and_call(uhandle, consumer.database_length,
                              has_re=re.compile(r"[Ll]ength of \s*[Dd]atabase"))

        # BLASTN 2.2.9
        attempt_read_and_call(uhandle, consumer.noevent,
                              start="Length adjustment")
        attempt_read_and_call(uhandle, consumer.effective_hsp_length,
                              start='effective HSP')
        # Not in blastx 2.2.1
        attempt_read_and_call(
            uhandle, consumer.effective_query_length,
            has_re=re.compile(r'[Ee]ffective length of query'))

        # This is not in BLASTP 2.2.15
        attempt_read_and_call(
            uhandle, consumer.effective_database_length,
            has_re=re.compile(r'[Ee]ffective length of \s*[Dd]atabase'))
        # Not in blastx 2.2.1, added a ':' to distinguish between
        # this and the 'effective search space used' line
        attempt_read_and_call(
            uhandle, consumer.effective_search_space,
            has_re=re.compile(r'[Ee]ffective search space:'))
        # Does not appear in BLASTP 2.0.5
        attempt_read_and_call(
            uhandle, consumer.effective_search_space_used,
            has_re=re.compile(r'[Ee]ffective search space used'))

        # BLASTX, TBLASTN, TBLASTX
        attempt_read_and_call(uhandle, consumer.frameshift,
                              start='frameshift')

        # not in BLASTN 2.2.9
        attempt_read_and_call(uhandle, consumer.threshold,
                              start='T')
        # In BLASTX 2.2.15 replaced by: "Neighboring words threshold: 12"
        attempt_read_and_call(uhandle, consumer.threshold,
                              start='Neighboring words threshold')

        # not in BLASTX 2.2.15
        attempt_read_and_call(uhandle, consumer.window_size,
                              start='A')
        # get this instead: "Window for multiple hits: 40"
        attempt_read_and_call(uhandle, consumer.window_size,
                              start='Window for multiple hits')

        # not in BLASTX 2.2.22+
        attempt_read_and_call(uhandle, consumer.dropoff_1st_pass,
                              start='X1')
        # not TBLASTN
        attempt_read_and_call(uhandle, consumer.gap_x_dropoff,
                              start='X2')
        # not BLASTN, TBLASTX
        attempt_read_and_call(uhandle, consumer.gap_x_dropoff_final,
                              start='X3')
        #
not TBLASTN attempt_read_and_call(uhandle, consumer.gap_trigger, start='S1') # not in blastx 2.2.1 # first we make sure we have additional lines to work with, if # not then the file is done and we don't have a final S2 if not is_blank_line(uhandle.peekline(), allow_spaces=1): read_and_call(uhandle, consumer.blast_cutoff, start='S2') consumer.end_parameters() class BlastParser(AbstractParser): """Parses BLAST data into a Record.Blast object. """ def __init__(self): """__init__(self)""" self._scanner = _Scanner() self._consumer = _BlastConsumer() def parse(self, handle): """parse(self, handle)""" self._scanner.feed(handle, self._consumer) return self._consumer.data class PSIBlastParser(AbstractParser): """Parses BLAST data into a Record.PSIBlast object. """ def __init__(self): """__init__(self)""" self._scanner = _Scanner() self._consumer = _PSIBlastConsumer() def parse(self, handle): """parse(self, handle)""" self._scanner.feed(handle, self._consumer) return self._consumer.data class _HeaderConsumer(object): def start_header(self): self._header = Record.Header() def version(self, line): c = line.split() self._header.application = c[0] self._header.version = c[1] if len(c) > 2: # The date is missing in the new C++ output from blastx 2.2.22+ # Just get "BLASTX 2.2.22+\n" and that's all. self._header.date = c[2][1:-1] def reference(self, line): if line.startswith('Reference: '): self._header.reference = line[11:] else: self._header.reference = self._header.reference + line def query_info(self, line): if line.startswith('Query= '): self._header.query = line[7:].lstrip() elif line.startswith('Length='): # New style way to give the query length in BLAST 2.2.22+ (the C++ code) self._header.query_letters = _safe_int(line[7:].strip()) elif not line.startswith(' '): # continuation of query_info self._header.query = "%s%s" % (self._header.query, line) else: # Hope it is the old style way to give the query length: letters, = _re_search( r"([0-9,]+) letters", line, "I could not find the number of letters in line\n%s" % line) self._header.query_letters = _safe_int(letters) def database_info(self, line): line = line.rstrip() if line.startswith('Database: '): self._header.database = line[10:] elif not line.endswith('total letters'): if self._header.database: # Need to include a space when merging multi line datase descr self._header.database = self._header.database + " " + line.strip() else: self._header.database = line.strip() else: sequences, letters = _re_search( r"([0-9,]+) sequences; ([0-9,-]+) total letters", line, "I could not find the sequences and letters in line\n%s" % line) self._header.database_sequences = _safe_int(sequences) self._header.database_letters = _safe_int(letters) def end_header(self): # Get rid of the trailing newlines self._header.reference = self._header.reference.rstrip() self._header.query = self._header.query.rstrip() class _DescriptionConsumer(object): def start_descriptions(self): self._descriptions = [] self._model_sequences = [] self._nonmodel_sequences = [] self._converged = 0 self._type = None self._roundnum = None self.__has_n = 0 # Does the description line contain an N value? 
def description_header(self, line): if line.startswith('Sequences producing'): cols = line.split() if cols[-1] == 'N': self.__has_n = 1 def description(self, line): dh = self._parse(line) if self._type == 'model': self._model_sequences.append(dh) elif self._type == 'nonmodel': self._nonmodel_sequences.append(dh) else: self._descriptions.append(dh) def model_sequences(self, line): self._type = 'model' def nonmodel_sequences(self, line): self._type = 'nonmodel' def converged(self, line): self._converged = 1 def no_hits(self, line): pass def round(self, line): if not line.startswith('Results from round'): raise ValueError("I didn't understand the round line\n%s" % line) self._roundnum = _safe_int(line[18:].strip()) def end_descriptions(self): pass def _parse(self, description_line): line = description_line # for convenience dh = Record.Description() # I need to separate the score and p-value from the title. # sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77 # sp|P21297|FLBT_CAUCR FLBT PROTEIN [snip] 284 7e-77 1 # special cases to handle: # - title must be preserved exactly (including whitespaces) # - score could be equal to e-value (not likely, but what if??) # - sometimes there's an "N" score of '1'. cols = line.split() if len(cols) < 3: raise ValueError( "Line does not appear to contain description:\n%s" % line) if self.__has_n: i = line.rfind(cols[-1]) # find start of N i = line.rfind(cols[-2], 0, i) # find start of p-value i = line.rfind(cols[-3], 0, i) # find start of score else: i = line.rfind(cols[-1]) # find start of p-value i = line.rfind(cols[-2], 0, i) # find start of score if self.__has_n: dh.title, dh.score, dh.e, dh.num_alignments = \ line[:i].rstrip(), cols[-3], cols[-2], cols[-1] else: dh.title, dh.score, dh.e, dh.num_alignments = \ line[:i].rstrip(), cols[-2], cols[-1], 1 dh.num_alignments = _safe_int(dh.num_alignments) dh.score = _safe_int(dh.score) dh.e = _safe_float(dh.e) return dh class _AlignmentConsumer(object): # This is a little bit tricky. An alignment can either be a # pairwise alignment or a multiple alignment. Since it's difficult # to know a-priori which one the blast record will contain, I'm going # to make one class that can parse both of them. def start_alignment(self): self._alignment = Record.Alignment() self._multiple_alignment = Record.MultipleAlignment() def title(self, line): if self._alignment.title: self._alignment.title += " " self._alignment.title += line.strip() def length(self, line): # e.g. "Length = 81" or more recently, "Length=428" parts = line.replace(" ", "").split("=") assert len(parts) == 2, "Unrecognised format length line" self._alignment.length = parts[1] self._alignment.length = _safe_int(self._alignment.length) def multalign(self, line): # Standalone version uses 'QUERY', while WWW version uses blast_tmp. if line.startswith('QUERY') or line.startswith('blast_tmp'): # If this is the first line of the multiple alignment, # then I need to figure out how the line is formatted. 
# Format of line is: # QUERY 1 acttg...gccagaggtggtttattcagtctccataagagaggggacaaacg 60 try: name, start, seq, end = line.split() except ValueError: raise ValueError("I do not understand the line\n%s" % line) self._start_index = line.index(start, len(name)) self._seq_index = line.index(seq, self._start_index + len(start)) # subtract 1 for the space self._name_length = self._start_index - 1 self._start_length = self._seq_index - self._start_index - 1 self._seq_length = line.rfind(end) - self._seq_index - 1 # self._seq_index = line.index(seq) # # subtract 1 for the space # self._seq_length = line.rfind(end) - self._seq_index - 1 # self._start_index = line.index(start) # self._start_length = self._seq_index - self._start_index - 1 # self._name_length = self._start_index # Extract the information from the line name = line[:self._name_length] name = name.rstrip() start = line[self._start_index:self._start_index + self._start_length] start = start.rstrip() if start: start = _safe_int(start) end = line[self._seq_index + self._seq_length:].rstrip() if end: end = _safe_int(end) seq = line[self._seq_index:self._seq_index + self._seq_length].rstrip() # right pad the sequence with spaces if necessary if len(seq) < self._seq_length: seq += ' ' * (self._seq_length - len(seq)) # I need to make sure the sequence is aligned correctly with the query. # First, I will find the length of the query. Then, if necessary, # I will pad my current sequence with spaces so that they will line # up correctly. # Two possible things can happen: # QUERY # 504 # # QUERY # 403 # # Sequence 504 will need padding at the end. Since I won't know # this until the end of the alignment, this will be handled in # end_alignment. # Sequence 403 will need padding before being added to the alignment. align = self._multiple_alignment.alignment # for convenience align.append((name, start, seq, end)) # This is old code that tried to line up all the sequences # in a multiple alignment by using the sequence title's as # identifiers. The problem with this is that BLAST assigns # different HSP's from the same sequence the same id. Thus, # in one alignment block, there may be multiple sequences with # the same id. I'm not sure how to handle this, so I'm not # going to. # # If the sequence is the query, then just add it. # if name == 'QUERY': # if len(align) == 0: # align.append((name, start, seq)) # else: # aname, astart, aseq = align[0] # if name != aname: # raise ValueError, "Query is not the first sequence" # aseq = aseq + seq # align[0] = aname, astart, aseq # else: # if len(align) == 0: # raise ValueError, "I could not find the query sequence" # qname, qstart, qseq = align[0] # # # Now find my sequence in the multiple alignment. # for i in range(1, len(align)): # aname, astart, aseq = align[i] # if name == aname: # index = i # break # else: # # If I couldn't find it, then add a new one. # align.append((None, None, None)) # index = len(align)-1 # # Make sure to left-pad it. # aname, astart, aseq = name, start, ' '*(len(qseq)-len(seq)) # # if len(qseq) != len(aseq) + len(seq): # # If my sequences are shorter than the query sequence, # # then I will need to pad some spaces to make them line up. # # Since I've already right padded seq, that means aseq # # must be too short. 
# aseq = aseq + ' '*(len(qseq)-len(aseq)-len(seq)) # aseq = aseq + seq # if astart is None: # astart = start # align[index] = aname, astart, aseq def end_alignment(self): # Remove trailing newlines if self._alignment: self._alignment.title = self._alignment.title.rstrip() # This code is also obsolete. See note above. # If there's a multiple alignment, I will need to make sure # all the sequences are aligned. That is, I may need to # right-pad the sequences. # if self._multiple_alignment is not None: # align = self._multiple_alignment.alignment # seqlen = None # for i in range(len(align)): # name, start, seq = align[i] # if seqlen is None: # seqlen = len(seq) # else: # if len(seq) < seqlen: # seq = seq + ' '*(seqlen - len(seq)) # align[i] = name, start, seq # elif len(seq) > seqlen: # raise ValueError, \ # "Sequence %s is longer than the query" % name # Clean up some variables, if they exist. try: del self._seq_index del self._seq_length del self._start_index del self._start_length del self._name_length except AttributeError: pass class _HSPConsumer(object): def start_hsp(self): self._hsp = Record.HSP() def score(self, line): self._hsp.bits, self._hsp.score = _re_search( r"Score =\s*([0-9.e+]+) bits \(([0-9]+)\)", line, "I could not find the score in line\n%s" % line) self._hsp.score = _safe_float(self._hsp.score) self._hsp.bits = _safe_float(self._hsp.bits) x, y = _re_search( r"Expect\(?(\d*)\)? = +([0-9.e\-|\+]+)", line, "I could not find the expect in line\n%s" % line) if x: self._hsp.num_alignments = _safe_int(x) else: self._hsp.num_alignments = 1 self._hsp.expect = _safe_float(y) def identities(self, line): x, y = _re_search( r"Identities = (\d+)\/(\d+)", line, "I could not find the identities in line\n%s" % line) self._hsp.identities = _safe_int(x), _safe_int(y) self._hsp.align_length = _safe_int(y) if 'Positives' in line: x, y = _re_search( r"Positives = (\d+)\/(\d+)", line, "I could not find the positives in line\n%s" % line) self._hsp.positives = _safe_int(x), _safe_int(y) assert self._hsp.align_length == _safe_int(y) if 'Gaps' in line: x, y = _re_search( r"Gaps = (\d+)\/(\d+)", line, "I could not find the gaps in line\n%s" % line) self._hsp.gaps = _safe_int(x), _safe_int(y) assert self._hsp.align_length == _safe_int(y) def strand(self, line): self._hsp.strand = _re_search( r"Strand\s?=\s?(\w+)\s?/\s?(\w+)", line, "I could not find the strand in line\n%s" % line) def frame(self, line): # Frame can be in formats: # Frame = +1 # Frame = +2 / +2 if '/' in line: self._hsp.frame = _re_search( r"Frame\s?=\s?([-+][123])\s?/\s?([-+][123])", line, "I could not find the frame in line\n%s" % line) else: self._hsp.frame = _re_search( r"Frame = ([-+][123])", line, "I could not find the frame in line\n%s" % line) # Match a space, if one is available. Masahir Ishikawa found a # case where there's no space between the start and the sequence: # Query: 100tt 101 # line below modified by Yair Benita, Sep 2004 # Note that the colon is not always present. 2006 _query_re = re.compile(r"Query(:?) 
\s*(\d+)\s*(.+) (\d+)")

    def query(self, line):
        m = self._query_re.search(line)
        if m is None:
            if line.strip() == "Query ------------------------------------------------------------":
                # Special case - long gap relative to the subject,
                # note there is no start/end present, cannot update those
                self._hsp.query += "-" * 60
                self._query_len = 60  # number of dashes
                self._query_start_index = 13  # offset of first dash
                return
            raise ValueError("I could not find the query in line\n%s" % line)

        # line below modified by Yair Benita, Sep 2004.
        # added the end attribute for the query
        colon, start, seq, end = m.groups()
        seq = seq.strip()
        self._hsp.query += seq
        if self._hsp.query_start is None:
            self._hsp.query_start = _safe_int(start)

        # line below added by Yair Benita, Sep 2004.
        # added the end attribute for the query
        self._hsp.query_end = _safe_int(end)

        # Get index for sequence start (regular expression element 3)
        self._query_start_index = m.start(3)
        self._query_len = len(seq)

    def align(self, line):
        seq = line[self._query_start_index:].rstrip()
        if len(seq) < self._query_len:
            # Make sure the alignment is the same length as the query
            seq += ' ' * (self._query_len - len(seq))
        elif len(seq) > self._query_len:
            raise ValueError("Match is longer than the query in line\n%s"
                             % line)
        self._hsp.match = self._hsp.match + seq

    # To match how we do the query, cache the regular expression.
    # Note that the colon is not always present.
    _sbjct_re = re.compile(r"Sbjct(:?) \s*(\d+)\s*(.+) (\d+)")

    def sbjct(self, line):
        m = self._sbjct_re.search(line)
        if m is None:
            raise ValueError("I could not find the sbjct in line\n%s" % line)
        colon, start, seq, end = m.groups()
        # mikep 26/9/00
        # On occasion, there is a blast hit with no subject match
        # so far, it only occurs with 1-line short "matches"
        # I have decided to let these pass as they appear
        if not seq.strip():
            seq = ' ' * self._query_len
        else:
            seq = seq.strip()
        self._hsp.sbjct += seq
        if self._hsp.sbjct_start is None:
            self._hsp.sbjct_start = _safe_int(start)

        self._hsp.sbjct_end = _safe_int(end)
        if len(seq) != self._query_len:
            raise ValueError(
                "QUERY and SBJCT sequence lengths don't match (%i %r vs %i) in line\n%s"
                % (self._query_len, self._hsp.query, len(seq), line))

        del self._query_start_index  # clean up unused variables
        del self._query_len

    def end_hsp(self):
        pass


class _DatabaseReportConsumer(object):

    def start_database_report(self):
        self._dr = Record.DatabaseReport()

    def database(self, line):
        m = re.search(r"Database: (.+)$", line)
        if m:
            self._dr.database_name.append(m.group(1))
        elif self._dr.database_name:
            # This must be a continuation of the previous name.
self._dr.database_name[-1] = "%s%s" % (self._dr.database_name[-1], line.strip()) def posted_date(self, line): self._dr.posted_date.append(_re_search( r"Posted date:\s*(.+)$", line, "I could not find the posted date in line\n%s" % line)) def num_letters_in_database(self, line): letters, = _get_cols( line, (-1,), ncols=6, expected={2: "letters", 4: "database:"}) self._dr.num_letters_in_database.append(_safe_int(letters)) def num_sequences_in_database(self, line): sequences, = _get_cols( line, (-1,), ncols=6, expected={2: "sequences", 4: "database:"}) self._dr.num_sequences_in_database.append(_safe_int(sequences)) def ka_params(self, line): self._dr.ka_params = [_safe_float(x) for x in line.split()] def gapped(self, line): self._dr.gapped = 1 def ka_params_gap(self, line): self._dr.ka_params_gap = [_safe_float(x) for x in line.split()] def end_database_report(self): pass class _ParametersConsumer(object): def start_parameters(self): self._params = Record.Parameters() def matrix(self, line): self._params.matrix = line[8:].rstrip() def gap_penalties(self, line): self._params.gap_penalties = [_safe_float(x) for x in _get_cols( line, (3, 5), ncols=6, expected={2: "Existence:", 4: "Extension:"})] def num_hits(self, line): if '1st pass' in line: x, = _get_cols(line, (-4,), ncols=11, expected={2: "Hits"}) self._params.num_hits = _safe_int(x) else: x, = _get_cols(line, (-1,), ncols=6, expected={2: "Hits"}) self._params.num_hits = _safe_int(x) def num_sequences(self, line): if '1st pass' in line: x, = _get_cols(line, (-4,), ncols=9, expected={2: "Sequences:"}) self._params.num_sequences = _safe_int(x) else: x, = _get_cols(line, (-1,), ncols=4, expected={2: "Sequences:"}) self._params.num_sequences = _safe_int(x) def num_extends(self, line): if '1st pass' in line: x, = _get_cols(line, (-4,), ncols=9, expected={2: "extensions:"}) self._params.num_extends = _safe_int(x) else: x, = _get_cols(line, (-1,), ncols=4, expected={2: "extensions:"}) self._params.num_extends = _safe_int(x) def num_good_extends(self, line): if '1st pass' in line: x, = _get_cols(line, (-4,), ncols=10, expected={3: "extensions:"}) self._params.num_good_extends = _safe_int(x) else: x, = _get_cols(line, (-1,), ncols=5, expected={3: "extensions:"}) self._params.num_good_extends = _safe_int(x) def num_seqs_better_e(self, line): self._params.num_seqs_better_e, = _get_cols( line, (-1,), ncols=7, expected={2: "sequences"}) self._params.num_seqs_better_e = _safe_int( self._params.num_seqs_better_e) def hsps_no_gap(self, line): self._params.hsps_no_gap, = _get_cols( line, (-1,), ncols=9, expected={3: "better", 7: "gapping:"}) self._params.hsps_no_gap = _safe_int(self._params.hsps_no_gap) def hsps_prelim_gapped(self, line): self._params.hsps_prelim_gapped, = _get_cols( line, (-1,), ncols=9, expected={4: "gapped", 6: "prelim"}) self._params.hsps_prelim_gapped = _safe_int( self._params.hsps_prelim_gapped) def hsps_prelim_gapped_attempted(self, line): self._params.hsps_prelim_gapped_attempted, = _get_cols( line, (-1,), ncols=10, expected={4: "attempted", 7: "prelim"}) self._params.hsps_prelim_gapped_attempted = _safe_int( self._params.hsps_prelim_gapped_attempted) def hsps_gapped(self, line): self._params.hsps_gapped, = _get_cols( line, (-1,), ncols=6, expected={3: "gapped"}) self._params.hsps_gapped = _safe_int(self._params.hsps_gapped) def query_length(self, line): self._params.query_length, = _get_cols( line.lower(), (-1,), ncols=4, expected={0: "length", 2: "query:"}) self._params.query_length = _safe_int(self._params.query_length) def 
database_length(self, line): self._params.database_length, = _get_cols( line.lower(), (-1,), ncols=4, expected={0: "length", 2: "database:"}) self._params.database_length = _safe_int(self._params.database_length) def effective_hsp_length(self, line): self._params.effective_hsp_length, = _get_cols( line, (-1,), ncols=4, expected={1: "HSP", 2: "length:"}) self._params.effective_hsp_length = _safe_int( self._params.effective_hsp_length) def effective_query_length(self, line): self._params.effective_query_length, = _get_cols( line, (-1,), ncols=5, expected={1: "length", 3: "query:"}) self._params.effective_query_length = _safe_int( self._params.effective_query_length) def effective_database_length(self, line): self._params.effective_database_length, = _get_cols( line.lower(), (-1,), ncols=5, expected={1: "length", 3: "database:"}) self._params.effective_database_length = _safe_int( self._params.effective_database_length) def effective_search_space(self, line): self._params.effective_search_space, = _get_cols( line, (-1,), ncols=4, expected={1: "search"}) self._params.effective_search_space = _safe_int( self._params.effective_search_space) def effective_search_space_used(self, line): self._params.effective_search_space_used, = _get_cols( line, (-1,), ncols=5, expected={1: "search", 3: "used:"}) self._params.effective_search_space_used = _safe_int( self._params.effective_search_space_used) def frameshift(self, line): self._params.frameshift = _get_cols(line, (4, 5), ncols=6, expected={0: "frameshift", 2: "decay"}) def threshold(self, line): if line[:2] == "T:": # Assume its an old style line like "T: 123" self._params.threshold, = _get_cols(line, (1,), ncols=2, expected={0: "T:"}) elif line[:28] == "Neighboring words threshold:": self._params.threshold, = _get_cols(line, (3,), ncols=4, expected={0: "Neighboring", 1: "words", 2: "threshold:"}) else: raise ValueError("Unrecognised threshold line:\n%s" % line) self._params.threshold = _safe_int(self._params.threshold) def window_size(self, line): if line[:2] == "A:": self._params.window_size, = _get_cols(line, (1,), ncols=2, expected={0: "A:"}) elif line[:25] == "Window for multiple hits:": self._params.window_size, = _get_cols(line, (4,), ncols=5, expected={0: "Window", 2: "multiple", 3: "hits:"}) else: raise ValueError("Unrecognised window size line:\n%s" % line) self._params.window_size = _safe_int(self._params.window_size) def dropoff_1st_pass(self, line): score, bits = _re_search( r"X1: (\d+) \(\s*([0-9,.]+) bits\)", line, "I could not find the dropoff in line\n%s" % line) self._params.dropoff_1st_pass = _safe_int(score), _safe_float(bits) def gap_x_dropoff(self, line): score, bits = _re_search( r"X2: (\d+) \(\s*([0-9,.]+) bits\)", line, "I could not find the gap dropoff in line\n%s" % line) self._params.gap_x_dropoff = _safe_int(score), _safe_float(bits) def gap_x_dropoff_final(self, line): score, bits = _re_search( r"X3: (\d+) \(\s*([0-9,.]+) bits\)", line, "I could not find the gap dropoff final in line\n%s" % line) self._params.gap_x_dropoff_final = _safe_int(score), _safe_float(bits) def gap_trigger(self, line): score, bits = _re_search( r"S1: (\d+) \(\s*([0-9,.]+) bits\)", line, "I could not find the gap trigger in line\n%s" % line) self._params.gap_trigger = _safe_int(score), _safe_float(bits) def blast_cutoff(self, line): score, bits = _re_search( r"S2: (\d+) \(\s*([0-9,.]+) bits\)", line, "I could not find the blast cutoff in line\n%s" % line) self._params.blast_cutoff = _safe_int(score), _safe_float(bits) def end_parameters(self): 
        pass


class _BlastConsumer(AbstractConsumer,
                     _HeaderConsumer,
                     _DescriptionConsumer,
                     _AlignmentConsumer,
                     _HSPConsumer,
                     _DatabaseReportConsumer,
                     _ParametersConsumer
                     ):
    # This Consumer inherits from many other consumer classes that handle
    # the actual dirty work.  An alternate way to do it is to create objects
    # of those classes and then delegate the parsing tasks to them in a
    # decorator-type pattern.  The disadvantage of that is that the method
    # names will need to be resolved in this class.  However, using
    # a decorator will retain more control in this class (which may or
    # may not be a bad thing).  In addition, having each sub-consumer as
    # its own object prevents this object's dictionary from being cluttered
    # with members and reduces the chance of member collisions.
    def __init__(self):
        self.data = None

    def round(self, line):
        # Make sure nobody's trying to pass me PSI-BLAST data!
        raise ValueError("This consumer doesn't handle PSI-BLAST data")

    def start_header(self):
        self.data = Record.Blast()
        _HeaderConsumer.start_header(self)

    def end_header(self):
        _HeaderConsumer.end_header(self)
        self.data.__dict__.update(self._header.__dict__)

    def end_descriptions(self):
        self.data.descriptions = self._descriptions

    def end_alignment(self):
        _AlignmentConsumer.end_alignment(self)
        if self._alignment.hsps:
            self.data.alignments.append(self._alignment)
        if self._multiple_alignment.alignment:
            self.data.multiple_alignment = self._multiple_alignment

    def end_hsp(self):
        _HSPConsumer.end_hsp(self)
        try:
            self._alignment.hsps.append(self._hsp)
        except AttributeError:
            raise ValueError("Found an HSP before an alignment")

    def end_database_report(self):
        _DatabaseReportConsumer.end_database_report(self)
        self.data.__dict__.update(self._dr.__dict__)

    def end_parameters(self):
        _ParametersConsumer.end_parameters(self)
        self.data.__dict__.update(self._params.__dict__)


class _PSIBlastConsumer(AbstractConsumer,
                        _HeaderConsumer,
                        _DescriptionConsumer,
                        _AlignmentConsumer,
                        _HSPConsumer,
                        _DatabaseReportConsumer,
                        _ParametersConsumer
                        ):
    def __init__(self):
        self.data = None

    def start_header(self):
        self.data = Record.PSIBlast()
        _HeaderConsumer.start_header(self)

    def end_header(self):
        _HeaderConsumer.end_header(self)
        self.data.__dict__.update(self._header.__dict__)

    def start_descriptions(self):
        self._round = Record.Round()
        self.data.rounds.append(self._round)
        _DescriptionConsumer.start_descriptions(self)

    def end_descriptions(self):
        _DescriptionConsumer.end_descriptions(self)
        self._round.number = self._roundnum
        if self._descriptions:
            self._round.new_seqs.extend(self._descriptions)
        self._round.reused_seqs.extend(self._model_sequences)
        self._round.new_seqs.extend(self._nonmodel_sequences)
        if self._converged:
            self.data.converged = 1

    def end_alignment(self):
        _AlignmentConsumer.end_alignment(self)
        if self._alignment.hsps:
            self._round.alignments.append(self._alignment)
        if self._multiple_alignment:
            self._round.multiple_alignment = self._multiple_alignment

    def end_hsp(self):
        _HSPConsumer.end_hsp(self)
        try:
            self._alignment.hsps.append(self._hsp)
        except AttributeError:
            raise ValueError("Found an HSP before an alignment")

    def end_database_report(self):
        _DatabaseReportConsumer.end_database_report(self)
        self.data.__dict__.update(self._dr.__dict__)

    def end_parameters(self):
        _ParametersConsumer.end_parameters(self)
        self.data.__dict__.update(self._params.__dict__)


class Iterator(object):
    """Iterates over a file of multiple BLAST results.

    Methods:
    next   Return the next record from the stream, or None.
""" def __init__(self, handle, parser=None): """__init__(self, handle, parser=None) Create a new iterator. handle is a file-like object. parser is an optional Parser object to change the results into another form. If set to None, then the raw contents of the file will be returned. """ try: handle.readline except AttributeError: raise ValueError( "I expected a file handle or file-like object, got %s" % type(handle)) self._uhandle = File.UndoHandle(handle) self._parser = parser self._header = [] def __next__(self): """next(self) -> object Return the next Blast record from the file. If no more records, return None. """ lines = [] query = False while True: line = self._uhandle.readline() if not line: break # If I've reached the next one, then put the line back and stop. if lines and (line.startswith('BLAST') or line.startswith('BLAST', 1) or line.startswith('<?xml ')): self._uhandle.saveline(line) break # New style files omit the BLAST line to mark a new query: if line.startswith("Query="): if not query: if not self._header: self._header = lines[:] query = True else: # Start of another record self._uhandle.saveline(line) break lines.append(line) if query and "BLAST" not in lines[0]: # Cheat and re-insert the header # print "-"*50 # print "".join(self._header) # print "-"*50 # print "".join(lines) # print "-"*50 lines = self._header + lines if not lines: return None data = ''.join(lines) if self._parser is not None: return self._parser.parse(StringIO(data)) return data if sys.version_info[0] < 3: def next(self): """Python 2 style alias for Python 3 style __next__ method.""" return self.__next__() def __iter__(self): return iter(self.__next__, None) def _re_search(regex, line, error_msg): m = re.search(regex, line) if not m: raise ValueError(error_msg) return m.groups() def _get_cols(line, cols_to_get, ncols=None, expected=None): if expected is None: expected = {} cols = line.split() # Check to make sure number of columns is correct if ncols is not None and len(cols) != ncols: raise ValueError("I expected %d columns (got %d) in line\n%s" % (ncols, len(cols), line)) # Check to make sure columns contain the correct data for k in expected: if cols[k] != expected[k]: raise ValueError("I expected '%s' in column %d in line\n%s" % (expected[k], k, line)) # Construct the answer tuple results = [] for c in cols_to_get: results.append(cols[c]) return tuple(results) def _safe_int(str): try: return int(str) except ValueError: # Something went wrong. Try to clean up the string. # Remove all commas from the string str = str.replace(',', '') # try again after removing commas. # Note int() will return a long rather than overflow try: return int(str) except ValueError: pass # Call float to handle things like "54.3", note could lose precision, e.g. # >>> int("5399354557888517312") # 5399354557888517312 # >>> int(float("5399354557888517312")) # 5399354557888517120 return int(float(str)) def _safe_float(str): # Thomas Rosleff Soerensen (rosleff@mpiz-koeln.mpg.de) noted that # float('e-172') does not produce an error on his platform. Thus, # we need to check the string for this condition. # Sometimes BLAST leaves of the '1' in front of an exponent. if str and str[0] in ['E', 'e']: str = '1' + str try: return float(str) except ValueError: # Remove all commas from the string str = str.replace(',', '') # try again. 
return float(str) class _BlastErrorConsumer(_BlastConsumer): def __init__(self): _BlastConsumer.__init__(self) def noevent(self, line): if 'Query must be at least wordsize' in line: raise ShortQueryBlastError("Query must be at least wordsize") # Now pass the line back up to the superclass. method = getattr(_BlastConsumer, 'noevent', _BlastConsumer.__getattr__(self, 'noevent')) method(line) class BlastErrorParser(AbstractParser): """Attempt to catch and diagnose BLAST errors while parsing. This utilizes the BlastParser module but adds an additional layer of complexity on top of it by attempting to diagnose ValueErrors that may actually indicate problems during BLAST parsing. Current BLAST problems this detects are: o LowQualityBlastError - When BLASTing really low quality sequences (ie. some GenBank entries which are just short stretches of a single nucleotide), BLAST will report an error with the sequence and be unable to search with this. This will lead to a badly formatted BLAST report that the parsers choke on. The parser will convert the ValueError to a LowQualityBlastError and attempt to provide useful information. """ def __init__(self, bad_report_handle=None): """Initialize a parser that tries to catch BlastErrors. Arguments: o bad_report_handle - An optional argument specifying a handle where bad reports should be sent. This would allow you to save all of the bad reports to a file, for instance. If no handle is specified, the bad reports will not be saved. """ self._bad_report_handle = bad_report_handle # self._b_parser = BlastParser() self._scanner = _Scanner() self._consumer = _BlastErrorConsumer() def parse(self, handle): """Parse a handle, attempting to diagnose errors. """ results = handle.read() try: self._scanner.feed(StringIO(results), self._consumer) except ValueError: # if we have a bad_report_file, save the info to it first if self._bad_report_handle: # send the info to the error handle self._bad_report_handle.write(results) # now we want to try and diagnose the error self._diagnose_error( StringIO(results), self._consumer.data) # if we got here we can't figure out the problem # so we should pass along the syntax error we got raise return self._consumer.data def _diagnose_error(self, handle, data_record): """Attempt to diagnose an error in the passed handle. Arguments: o handle - The handle potentially containing the error o data_record - The data record partially created by the consumer. """ line = handle.readline() while line: # 'Searchingdone' instead of 'Searching......done' seems # to indicate a failure to perform the BLAST due to # low quality sequence if line.startswith('Searchingdone'): raise LowQualityBlastError("Blast failure occurred on query: ", data_record.query) line = handle.readline()
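

# ---------------------------------------------------------------------------
# Usage sketch (editor's addition, not part of the original module).  It
# assumes a plain-text BLAST report saved at a hypothetical path
# 'my_blast.txt', and that Record.Blast exposes the attributes the consumers
# above populate (query, alignments, hsps, ...).  Kept as comments so the
# module's behavior is unchanged:
#
#     parser = BlastParser()
#     with open('my_blast.txt') as handle:
#         record = parser.parse(handle)
#     for alignment in record.alignments:
#         for hsp in alignment.hsps:
#             print(alignment.title, hsp.expect)
#
# For a file holding several concatenated reports, wrap the handle in the
# Iterator class defined above instead:
#
#     with open('many_blasts.txt') as handle:
#         for record in Iterator(handle, parser):
#             print(record.query)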
mit
-429,756,769,175,448,800
39.843682
107
0.56711
false
3.827727
false
false
false
karstenw/nodebox-pyobjc
examples/Extended Application/matplotlib/examples/user_interfaces/mpl_with_glade_316_sgskip.py
1
1165
""" ========================= Matplotlib With Glade 316 ========================= """ from gi.repository import Gtk from matplotlib.figure import Figure from matplotlib.axes import Subplot from numpy import arange, sin, pi from matplotlib.backends.backend_gtk3agg import FigureCanvasGTK3Agg as FigureCanvas class Window1Signals(object): def on_window1_destroy(self, widget): Gtk.main_quit() def main(): builder = Gtk.Builder() builder.add_objects_from_file("mpl_with_glade_316.glade", ("window1", "")) builder.connect_signals(Window1Signals()) window = builder.get_object("window1") sw = builder.get_object("scrolledwindow1") # Start of Matplotlib specific code figure = Figure(figsize=(8, 6), dpi=71) axis = figure.add_subplot(111) t = arange(0.0, 3.0, 0.01) s = sin(2*pi*t) axis.plot(t, s) axis.set_xlabel('time [s]') axis.set_ylabel('voltage [V]') canvas = FigureCanvas(figure) # a Gtk.DrawingArea canvas.set_size_request(800, 600) sw.add_with_viewport(canvas) # End of Matplotlib specific code window.show_all() Gtk.main() if __name__ == "__main__": main()
mit
7,270,442,403,777,310,000
24.326087
83
0.641202
false
3.406433
false
false
false
OpenGeoscience/girder_db_items
server/dbs/sqlalchemydb.py
1
19936
#!/usr/bin/env python
# -*- coding: utf-8 -*-

##############################################################################
#  Copyright Kitware Inc.
#
#  Licensed under the Apache License, Version 2.0 ( the "License" );
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
##############################################################################

import six
import sqlalchemy
import sqlalchemy.engine.reflection
import sqlalchemy.orm
import time

from six.moves import range

from girder import logger as log
from . import base
from .base import DatabaseConnectorException

MAX_SCHEMAS_IN_TABLE_LIST = 25

DatabaseOperators = {
    'eq': '=',
    'ne': '!=',
    'gte': '>=',
    'gt': '>',
    'lte': '<=',
    'lt': '<',
}

_enginePool = {}
_enginePoolMaxSize = 5


def getEngine(uri, **kwargs):
    """
    Get a sqlalchemy engine from a pool in case we use the same parameters for
    multiple connections.
    """
    key = (uri, frozenset(six.viewitems(kwargs)))
    engine = _enginePool.get(key)
    if engine is None:
        engine = sqlalchemy.create_engine(uri, **kwargs)
        if len(_enginePool) >= _enginePoolMaxSize:
            _enginePool.clear()
        _enginePool[key] = engine
    return engine


class SQLAlchemyConnector(base.DatabaseConnector):
    name = 'sqlalchemy'

    def __init__(self, *args, **kwargs):
        super(SQLAlchemyConnector, self).__init__(*args, **kwargs)
        self.table = kwargs.get('table')
        self.schema = kwargs.get('schema')
        self.dbEngine = None
        self.sessions = {}
        # dbparams can include values in http://www.postgresql.org/docs/
        # current/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS
        self.dbparams = kwargs.get('dbparams', {})
        self.databaseUri = self.adjustDBUri(kwargs.get('uri'))
        # Additional parameters:
        #  idletime: seconds after which a connection is considered idle
        #  abandontime: seconds after which a connection will be abandoned
        self.dbIdleTime = float(kwargs.get('idletime', 300))
        self.dbAbandonTime = float(kwargs.get('abandontime',
                                              self.dbIdleTime * 5))
        self.databaseOperators = DatabaseOperators
        self.fields = None
        self.allowFieldFunctions = True
        self.allowSortFunctions = True
        self.allowFilterFunctions = True
        self.initialized = True

        self.types = {type: getattr(sqlalchemy, type)
                      for type in dir(sqlalchemy)
                      if isinstance(getattr(sqlalchemy, type),
                                    sqlalchemy.sql.visitors.VisitableType)}

        class Table(object):
            """
            This is used to handle table properties from SQLAlchemy.
            """
            pass

        self.tableClass = Table
        self._allowedFunctions = {
            'cast': True,
            'count': True,
            'distinct': True,
        }

    def _addFilter(self, filterList, filter):
        """
        Add a filter to a list of SQLAlchemy filters.

        :param filterList: a list of SQLAlchemy filters which is modified.
        :param filter: information on the filter.
        :return: the modified list.
""" if 'group' in filter: sublist = [] for subfilter in filter['value']: sublist = self._addFilter(sublist, subfilter) if filter['group'] == 'and': filterList.append(sqlalchemy.and_(*sublist)) elif filter['group'] == 'or': filterList.append(sqlalchemy.or_(*sublist)) return filterList operator = filter['operator'] operator = base.FilterOperators.get(operator, operator) operator = self.databaseOperators.get(operator, operator) field = self._convertFieldOrFunction(filter['field']) negate = False if operator.startswith('not_'): negate = True operator = operator.split('not_', 1)[1] if operator == 'in': values = filter['value'] if not isinstance(values, (list, tuple)): values = [values] values = [self._convertFieldOrFunction(value, True) for value in values] opfunc = field.in_(values) elif operator == 'is': value = self._convertFieldOrFunction(filter['value'], True) opfunc = field.is_(value) else: value = self._convertFieldOrFunction(filter['value'], True) opfunc = field.op(operator)(value) if negate: opfunc = sqlalchemy.not_(opfunc) filterList.append(opfunc) return filterList def _convertFieldOrFunction(self, fieldOrFunction, preferValue=False): """ Convert a string to a column reference, or a dictionary to a column or function reference. If a function is passed, this should be a canonical function reference ('func' and 'param' are both populated). :param fieldOrFunction: a string with a column name or a dictionary with either a field, function, or value. :param preferValue: if True then if fieldOrFunction is not a dictionary, return it unchanged. :returns: a constructed column or function object, or a bare value. """ if not isinstance(fieldOrFunction, dict): if preferValue: return fieldOrFunction return getattr(self.tableClass, fieldOrFunction) if 'field' in fieldOrFunction: return getattr(self.tableClass, fieldOrFunction['field']) if 'value' in fieldOrFunction: if not preferValue: return sqlalchemy.sql.elements.literal( fieldOrFunction['value']) return fieldOrFunction['value'] fieldOrFunction = self.isFunction(fieldOrFunction) if fieldOrFunction is False: raise DatabaseConnectorException('Not a function') if not self._isFunctionAllowed(fieldOrFunction['func']): raise DatabaseConnectorException('Function %s is not allowed' % fieldOrFunction['func']) param = fieldOrFunction.get('param', fieldOrFunction.get('params', [])) # Determine the function we need to call to apply the function if fieldOrFunction['func'] in ('distinct', 'cast'): if (fieldOrFunction['func'] == 'cast' and len(param) == 2 and isinstance(param[1], dict) and 'value' in param[1]): param[1]['value'] = self.types.get(param[1]['value'], param[1]['value']) funcfunc = getattr(sqlalchemy, fieldOrFunction['func']) else: funcfunc = getattr(sqlalchemy.func, fieldOrFunction['func']) return funcfunc( *[self._convertFieldOrFunction(entry, True) for entry in param]) def _isFunctionAllowed(self, proname): """ Check if the specified function is allowed. Currently, only non-volatile functions are allowed, even though there are volatile functions that are harmless. :param proname: name of the function to check. :returns: True is allowed, False is not. """ return self._allowedFunctions.get(proname, False) @classmethod def adjustDBUri(cls, uri): """ Adjust a uri to match the form sqlalchemy requires. In general, the uri is of the form dialect+driver://username:password@host:port/database. :param uri: the uri to adjust. 
        :returns: the adjusted uri
        """
        # If we specifically ask for a URI starting with sqlalchemy: (e.g.,
        # sqlalchemy:postgresql://127.0.0.1/database), use this generic class
        # rather than our specific sqlalchemy class.
        if uri.startswith('sqlalchemy:'):
            uri = uri.split('sqlalchemy:', 1)[1]
        else:
            dialect, _ = base.getDBConnectorClassFromDialect(uri)
            uri = '%s://%s' % (dialect, uri.split('://', 1)[1])
        return uri

    def connect(self, client=None):
        """
        Connect to the database.

        :param client: if None, use a new session.  If specified, if this
            client is currently marked in use, cancel the client's existing
            query and return a connection from the pool for the client to
            use.
        :return: a SQLAlchemy session object.
        """
        if not self.dbEngine:
            engine = getEngine(self.databaseUri, **self.dbparams)
            metadata = sqlalchemy.MetaData(engine)
            table = sqlalchemy.Table(self.table, metadata, schema=self.schema,
                                     autoload=True)
            # The orm.mapper is used to refer to our columns.  If the table or
            # view we are connecting to does not have any primary keys, the
            # mapper will fail.  Use the first column as a fallback; this is
            # only safe because we DON'T alter data; we have no guarantee we
            # can refer to a specific row (but we don't need to).
            fallbackPrimaryCol = None
            for col in table.c:
                if col.primary_key:
                    fallbackPrimaryCol = None
                    break
                if fallbackPrimaryCol is None:
                    fallbackPrimaryCol = col

            sqlalchemy.orm.mapper(
                self.tableClass, table, primary_key=fallbackPrimaryCol)
            self.dbEngine = engine
        # If we are asking for a specific client, clean up defunct clients
        curtime = time.time()
        if client:
            for oldsess in list(self.sessions):
                idle = curtime - self.sessions[oldsess]['last']
                if ((idle > self.dbIdleTime and
                        not self.sessions[oldsess]['used']) or
                        idle > self.dbAbandonTime):
                    # Close the session.  sqlalchemy keeps them too long
                    # otherwise
                    self.sessions[oldsess]['session'].close()
                    del self.sessions[oldsess]
        # Cancel an existing query
        if client in self.sessions and self.sessions[client]['used']:
            self.sessions[client]['session'].connection().connection.cancel()
            self.sessions[client]['session'].rollback()
            self.sessions[client]['used'] = False
        if client in self.sessions:
            sess = self.sessions[client]['session']
            # Always ensure a fresh query
            sess.rollback()
        else:
            sess = sqlalchemy.orm.sessionmaker(bind=self.dbEngine)()
            # This is a further guard against changing the database.  It isn't
            # a real guard against change, as if we somehow allow an injection
            # attack, it could be turned off.  Also, volatile functions can
            # still have side effects (for instance, setseed() changes the
            # state for generating random numbers which could have
            # cryptographic implications).
            self.setSessionReadOnly(sess)
        if client:
            if client not in self.sessions:
                self.sessions[client] = {}
            self.sessions[client]['used'] = True
            self.sessions[client]['last'] = curtime
            self.sessions[client]['session'] = sess
        return sess

    def disconnect(self, db, client=None):
        """
        Mark that a client has finished with a database connection and it can
        be closed or reused without any issue.

        :param db: the database connection to mark as finished.
        :param client: the client that owned this connection.
        """
        if client in self.sessions:
            self.sessions[client]['used'] = False
        else:
            # Close the session.  sqlalchemy keeps them too long otherwise
            db.close()

    def setSessionReadOnly(self, sess):
        """
        Set the specified session to read only if possible.  Subclasses
        should implement the appropriate behavior.

        :param sess: the session to adjust.
""" pass def getFieldInfo(self): """ Return a list of fields that are known and can be queried. :return: a list of known fields. Each entry is a dictionary with name, datatype, and optionally a description. """ if self.fields is not None: return self.fields db = self.connect() fields = [] for column in sqlalchemy.orm.class_mapper( self.tableClass).iterate_properties: if (isinstance(column, sqlalchemy.orm.ColumnProperty) and len(column.columns) == 1): try: coltype = str(column.columns[0].type) except sqlalchemy.exc.CompileError: coltype = 'unknown' fields.append({ 'name': column.key, 'type': coltype }) self.disconnect(db) if len(fields): self.fields = fields return fields @classmethod def getTableList(cls, uri, internalTables=False, dbparams={}, **kwargs): """ Get a list of known databases, each of which has a list of known tables from the database. This is of the form [{'database': (database), 'tables': [{'schema': (schema), 'table': (table 1)}, ...]}] :param uri: uri to connect to the database. :param internaltables: True to return tables about the database itself. :param dbparams: optional parameters to send to the connection. :returns: A list of known tables. """ dbEngine = sqlalchemy.create_engine(cls.adjustDBUri(uri), **dbparams) insp = sqlalchemy.engine.reflection.Inspector.from_engine(dbEngine) schemas = insp.get_schema_names() defaultSchema = insp.default_schema_name tables = [{'name': table, 'table': table} for table in dbEngine.table_names()] tables.extend([{'name': view, 'table': view} for view in insp.get_view_names()]) databaseName = base.databaseFromUri(uri) results = [{'database': databaseName, 'tables': tables}] if len(schemas) <= MAX_SCHEMAS_IN_TABLE_LIST: for schema in schemas: if not internalTables and schema.lower() == 'information_schema': continue if schema != defaultSchema: tables = [{'name': '%s.%s' % (schema, table), 'table': table, 'schema': schema} for table in dbEngine.table_names(schema=schema)] tables.extend([{'name': '%s.%s' % (schema, view), 'table': view, 'schema': schema} for view in insp.get_view_names(schema=schema)]) results[0]['tables'].extend(tables) else: log.info('Not enumerating all schemas for table list (%d schemas)', len(schemas)) return results def performSelect(self, fields, queryProps={}, filters=[], client=None): """ Perform a select query. The results are passed back as a dictionary with the following values: limit: the limit used in the query offset: the offset used in the query sort: the list of sort parameters used in the query. fields: a list of the fields that are being returned in the order that they are returned. data: a list with one entry per row of results. Each entry is a list with one entry per column. :param fields: the results from getFieldInfo. :param queryProps: general query properties, including limit, offset, and sort. :param filters: a list of filters to apply. :param client: if a client is specified, a previous query made by this client can be cancelled. :return: the results of the query. See above. 
""" if queryProps.get('fields') is None: queryProps['fields'] = [field['name'] for field in fields] result = { 'limit': queryProps.get('limit'), 'offset': queryProps.get('offset'), 'sort': queryProps.get('sort'), 'fields': queryProps.get('fields'), 'data': [] } sess = self.connect(client) query = sess.query(self.tableClass) filterQueries = [] for filter in filters: filterQueries = self._addFilter(filterQueries, filter) if len(filterQueries): query = query.filter(sqlalchemy.and_(*filterQueries)) if queryProps.get('group'): groups = [self._convertFieldOrFunction(field) for field in queryProps['group']] if len(groups): query = query.group_by(*groups) if queryProps.get('sort'): sortList = [] for pos in range(len(queryProps['sort'])): sort = queryProps['sort'][pos] sortCol = self._convertFieldOrFunction(sort[0]) if sort[1] == -1: sortCol = sortCol.desc() sortList.append(sortCol) query = query.order_by(*sortList) if (queryProps.get('limit') is not None and int(queryProps['limit']) >= 0): query = query.limit(int(queryProps['limit'])) if 'offset' in queryProps: query = query.offset(int(queryProps['offset'])) columns = [self._convertFieldOrFunction(field) for field in queryProps['fields']] # Clone the query and set it to return the columns we are interested # in. Using result['data'] = list(query.values(*columns)) is more # compact and skips one internal _clone call, but doesn't allow logging # the actual sql used. with_entities clears the columns we are # selecting (it defaults to all of the native table columns), and # add_columns puts back just what we want, including expressions. query = query.with_entities(*[]) query = query.add_columns(*columns) log.info('Query: %s', ' '.join(str(query.statement.compile( bind=sess.get_bind(), compile_kwargs={'literal_binds': True})).split())) result['data'] = list(query) self.disconnect(sess, client) return result @staticmethod def validate(table=None, **kwargs): """ Validate that the passed arguments are sufficient for connecting to the database. :returns: True if the arguments should allow connecting to the db. """ if not table or not kwargs.get('uri'): return False # We could validate other database parameters, too return True # Make a list of the dialects this module supports. There is no default # dialect. _dialects = { 'dialects': {}, 'priority': 1, } for dialect in getattr(sqlalchemy.dialects, '__all__', []): _dialects['dialects'][dialect] = dialect base.registerConnectorClass(SQLAlchemyConnector.name, SQLAlchemyConnector, _dialects)
apache-2.0
3,844,353,940,656,065,000
40.620042
93
0.578953
false
4.588262
false
false
false
0111001101111010/hyde
hyde/tests/test_model.py
1
5339
# -*- coding: utf-8 -*- """ Use nose `$ pip install nose` `$ nosetests` """ from hyde.model import Config, Expando from fswrap import File, Folder def test_expando_one_level(): d = {"a": 123, "b": "abc"} x = Expando(d) assert x.a == d['a'] assert x.b == d['b'] def test_expando_two_levels(): d = {"a": 123, "b": {"c": 456}} x = Expando(d) assert x.a == d['a'] assert x.b.c == d['b']['c'] def test_expando_three_levels(): d = {"a": 123, "b": {"c": 456, "d": {"e": "abc"}}} x = Expando(d) assert x.a == d['a'] assert x.b.c == d['b']['c'] assert x.b.d.e == d['b']['d']['e'] def test_expando_update(): d1 = {"a": 123, "b": "abc"} x = Expando(d1) assert x.a == d1['a'] assert x.b == d1['b'] d = {"b": {"c": 456, "d": {"e": "abc"}}, "f": "lmn"} x.update(d) assert x.a == d1['a'] assert x.b.c == d['b']['c'] assert x.b.d.e == d['b']['d']['e'] assert x.f == d["f"] d2 = {"a": 789, "f": "opq"} y = Expando(d2) x.update(y) assert x.a == 789 assert x.f == "opq" def test_expando_to_dict(): d = {"a": 123, "b": {"c": 456, "d": {"e": "abc"}}} x = Expando(d) assert d == x.to_dict() def test_expando_to_dict_with_update(): d1 = {"a": 123, "b": "abc"} x = Expando(d1) d = {"b": {"c": 456, "d": {"e": "abc"}}, "f": "lmn"} x.update(d) expected = {} expected.update(d1) expected.update(d) assert expected == x.to_dict() d2 = {"a": 789, "f": "opq"} y = Expando(d2) x.update(y) expected.update(d2) assert expected == x.to_dict() TEST_SITE = File(__file__).parent.child_folder('_test') import yaml class TestConfig(object): @classmethod def setup_class(cls): cls.conf1 = """ mode: development content_root: stuff # Relative path from site root media_root: media # Relative path from site root media_url: /media widgets: plugins: aggregators: """ cls.conf2 = """ mode: development deploy_root: ~/deploy_site content_root: site/stuff # Relative path from site root media_root: mmm # Relative path from site root media_url: /media widgets: plugins: aggregators: """ def setUp(self): TEST_SITE.make() TEST_SITE.parent.child_folder('sites/test_jinja').copy_contents_to(TEST_SITE) def tearDown(self): TEST_SITE.delete() def test_default_configuration(self): c = Config(sitepath=TEST_SITE, config_dict={}) for root in ['content', 'layout']: name = root + '_root' path = name + '_path' assert hasattr(c, name) assert getattr(c, name) == root assert hasattr(c, path) assert getattr(c, path) == TEST_SITE.child_folder(root) assert c.media_root_path == c.content_root_path.child_folder('media') assert hasattr(c, 'plugins') assert len(c.plugins) == 0 assert hasattr(c, 'ignore') assert c.ignore == ["*~", "*.bak", ".hg", ".git", ".svn"] assert c.deploy_root_path == TEST_SITE.child_folder('deploy') assert c.not_found == '404.html' assert c.meta.nodemeta == 'meta.yaml' def test_conf1(self): c = Config(sitepath=TEST_SITE, config_dict=yaml.load(self.conf1)) assert c.content_root_path == TEST_SITE.child_folder('stuff') def test_conf2(self): c = Config(sitepath=TEST_SITE, config_dict=yaml.load(self.conf2)) assert c.content_root_path == TEST_SITE.child_folder('site/stuff') assert c.media_root_path == c.content_root_path.child_folder('mmm') assert c.media_url == TEST_SITE.child_folder('/media') assert c.deploy_root_path == Folder('~/deploy_site') def test_read_from_file_by_default(self): File(TEST_SITE.child('site.yaml')).write(self.conf2) c = Config(sitepath=TEST_SITE) assert c.content_root_path == TEST_SITE.child_folder('site/stuff') assert c.media_root_path == c.content_root_path.child_folder('mmm') assert c.media_url == TEST_SITE.child_folder('/media') assert 
c.deploy_root_path == Folder('~/deploy_site') def test_read_from_specified_file(self): File(TEST_SITE.child('another.yaml')).write(self.conf2) c = Config(sitepath=TEST_SITE, config_file='another.yaml') assert c.content_root_path == TEST_SITE.child_folder('site/stuff') assert c.media_root_path == c.content_root_path.child_folder('mmm') assert c.media_url == TEST_SITE.child_folder('/media') assert c.deploy_root_path == Folder('~/deploy_site') def test_extends(self): another = """ extends: site.yaml mode: production media_root: xxx """ File(TEST_SITE.child('site.yaml')).write(self.conf2) File(TEST_SITE.child('another.yaml')).write(another) c = Config(sitepath=TEST_SITE, config_file='another.yaml') assert c.mode == 'production' assert c.content_root_path == TEST_SITE.child_folder('site/stuff') assert c.media_root_path == c.content_root_path.child_folder('xxx') assert c.media_url == TEST_SITE.child_folder('/media') assert c.deploy_root_path == Folder('~/deploy_site')
mit
-7,763,387,247,039,466,000
32.36875
85
0.561341
false
3.131378
true
false
false
MikeHoffert/caladbolg-engine
caladbolg/agents/character.py
1
2442
import json

from caladbolg.agents import formulas
from caladbolg.agents.stats import Stats, EquipmentStats


class Character:
    """
    Represents a player character.

    Characters have stats, equipment, and leveling information.
    """

    def __init__(self, character_file):
        self.level = 1
        self.experience_into_level = 0
        self.name = None
        self.equipment_classes = None
        self.stats = None
        self.leveling_formula = None
        self.load_character_file(character_file)

    def load_character_file(self, character_file):
        """
        Loads a character JSON file, setting the name, equipment classes, and stats.

        The ``name`` is naturally the character's name as it should appear in-game. While there's no hard
        limit, it should be kept short to prevent it from being truncated or overlapping.

        The ``equipment_classes`` is a set of string "classes" corresponding to which classes of equipment
        can be equipped. Each piece of equipment has one or more classes. If there's at least one class in
        common, we can equip the piece of equipment. The classes are not mentioned in-game, as the naming
        is intended to be internal.

        The ``stats`` is the character's ``Stats`` instance. The equipment stats are not initialized yet
        as they are not known at this time.

        :param character_file: The file to load the character data from.
        """
        character_json = json.load(character_file)
        self.name = character_json['name']
        self.equipment_classes = character_json['equipment_classes']
        self.stats = Stats(character_json['base_stats']['health'],
                           character_json['base_stats']['stamina'],
                           character_json['base_stats']['strength'],
                           character_json['base_stats']['magic'],
                           character_json['base_stats']['endurance'],
                           character_json['base_stats']['agility'])

        try:
            self.leveling_formula = formulas.leveling_formulas[character_json['leveling_formula']]
        except KeyError:
            raise ValueError('No leveling formula named "{0}" ({1})'.format(character_json['leveling_formula'],
                                                                            character_file.name)) from None

    def __str__(self):
        return str(self.__dict__)
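A minimal sketch of a character file that load_character_file above can parse; the character name, equipment class strings, stat numbers, and the "standard" formula key are illustrative assumptions, not values shipped with the repo:

import io
import json

example_character = {
    "name": "Aria",                                  # hypothetical name
    "equipment_classes": ["sword", "light_armor"],   # hypothetical classes
    "base_stats": {
        "health": 100, "stamina": 50, "strength": 12,
        "magic": 8, "endurance": 10, "agility": 14,
    },
    "leveling_formula": "standard",  # must be a key in formulas.leveling_formulas
}

# Character expects a file-like object, so an in-memory buffer works:
hero = Character(io.StringIO(json.dumps(example_character)))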
mit
2,006,252,693,176,594,400
44.075472
118
0.613432
false
4.464351
false
false
false
Parclytaxel/Kinross
kinback/algebra.py
1
1782
# Numerical algebra and methods
# Parcly Taxel / Jeremy Tan, 2018
# https://parclytaxel.tumblr.com
import numpy as np

cc_abscissas = [(np.cos(np.arange(1, 2 ** k, 2) / 2 ** k * np.pi) + 1) / 2 for k in range(2, 11)]

def ccw_generate(n):
    """Clenshaw-Curtis weights for n+1 samples where n is a power of two.
    DFT-based algorithm from Jörg Waldvogel (http://www.sam.math.ethz.ch/~waldvoge/Papers/fejer.pdf)."""
    w0 = 1 / (n ** 2 - 1)
    v = [2 / (1 - 4 * k ** 2) - w0 for k in range(n // 2)]
    dft = np.fft.rfft(v + [-3 * w0] + v[:0:-1]).real / n
    return np.append(dft, dft[-2::-1])  # ensures mathematically guaranteed symmetry of returned array

cc_weights = [ccw_generate(2 ** k) for k in range(2, 11)]

def ccquad(f, a, b):
    """Clenshaw-Curtis quadrature of f in [a, b]. f must be applicable elementwise to NumPy arrays
    (if not, use vectorize first)."""
    fs = [f(a), f((a + b) / 2), f(b)]
    res = (fs[0] + 4 * fs[1] + fs[2]) / 3
    for q in range(9):
        fs = np.insert(fs, range(1, len(fs)), f((b - a) * cc_abscissas[q] + a))
        prev, res = res, np.dot(cc_weights[q], fs)
        if abs(res - prev) <= 1e-12 * abs(prev):
            break
    return (b - a) / 2 * res

def newton(f, fp, x0, y=0):
    """Newton's method for solving f(x) = y. fp is the derivative of f."""
    x = x0
    for q in range(16):
        denom = fp(x)
        if abs(denom) == 0:
            break
        delta = (y - f(x)) / denom
        x += delta
        if abs(delta) < 1e-12 * abs(x):
            break
    return x

def quadricl2m(p):
    """Converts the list representation of a quadric, [a, b, c, d, e, f] with
    ax²+bxy+cy²+dx+ey+f=0, into the symmetric matrix representation."""
    return np.array([[p[0], p[1] / 2, p[3] / 2], [p[1] / 2, p[2], p[4] / 2], [p[3] / 2, p[4] / 2, p[5]]])
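Two quick checks of ccquad and newton above against standard identities; the expected values are mathematical facts, not repo fixtures:

import numpy as np

print(ccquad(np.sin, 0.0, np.pi))   # ~2.0, since the integral of sin over [0, pi] is 2
print(newton(lambda x: x * x, lambda x: 2 * x, x0=1.0, y=2.0))   # ~1.41421, solving x**2 = 2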
mit
-3,334,467,633,558,380,000
44.615385
150
0.562114
false
2.530583
false
false
false
StarfruitStack/crabapple
crabapple/admin/controller/deployment.py
1
2202
# Copyright 2014 The crabapple Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

import datetime

from flask import render_template, redirect, request

from crabapple.objects import Commit, Deployment, DeploymentStatus


class ControllerDeployment(object):
    def __init__(self, server):
        self.server = server

    def view_deployments(self):
        return render_template('index.html',
                               deployments=self.server.store.get_all_deployments(),
                               specs={o.id: o for o in self.server.store.get_all_specs()})

    def view_deployment(self, deployment_id):
        deployment_object = self.server.store.get_deployment(deployment_id)
        if deployment_object is None:
            return redirect('/')

        content = ''
        try:
            with open(self.server.config.logdir + '/' + str(deployment_object.id) + '.log') as f:
                content = f.read()
        except IOError:
            content = ''
        return render_template('deployment_view.html',
                               deployment=deployment_object,
                               content=content)

    def view_deploy(self):
        if request.method == 'GET':
            return render_template('deploy.html', specs=self.server.store.get_all_specs())
        elif request.method == 'POST':
            spec = request.form['spec']
            commit = request.form['commit']

            c = Commit()
            c.hash = commit
            o = Deployment(status=DeploymentStatus.SCHEDULED,
                           triggered_time=datetime.datetime.now())
            o.spec_id = int(spec)
            o.branch = '* Manual *'
            o.triggered_commit = c
            o.pusher_name = 'admin'
            o.pusher_email = '-'
            self.server.trigger_deployment(o)
            return redirect('/deployments')

    def register(self, app):
        app.add_url_rule('/deployments', 'view_deployments', self.view_deployments)
        app.add_url_rule('/deploy', 'view_deploy', self.view_deploy, methods=['GET', 'POST'])
        app.add_url_rule('/deployment/<int:deployment_id>', 'view_deployment', self.view_deployment)
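A hedged wiring sketch for the controller above; the real crabapple server object is constructed elsewhere and is stubbed with None here purely for illustration:

from flask import Flask

app = Flask(__name__)
server = None  # stand-in for the crabapple server instance built elsewhere

# register() only needs the Flask app; it mounts /deployments, /deploy,
# and /deployment/<id> onto it.
ControllerDeployment(server).register(app)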
bsd-3-clause
6,292,805,605,715,429,000
37.631579
101
0.601272
false
4.154717
false
false
false
nitely/Spirit
spirit/core/utils/ratelimit/ratelimit.py
1
3853
# -*- coding: utf-8 -*-

import hashlib
import time

from django.core.cache import caches

from ...conf import settings
from ..deprecations import warn

__all__ = ['RateLimit']

TIME_DICT = {
    's': 1,
    'm': 60}


def validate_cache_config():
    try:
        cache = settings.CACHES[settings.ST_RATELIMIT_CACHE]
    except KeyError:
        # Django will raise later when using
        # this cache so we do nothing
        return

    if (not settings.ST_RATELIMIT_SKIP_TIMEOUT_CHECK and
            cache.get('TIMEOUT', 1) is not None):
        # todo: ConfigurationError in next version
        warn(
            'settings.ST_RATELIMIT_CACHE cache\'s TIMEOUT '
            'must be None (never expire) and it may '
            'be other than the default cache. '
            'To skip this check, for example when using '
            'a third-party backend with no TIMEOUT option, set '
            'settings.ST_RATELIMIT_SKIP_TIMEOUT_CHECK to True. '
            'This will raise an exception in next version.')


def split_rate(rate):
    limit, period = rate.split('/')
    limit = int(limit)

    if len(period) > 1:
        time_ = TIME_DICT[period[-1]]
        time_ *= int(period[:-1])
    else:
        time_ = TIME_DICT[period]

    return limit, time_


def fixed_window(period):
    if settings.ST_TESTS_RATELIMIT_NEVER_EXPIRE:
        return 0

    if not period:
        # todo: assert on Spirit 0.5
        warn('Period must be greater than 0.')
        return time.time()  # Closer to no period

    timestamp = int(time.time())
    return timestamp - timestamp % period


def make_hash(key):
    return (hashlib
            .sha1(key.encode('utf-8'))
            .hexdigest())


class RateLimit:

    def __init__(self, request, uid, methods=None, field=None, rate='5/5m'):
        validate_cache_config()
        self.request = request
        self.uid = uid
        self.methods = methods or ['POST']
        self.rate = rate
        self.limit = None
        self.time = None
        self.cache_keys = []

        if self.request.method in self.methods:
            self.limit, self.time = split_rate(rate)
            self.cache_keys = self._get_keys(field)

    def _make_key(self, key):
        key_uid = '%s:%s:%d' % (
            self.uid, key, fixed_window(self.time))
        return '%s:%s' % (
            settings.ST_RATELIMIT_CACHE_PREFIX,
            make_hash(key_uid))

    def _get_keys(self, field=None):
        keys = []

        if self.request.user.is_authenticated:
            keys.append('user:%d' % self.request.user.pk)
        else:
            keys.append('ip:%s' % self.request.META['REMOTE_ADDR'])

        if field is not None:
            field_value = (getattr(self.request, self.request.method)
                           .get(field, ''))

            if field_value:
                keys.append('field:%s:%s' % (field, field_value))

        return [self._make_key(k) for k in keys]

    def _get_cache_values(self):
        return (caches[settings.ST_RATELIMIT_CACHE]
                .get_many(self.cache_keys))

    def _incr(self, key):
        cache = caches[settings.ST_RATELIMIT_CACHE]
        cache.add(key, 0)

        try:
            # This resets the timeout to
            # default, see Django ticket #26619
            return cache.incr(key)
        except ValueError:
            # Key does not exist
            # The cache is being
            # pruned too frequently
            return 1

    def incr(self):
        return [self._incr(k) for k in self.cache_keys]

    def is_limited(self, increment=True):
        if not settings.ST_RATELIMIT_ENABLE:
            return False

        if increment:
            cache_values = self.incr()
        else:
            cache_values = self._get_cache_values()

        return any(
            count > self.limit
            for count in cache_values)
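A hypothetical view-level sketch of the class above; the view name, uid string, and both helper functions are illustrative, not taken from Spirit:

def publish_comment(request):                       # hypothetical Django view
    ratelimit = RateLimit(request, 'publish_comment', rate='5/5m')
    if ratelimit.is_limited():
        # a 6th POST inside the same 5-minute fixed window is rejected
        return rate_limited_response()              # hypothetical rejection helper
    return handle_comment(request)                  # hypothetical happy path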
mit
-127,130,421,655,364,450
25.944056
76
0.563717
false
3.811078
false
false
false
joytunes/JTLocalize
localization_flow/jtlocalize/core/handle_duplicates_in_localization.py
1
2004
#!/usr/bin/env python

from localization_utils import *
import argparse


def parse_args():
    """ Parses the arguments given in the command line

    Returns:
        args: The configured arguments will be attributes of the returned object.
    """
    parser = argparse.ArgumentParser(description='Omits duplications in a given localizable file.')

    parser.add_argument("localizable_file", help="The file that requires duplication handling.")

    parser.add_argument("--log_path", default="", help="The log file path")

    return parser.parse_args()


def handle_duplications(file_path):
    """ Omits the duplications in the strings files.
        Keys that appear more than once, will be joined to one appearance and the omit will be documented.

    Args:
        file_path (str): The path to the strings file.
    """
    logging.info('Handling duplications for "%s"', file_path)
    f = open_strings_file(file_path, "r+")
    comment_key_value_tuples = extract_comment_key_value_tuples_from_file(f)
    file_elements = []
    keys_to_objects = {}
    duplicates_found = []
    for comments, key, value in comment_key_value_tuples:
        if key in keys_to_objects:
            keys_to_objects[key].add_comments(comments)
            duplicates_found.append(key)
        else:
            loc_obj = LocalizationEntry(comments, key, value)
            keys_to_objects[key] = loc_obj
            file_elements.append(loc_obj)

    # Sort by key
    file_elements = sorted(file_elements, key=lambda x: x.key)

    f.seek(0)

    for element in file_elements:
        f.write(unicode(element))
        f.write(u"\n")

    f.truncate()
    f.close()

    logging.info("Omitted %d duplicates (%s)" % (len(duplicates_found), ",".join(duplicates_found)))
    logging.info('Finished handling duplications for "%s"', file_path)


# The main method for simple command line run.
if __name__ == "__main__":
    args = parse_args()
    setup_logging(args)
    handle_duplications(args.localizable_file)
mit
-1,579,286,691,620,859,600
29.363636
106
0.65519
false
3.78828
false
false
false
berkeley-stat159/project-alpha
code/utils/functions/tgrouping.py
1
3121
from __future__ import absolute_import, division, print_function

from mask_phase_2_dimension_change import neighbor_smoothing
from mask_phase_2_dimension_change import masking_reshape_start, masking_reshape_end
import numpy as np


def t_binary_grouping(t, cutoff, prop=False, abs_on=False):
    """ Evaluates the t values above a cutoff or proportion

    Parameters
    ----------
    t:      t-value of the betas 1d numpy array
    cutoff: the limit for the false discovery rate
    prop:   logical~ if the cutoff is a proportion or a value
    abs_on: logical~ if we want to take absolute value of the t input

    Returns
    -------
    zero_one: vector of ones and zeros where ones are above the cutoff, and zeros are below
    cutoff:   the limit for the false discovery rate

    Notes
    -----
    If you want the values to be preserved multiply t*zero_one afterwards
    """
    # if you want to use proportion you'll need to provide a logical cutoff value
    assert(0 <= cutoff*prop and cutoff*prop <= 1)

    # just to be safe:
    t = np.ravel(t)

    # if we'd like to take into account abs(t)
    if abs_on:
        t = np.abs(t)

    # sexy sorting
    t_sorted = np.sort(t)

    if prop:
        num = int((1 - cutoff)*t.shape[0])
        cutoff = t_sorted[num]

    zero_one = np.zeros(t.shape)
    zero_one[t >= cutoff] = 1

    return zero_one, cutoff


def t_grouping_neighbor(t_3d, mask, cutoff, neighbors=None,
                        prop=False, abs_on=False, binary=True,
                        off_value=0, masked_value=.5):
    """ Masks a 3d array, does t_binary_grouping, and does neighboring

    Parameters
    ----------
    t_3d:      t-value of the betas 3d numpy array
    mask:      a 3d numpy array of 0s and 1s that has the same shape as t_3d
    cutoff:    the limit for the false discovery rate
    neighbors: number of neighbors for neighbor smoothing (must have binary be true)
    prop:      logical~ if the cutoff is a proportion or a value
    abs_on:    logical~ if we want to take absolute value of the t input
    binary:    if binary, then off_value is ignored and 0 is used as the off_value,
               1 as the on value
    off_value: the value of those not selected

    Returns
    -------
    output_3d: a 3d numpy array same size as the t_3d with either:
               (1) binary on_off values for inside the mask and "masked_value" for
               values outside mask, or
               (2) t values to the accepted values, and "off_values" for lost values,
               and "masked_value" for values outside mask. MOREOVER, it can have had
               neighbor smoothing applied in the binary case
    cutoff:    the limit for the false discovery rate
    """
    if neighbors != None and binary == False:
        return False

    t_1d = masking_reshape_start(t_3d, mask)
    t_1d = np.ravel(t_1d)
    zero_one, cutoff = t_binary_grouping(t_1d, cutoff, prop, abs_on)

    if not binary:
        t_1d = t_1d*zero_one + off_value*(1 - zero_one)
    else:
        t_1d = zero_one

    output_3d = masking_reshape_end(t_1d, mask, masked_value)

    if neighbors != None:
        output_3d = neighbor_smoothing(output_3d, neighbors)

    return output_3d, cutoff
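A toy check of t_binary_grouping above, with synthetic numbers rather than study data:

import numpy as np

t = np.array([-2.0, 0.5, 1.0, 3.0])

on_off, cut = t_binary_grouping(t, cutoff=1.0, prop=False, abs_on=False)
# on_off -> array([0., 0., 1., 1.]): entries with t >= 1.0 are kept

on_off, cut = t_binary_grouping(t, cutoff=0.5, prop=True, abs_on=True)
# with prop=True the cutoff is a proportion: the top 50% of |t| survive,
# so |t| = 2.0 and 3.0 are kept here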
bsd-3-clause
1,689,663,783,410,457,000
31.175258
92
0.65588
false
3.40349
false
false
false
ksmit799/Toontown-Source
toontown/golf/DistributedGolfHoleAI.py
1
18607
from direct.distributed import DistributedObjectAI from direct.directnotify import DirectNotifyGlobal from toontown.toonbase import ToontownGlobals from pandac.PandaModules import * import DistributedPhysicsWorldAI from direct.fsm.FSM import FSM from toontown.ai.ToonBarrier import * from toontown.golf import GolfGlobals import random from toontown.golf import GolfHoleBase class DistributedGolfHoleAI(DistributedPhysicsWorldAI.DistributedPhysicsWorldAI, FSM, GolfHoleBase.GolfHoleBase): defaultTransitions = {'Off': ['Cleanup', 'WaitTee'], 'WaitTee': ['WaitSwing', 'Cleanup', 'WaitTee', 'WaitPlayback'], 'WaitSwing': ['WaitPlayback', 'Cleanup', 'WaitSwing', 'WaitTee'], 'WaitPlayback': ['WaitSwing', 'Cleanup', 'WaitTee', 'WaitPlayback'], 'Cleanup': ['Off']} id = 0 notify = directNotify.newCategory('DistributedGolfHoleAI') def __init__(self, zoneId, golfCourse, holeId): FSM.__init__(self, 'Golf_%s_FSM' % self.id) DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.__init__(self, simbase.air) GolfHoleBase.GolfHoleBase.__init__(self) self.zoneId = zoneId self.golfCourse = golfCourse self.holeId = holeId self.avIdList = golfCourse.avIdList[:] self.watched = [0, 0, 0, 0] self.barrierPlayback = None self.trustedPlayerId = None self.activeGolferIndex = None self.activeGolferId = None self.holeInfo = GolfGlobals.HoleInfo[self.holeId] self.teeChosen = {} for avId in self.avIdList: self.teeChosen[avId] = -1 self.ballPos = {} for avId in self.avIdList: self.ballPos[avId] = Vec3(0, 0, 0) self.playStarted = False return def curGolfBall(self): return self.ball def generate(self): DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.generate(self) self.ball = self.createBall() self.createRays() if len(self.teePositions) > 1: startPos = self.teePositions[1] else: startPos = self.teePositions[0] startPos += Vec3(0, 0, GolfGlobals.GOLF_BALL_RADIUS) self.ball.setPosition(startPos) def delete(self): self.notify.debug('__delete__') DistributedPhysicsWorldAI.DistributedPhysicsWorldAI.delete(self) self.notify.debug('calling self.terrainModel.removeNode') self.terrainModel.removeNode() self.notify.debug('self.barrierPlayback is %s' % self.barrierPlayback) if self.barrierPlayback: self.notify.debug('calling self.barrierPlayback.cleanup') self.barrierPlayback.cleanup() self.notify.debug('calling self.barrierPlayback = None') self.barrierPlayback = None self.activeGolferId = None return def setZoneId(self, zoneId): self.zoneId = zoneId def setAvatarReadyHole(self): self.notify.debugStateCall(self) avId = self.air.getAvatarIdFromSender() self.golfCourse.avatarReadyHole(avId) def startPlay(self): self.notify.debug('startPlay') self.playStarted = True self.numGolfers = len(self.golfCourse.getGolferIds()) self.selectNextGolfer() def selectNextGolfer(self): self.notify.debug('selectNextGolfer, old golferIndex=%s old golferId=%s' % (self.activeGolferIndex, self.activeGolferId)) if self.golfCourse.isCurHoleDone(): return if self.activeGolferIndex == None: self.activeGolferIndex = 0 self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex] else: self.activeGolferIndex += 1 if self.activeGolferIndex >= len(self.golfCourse.getGolferIds()): self.activeGolferIndex = 0 self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex] safety = 0 while safety < 50 and not self.golfCourse.checkGolferPlaying(self.golfCourse.getGolferIds()[self.activeGolferIndex]): self.activeGolferIndex += 1 self.notify.debug('Index %s' % self.activeGolferIndex) if self.activeGolferIndex >= len(self.golfCourse.getGolferIds()): 
self.activeGolferIndex = 0 self.activeGolferId = self.golfCourse.getGolferIds()[self.activeGolferIndex] safety += 1 if safety != 50: golferId = self.golfCourse.getGolferIds()[self.activeGolferIndex] if self.teeChosen[golferId] == -1: self.sendUpdate('golferChooseTee', [golferId]) self.request('WaitTee') else: self.sendUpdate('golfersTurn', [golferId]) self.request('WaitSwing') else: self.notify.debug('safety') self.notify.debug('selectNextGolfer, new golferIndex=%s new golferId=%s' % (self.activeGolferIndex, self.activeGolferId)) return def clearWatched(self): self.watched = [1, 1, 1, 1] for index in range(len(self.golfCourse.getGolferIds())): self.watched[index] = 0 def setWatched(self, avId): for index in range(len(self.golfCourse.getGolferIds())): if self.golfCourse.getGolferIds()[index] == avId: self.watched[index] = 1 def checkWatched(self): if 0 not in self.watched: return True else: return False def turnDone(self): self.notify.debug('Turn Done') avId = self.air.getAvatarIdFromSender() if self.barrierPlayback: self.barrierPlayback.clear(avId) def ballInHole(self, golferId = None): self.notify.debug('ballInHole') if golferId: avId = golferId else: avId = self.air.getAvatarIdFromSender() self.golfCourse.setBallIn(avId) if self.golfCourse.isCurHoleDone(): self.notify.debug('ballInHole doing nothing') else: self.notify.debug('ballInHole calling self.selectNextGolfer') self.selectNextGolfer() def getHoleId(self): return self.holeId def finishHole(self): self.notify.debug('finishHole') self.golfCourse.holeOver() def getGolferIds(self): return self.avIdList def loadLevel(self): GolfHoleBase.GolfHoleBase.loadLevel(self) optionalObjects = self.terrainModel.findAllMatches('**/optional*') requiredObjects = self.terrainModel.findAllMatches('**/required*') self.parseLocators(optionalObjects, 1) self.parseLocators(requiredObjects, 0) self.teeNodePath = self.terrainModel.find('**/tee0') if self.teeNodePath.isEmpty(): teePos = Vec3(0, 0, 10) else: teePos = self.teeNodePath.getPos() teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS) self.notify.debug('teeNodePath heading = %s' % self.teeNodePath.getH()) self.teePositions = [teePos] teeIndex = 1 teeNode = self.terrainModel.find('**/tee%d' % teeIndex) while not teeNode.isEmpty(): teePos = teeNode.getPos() teePos.setZ(teePos.getZ() + GolfGlobals.GOLF_BALL_RADIUS) self.teePositions.append(teePos) self.notify.debug('teeNodeP heading = %s' % teeNode.getH()) teeIndex += 1 teeNode = self.terrainModel.find('**/tee%d' % teeIndex) def createLocatorDict(self): self.locDict = {} locatorNum = 1 curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum) while not curNodePath.isEmpty(): self.locDict[locatorNum] = curNodePath locatorNum += 1 curNodePath = self.hardSurfaceNodePath.find('**/locator%d' % locatorNum) def loadBlockers(self): loadAll = simbase.config.GetBool('golf-all-blockers', 0) self.createLocatorDict() self.blockerNums = self.holeInfo['blockers'] for locatorNum in self.locDict: if locatorNum in self.blockerNums or loadAll: locator = self.locDict[locatorNum] locatorParent = locator.getParent() locator.getChildren().wrtReparentTo(locatorParent) else: self.locDict[locatorNum].removeNode() self.hardSurfaceNodePath.flattenStrong() def createBall(self): golfBallGeom = self.createSphere(self.world, self.space, GolfGlobals.GOLF_BALL_DENSITY, GolfGlobals.GOLF_BALL_RADIUS, 1)[1] return golfBallGeom def preStep(self): GolfHoleBase.GolfHoleBase.preStep(self) def postStep(self): GolfHoleBase.GolfHoleBase.postStep(self) def postSwing(self, 
cycleTime, power, x, y, z, dirX, dirY): avId = self.air.getAvatarIdFromSender() self.storeAction = [avId, cycleTime, power, x, y, z, dirX, dirY] if self.commonHoldData: self.doAction() def postSwingState(self, cycleTime, power, x, y, z, dirX, dirY, curAimTime, commonObjectData): self.notify.debug('postSwingState') if not self.golfCourse.getStillPlayingAvIds(): return avId = self.air.getAvatarIdFromSender() self.storeAction = [avId, cycleTime, power, x, y, z, dirX, dirY] self.commonHoldData = commonObjectData self.trustedPlayerId = self.choosePlayerToSimulate() self.sendUpdateToAvatarId(self.trustedPlayerId, 'assignRecordSwing', [avId, cycleTime, power, x, y, z, dirX, dirY, commonObjectData]) self.golfCourse.addAimTime(avId, curAimTime) def choosePlayerToSimulate(self): stillPlaying = self.golfCourse.getStillPlayingAvIds() playerId = 0 if simbase.air.config.GetBool('golf-trust-driver-first', 0): if stillPlaying: playerId = stillPlaying[0] else: playerId = random.choice(stillPlaying) return playerId def ballMovie2AI(self, cycleTime, avId, movie, spinMovie, ballInFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData): sentFromId = self.air.getAvatarIdFromSender() if sentFromId == self.trustedPlayerId: lastFrameNum = len(movie) - 2 if lastFrameNum < 0: lastFrameNum = 0 lastFrame = movie[lastFrameNum] lastPos = Vec3(lastFrame[1], lastFrame[2], lastFrame[3]) self.ballPos[avId] = lastPos self.golfCourse.incrementScore(avId) for id in self.golfCourse.getStillPlayingAvIds(): if not id == sentFromId: self.sendUpdateToAvatarId(id, 'ballMovie2Client', [cycleTime, avId, movie, spinMovie, ballInFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame, commonObjectData]) if self.state == 'WaitPlayback' or self.state == 'WaitTee': self.notify.warning('ballMovie2AI requesting from %s to WaitPlayback' % self.state) self.request('WaitPlayback') elif self.trustedPlayerId == None: return else: self.doAction() self.trustedPlayerId = None return def performReadyAction(self): avId = self.storeAction[0] if self.state == 'WaitPlayback': self.notify.debugStateCall(self) self.notify.debug('ignoring the postSwing for avId=%d since we are in WaitPlayback' % avId) return if avId == self.activeGolferId: self.golfCourse.incrementScore(self.activeGolferId) else: self.notify.warning('activGolferId %d not equal to sender avId %d' % (self.activeGolferId, avId)) if avId not in self.golfCourse.drivingToons: position = self.ballPos[avId] else: position = Vec3(self.storeAction[3], self.storeAction[4], self.storeAction[5]) self.useCommonObjectData(self.commonHoldData) newPos = self.trackRecordBodyFlight(self.ball, self.storeAction[1], self.storeAction[2], position, self.storeAction[6], self.storeAction[7]) if self.state == 'WaitPlayback' or self.state == 'WaitTee': self.notify.warning('performReadyAction requesting from %s to WaitPlayback' % self.state) self.request('WaitPlayback') self.sendUpdate('ballMovie2Client', [self.storeAction[1], avId, self.recording, self.aVRecording, self.ballInHoleFrame, self.ballTouchedHoleFrame, self.ballFirstTouchedHoleFrame, self.commonHoldData]) self.ballPos[avId] = newPos self.trustedPlayerId = None return def postResult(self, cycleTime, avId, recording, aVRecording, ballInHoleFrame, ballTouchedHoleFrame, ballFirstTouchedHoleFrame): pass def enterWaitSwing(self): pass def exitWaitSwing(self): pass def enterWaitTee(self): pass def exitWaitTee(self): pass def enterWaitPlayback(self): self.notify.debug('enterWaitPlayback') stillPlayingList = self.golfCourse.getStillPlayingAvIds() 
self.barrierPlayback = ToonBarrier('waitClientsPlayback', self.uniqueName('waitClientsPlayback'), stillPlayingList, 120, self.handleWaitPlaybackDone, self.handlePlaybackTimeout) def hasCurGolferReachedMaxSwing(self): strokes = self.golfCourse.getCurHoleScore(self.activeGolferId) maxSwing = self.holeInfo['maxSwing'] retval = strokes >= maxSwing if retval: av = simbase.air.doId2do.get(self.activeGolferId) if av: if av.getUnlimitedSwing(): retval = False return retval def handleWaitPlaybackDone(self): if self.isCurBallInHole(self.activeGolferId) or self.hasCurGolferReachedMaxSwing(): if self.activeGolferId: self.ballInHole(self.activeGolferId) else: self.selectNextGolfer() def isCurBallInHole(self, golferId): retval = False for holePos in self.holePositions: displacement = self.ballPos[golferId] - holePos length = displacement.length() self.notify.debug('hole %s length=%s' % (holePos, length)) if length <= GolfGlobals.DistanceToBeInHole: retval = True break return retval def exitWaitPlayback(self): self.notify.debug('exitWaitPlayback') if hasattr(self, 'barrierPlayback') and self.barrierPlayback: self.barrierPlayback.cleanup() self.barrierPlayback = None return def enterCleanup(self): pass def exitCleanup(self): pass def handlePlaybackTimeout(self, task = None): self.notify.debug('handlePlaybackTimeout') self.handleWaitPlaybackDone() def getGolfCourseDoId(self): return self.golfCourse.doId def avatarDropped(self, avId): self.notify.warning('avId %d dropped, self.state=%s' % (avId, self.state)) if self.barrierPlayback: self.barrierPlayback.clear(avId) else: if avId == self.trustedPlayerId: self.doAction() if avId == self.activeGolferId and not self.golfCourse.haveAllGolfersExited(): self.selectNextGolfer() def setAvatarTee(self, chosenTee): golferId = self.air.getAvatarIdFromSender() self.teeChosen[golferId] = chosenTee self.ballPos[golferId] = self.teePositions[chosenTee] self.sendUpdate('setAvatarFinalTee', [golferId, chosenTee]) self.sendUpdate('golfersTurn', [golferId]) self.request('WaitSwing') def setBox(self, pos0, pos1, pos2, quat0, quat1, quat2, quat3, anV0, anV1, anV2, lnV0, lnV1, lnV2): self.sendUpdate('sendBox', [pos0, pos1, pos2, quat0, quat1, quat2, quat3, anV0, anV1, anV2, lnV0, lnV1, lnV2]) def parseLocators(self, objectCollection, optional = 0): if optional and objectCollection.getNumPaths(): if self.holeInfo.has_key('optionalMovers'): for optionalMoverId in self.holeInfo['optionalMovers']: searchStr = 'optional_mover_' + str(optionalMoverId) for objIndex in xrange(objectCollection.getNumPaths()): object = objectCollection.getPath(objIndex) if searchStr in object.getName(): self.fillLocator(objectCollection, objIndex) break else: for index in range(objectCollection.getNumPaths()): self.fillLocator(objectCollection, index) def fillLocator(self, objectCollection, index): path = objectCollection[index] pathName = path.getName() pathArray = pathName.split('_') sizeX = None sizeY = None move = None type = None for subString in pathArray: if subString[:1] == 'X': dataString = subString[1:] dataString = dataString.replace('p', '.') sizeX = float(dataString) elif subString[:1] == 'Y': dataString = subString[1:] dataString = dataString.replace('p', '.') sizeY = float(dataString) elif subString[:1] == 'd': dataString = subString[1:] dataString = dataString.replace('p', '.') move = float(dataString) elif subString == 'mover': type = 4 elif subString == 'windmillLocator': type = 3 if type == 4 and move and sizeX and sizeY: self.createCommonObject(4, path.getPos(), path.getHpr(), sizeX, 
sizeY, move) elif type == 3: self.createCommonObject(3, path.getPos(), path.getHpr()) return
mit
5,974,727,302,163,878,000
36.363454
185
0.604342
false
3.652729
false
false
false
FabriceSalvaire/monitor-server
MonitorServer/Tools/Singleton.py
1
4601
####################################################################################################
#
# MonitorServer - A Server Monitoring Application
# Copyright (C) 2014 Fabrice Salvaire
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
####################################################################################################

####################################################################################################
""" Singleton snippets.
"""
####################################################################################################

####################################################################################################

from __future__ import print_function

####################################################################################################

import threading

####################################################################################################

class SingletonMetaClass(type):

    """ A singleton metaclass.

    This implementation supports subclassing and is thread safe.
    """

    ##############################################

    def __init__(cls, class_name, super_classes, class_attribute_dict):

        # It is called just after cls creation in order to complete cls.

        # print('MetaSingleton __init__:', cls, class_name, super_classes, class_attribute_dict, sep='\n... ')

        type.__init__(cls, class_name, super_classes, class_attribute_dict)

        cls._instance = None
        cls._rlock = threading.RLock()  # A factory function that returns a new reentrant lock object.

    ##############################################

    def __call__(cls, *args, **kwargs):

        # It is called when cls is instantiated: cls(...).
        # type.__call__ dispatches to the cls.__new__ and cls.__init__ methods.

        # print('MetaSingleton __call__:', cls, args, kwargs, sep='\n... ')

        with cls._rlock:
            if cls._instance is None:
                cls._instance = type.__call__(cls, *args, **kwargs)

        return cls._instance

####################################################################################################

class singleton(object):

    """ A singleton class decorator.

    This implementation doesn't support subclassing.
    """

    ##############################################

    def __init__(self, cls):

        # print('singleton __init__: On @ decoration', cls, sep='\n... ')

        self._cls = cls
        self._instance = None

    ##############################################

    def __call__(self, *args, **kwargs):

        # print('singleton __call__: On instance creation', self, args, kwargs, sep='\n... ')

        if self._instance is None:
            self._instance = self._cls(*args, **kwargs)

        return self._instance

####################################################################################################

def singleton_func(cls):

    """ A singleton function decorator.

    This implementation doesn't support subclassing.
    """

    # print('singleton_func: On @ decoration', cls, sep='\n... ')

    instances = {}

    def get_instance(*args, **kwargs):

        # print('singleton_func: On instance creation', cls, sep='\n... ')

        if cls not in instances:
            instances[cls] = cls(*args, **kwargs)

        return instances[cls]

    return get_instance

####################################################################################################

class monostate(object):

    """ A monostate base class.
    """

    _shared_state = {}

    ##############################################

    def __new__(cls, *args, **kwargs):

        # print('monostate __new__:', cls, args, kwargs, sep='\n... ')

        obj = super(monostate, cls).__new__(cls, *args, **kwargs)
        obj.__dict__ = cls._shared_state

        return obj

####################################################################################################
#
# End
#
####################################################################################################
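A behavior sketch for the helpers above; the class names are illustrative, and the __metaclass__ assignment uses Python 2 syntax to match this module's vintage:

@singleton
class Config(object):
    pass

assert Config() is Config()   # the decorator always hands back the first instance

class Logger(object):
    __metaclass__ = SingletonMetaClass   # Python 2 style, as in this module

assert Logger() is Logger()   # the metaclass caches one instance per class

a, b = monostate(), monostate()
a.x = 1
assert b.x == 1   # monostate: distinct objects sharing one __dict__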
gpl-3.0
-2,197,945,327,112,230,400
30.513699
110
0.433601
false
5.312933
false
false
false
RyanChinSang/ECNG3020-ORSS4SCVI
BETA/TestCode/SpchRecg/APP-SpchRecg3.py
1
1382
import os
import pocketsphinx as ps

modeldir = "C:/Python36-64/Lib/site-packages/pocketsphinx/model/"
# datadir = "C:/Python36-64/Lib/site-packages/pocketsphinx/data/"

# Create a decoder with certain model
config = ps.Decoder.default_config()
config.set_string('-hmm', os.path.join(modeldir, 'en-us'))
config.set_string('-lm', os.path.join(modeldir, 'en-us.lm.bin'))
config.set_string('-dict', os.path.join(modeldir, 'cmudict-en-us.dict'))
# config.set_string('-kws', 'command.list')

# Open file to read the data
# stream = open(os.path.join(datadir, "goforward.raw"), "rb")

# Alternatively you can read from microphone
import pyaudio
#
#
p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=16000, input=True, frames_per_buffer=1024)
stream.start_stream()

# Process audio chunk by chunk. On keyword detected perform action and restart search
decoder = ps.Decoder(config)
# decoder = Decoder()
decoder.start_utt()
while True:
    buf = stream.read(1024)
    if buf:
        decoder.process_raw(buf, False, False)
    else:
        break
    if decoder.hyp() is not None:
        print(decoder.hyp().hypstr)
        # print([(seg.word, seg.prob, seg.start_frame, seg.end_frame) for seg in decoder.seg()])
        # print("Detected keyword, restarting search")
        decoder.end_utt()
        decoder.start_utt()
        # print(decoder.hyp().hypstr)
gpl-3.0
2,085,290,420,487,328,000
32.707317
99
0.68958
false
3.112613
true
false
false
Kocal/django-tornado-websockets
tornado_websockets/modules/progressbar.py
1
3746
# coding=utf-8

from tornado_websockets.modules.module import Module


class ProgressBar(Module):
    """
        Initialize a new ProgressBar module instance.

        If ``min`` and ``max`` values are equal, this progress bar has its indeterminate state set to ``True``.

        :param min: Minimum value
        :param max: Maximum value
        :type min: int
        :type max: int
    """

    def __init__(self, name='', min=0, max=100, indeterminate=False):
        if name:
            name = '_' + name

        super(ProgressBar, self).__init__('progressbar' + name)

        if max < min:
            raise ValueError('Param « min » can not be greater or equal than param « max ».')

        self.min = self.current = min
        self.max = max
        self.indeterminate = indeterminate

    def initialize(self):
        @self.on
        def open():
            self.emit_init()

    def tick(self, label=None):
        """
            Increments progress bar's ``current`` value by ``1`` and emits an ``update`` event.
            Can also emit a ``done`` event if the progression is done.

            Calls :meth:`~tornado_websockets.modules.progress_bar.ProgressBar.emit_update` method each time
            this method is called.
            Calls :meth:`~tornado_websockets.modules.progress_bar.ProgressBar.emit_done` method if the
            progression is done.

            :param label: A label which can be displayed on the client screen
            :type label: str
        """
        if not self.indeterminate and self.current < self.max:
            self.current += 1

        self.emit_update(label)

        if self.is_done():
            self.emit_done()

    def reset(self):
        """
            Reset progress bar's progression to its minimum value.
        """
        self.current = self.min

    def is_done(self):
        """
            Return ``True`` if progress bar's progression is done, otherwise ``False``.

            Returns ``False`` if progress bar is indeterminate, returns ``True`` if progress bar is
            determinate and current value is equal to ``max`` value.
            Returns ``False`` by default.

            :rtype: bool
        """
        if self.indeterminate:
            return False

        if self.current == self.max:  # compare values, not identity
            return True

        return False

    def emit_init(self):
        """
            Emit ``before_init``, ``init`` and ``after_init`` events to initialize a client-side progress bar.

            If progress bar is not indeterminate, ``min``, ``max`` and ``value`` values are sent with ``init`` event.
        """
        data = {'indeterminate': self.indeterminate}

        if not self.indeterminate:
            data.update({
                'min': int(self.min),
                'max': int(self.max),
                'current': int(self.current),
            })

        self.emit('before_init')
        self.emit('init', data)
        self.emit('after_init')

    def emit_update(self, label=None):
        """
            Emit ``before_update``, ``update`` and ``after_update`` events to update a client-side progress bar.

            :param label: A label which can be displayed on the client screen
            :type label: str
        """
        data = {}

        if not self.indeterminate:
            data.update({'current': self.current})

        if label:
            data.update({'label': label})

        self.emit('before_update')
        self.emit('update', data)
        self.emit('after_update')

    def emit_done(self):
        """
            Emit ``done`` event when progress bar's progression
            :meth:`~tornado_websockets.modules.progress_bar.ProgressBar.is_done`.
        """
        self.emit('done')
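A hypothetical server-side usage sketch for the module above; it assumes the module has been attached to a tornado_websockets WebSocket so that emit() has somewhere to send events, and the work items and worker function are illustrative:

bar = ProgressBar('export', min=0, max=3)

for item in ('users', 'posts', 'comments'):   # illustrative work items
    do_one_unit_of_work(item)                 # hypothetical worker function
    bar.tick(label='exported ' + item)        # emits 'update'; 'done' fires at 3/3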
gpl-3.0
7,786,795,235,155,894,000
28.234375
117
0.552378
false
4.316032
false
false
false
zhouzhaoze/dip
project3/Proj04-04/main.py
1
2549
#!/bin/python
# *-* encoding=utf-8 *-*
'''
Image Printing Program Based on Halftoning
'''

import sys
import numpy, scipy
from scipy import ndimage
from scipy import misc
import scipy.fftpack as fftpack
import matplotlib.pyplot as plt

sys.path.append('../Proj04-01')
from DFT import DFT_2D, IDFT_2D


def en_padding(img):
    M, N = img.shape
    P, Q = 2 * M, 2 * N
    _img = numpy.zeros(P*Q).reshape((P, Q))
    for x in range(M):
        for y in range(N):
            _img[x][y] = img[x][y]
    return _img


def de_padding(img):
    P, Q = img.shape
    M, N = P/2, Q/2
    _img = numpy.zeros(M*N).reshape((M, N))
    for x in range(M):
        for y in range(N):
            _img[x][y] = img[x][y]
    return _img


def shift(img):
    M, N = img.shape
    _img = img.copy()
    for x in range(M):
        for y in range(N):
            _img[x][y] = img[x][y] * numpy.power(-1.0, (x+y))
    return _img


def sqdistance(p1, p2):
    return ((p1[0]-p2[0])*(p1[0]-p2[0])) + \
           ((p1[1]-p2[1])*(p1[1]-p2[1]))


def lowpass_mask(P, Q, cut_off_frequency):
    center = (P/2.0, Q/2.0)
    mask = numpy.zeros(P * Q).reshape(P, Q)
    for u in range(P):
        for v in range(Q):
            mask[u][v] = numpy.exp(-1*sqdistance(center, (u, v)) / (2*(cut_off_frequency*cut_off_frequency)))
    return mask


def highpass_mask(P, Q, cut_off_frequency):
    return 1.0 - lowpass_mask(P, Q, cut_off_frequency)
    # center = (P/2.0, Q/2.0)
    # mask = numpy.zeros(P * Q).reshape(P, Q)
    # for u in range(P):
    #     for v in range(Q):
    #         mask[u][v] = 1.0-numpy.exp(-1*sqdistance(center, (u, v)) / (2*(cut_off_frequency*cut_off_frequency)))
    # return mask


def main():
    img_file = 'Fig0441(a)(characters_test_pattern).tif'
    img = misc.imread(img_file)

    padding_img = en_padding(img)
    padding_img = shift(padding_img)
    dft_img = DFT_2D(padding_img)

    for cut_off_frequency in [30, 60, 160]:
        print cut_off_frequency
        hp_mask = highpass_mask(dft_img.shape[0], dft_img.shape[1], cut_off_frequency)
        misc.imsave('%s_hpmask_%d.tif' % (img_file, cut_off_frequency), 255 * hp_mask)
        hp_img = numpy.multiply(dft_img, hp_mask)
        misc.imsave('%s_fft_%d.tif' % (img_file, cut_off_frequency), numpy.log(1+numpy.abs(hp_img)))
        hp_idtft_img = shift(IDFT_2D(hp_img).real)
        hp_idtft_img = de_padding(hp_idtft_img)
        print hp_idtft_img.shape
        misc.imsave('%s_hp_%d.tif' % (img_file, cut_off_frequency), hp_idtft_img)


if __name__ == '__main__':
    main()
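A sanity check of the Gaussian high-pass mask above, H(u,v) = 1 - exp(-D^2(u,v) / (2*D0^2)): it is 0 at the center frequency and tends toward 1 far from it. The 64x64 size and D0 = 10 are arbitrary choices for illustration:

mask = highpass_mask(64, 64, 10)
print(mask[32][32])   # 0.0 exactly at the center, where D = 0
print(mask[0][0])     # ~1.0 in the far corner, where D^2 = 2048 >> 2*D0^2 = 200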
apache-2.0
-3,213,598,260,219,212,000
27.322222
114
0.567674
false
2.585193
false
false
false
solarpermit/solarpermit
website/migrations/0086_auto__del_actiontutorial__del_person__del_userreward__del_applicationh.py
1
68516
# -*- coding: utf-8 -*- import datetime from south.db import db from south.v2 import SchemaMigration from django.db import models class Migration(SchemaMigration): def forwards(self, orm): # Deleting model 'ActionTutorial' db.delete_table('website_actiontutorial') # Deleting model 'Person' db.delete_table('website_person') # Deleting model 'UserReward' db.delete_table('website_userreward') # Deleting model 'ApplicationHistory' db.delete_table('website_applicationhistory') # Deleting model 'Document' db.delete_table('website_document') # Deleting model 'UserTutorialHistory' db.delete_table('website_usertutorialhistory') # Deleting model 'Tutorial' db.delete_table('website_tutorial') # Deleting model 'DocumentCategory' db.delete_table('website_documentcategory') # Deleting model 'QuestionDependency' db.delete_table('website_questiondependency') # Deleting model 'ApplicationAnswer' db.delete_table('website_applicationanswer') # Deleting model 'PersonAddress' db.delete_table('website_personaddress') # Deleting model 'Application' db.delete_table('website_application') # Deleting model 'Region' db.delete_table('website_region') # Deleting model 'UserTutorialPageHistory' db.delete_table('website_usertutorialpagehistory') # Deleting model 'TutorialPage' db.delete_table('website_tutorialpage') # Deleting field 'Jurisdiction.region' db.delete_column('website_jurisdiction', 'region_id') # Deleting field 'OrganizationAddress.address_type' db.delete_column('website_organizationaddress', 'address_type') # Deleting field 'OrganizationMember.person' db.delete_column('website_organizationmember', 'person_id') def backwards(self, orm): # Adding model 'ActionTutorial' db.create_table('website_actiontutorial', ( ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), ('action_identifier', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)), )) db.send_create_signal('website', ['ActionTutorial']) # Adding model 'Person' db.create_table('website_person', ( ('phone_primary', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)), ('last_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('first_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)), ('phone_secondary', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)), ('phone_mobile', self.gf('django.contrib.localflavor.us.models.PhoneNumberField')(max_length=20, null=True, blank=True)), ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, null=True, blank=True)), )) db.send_create_signal('website', ['Person']) # Adding model 'UserReward' 
db.create_table('website_userreward', ( ('reward_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)), ('reward', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.RewardCategory'], null=True, blank=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), )) db.send_create_signal('website', ['UserReward']) # Adding model 'ApplicationHistory' db.create_table('website_applicationhistory', ( ('status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)), ('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Application'])), ('status_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('status_by', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), )) db.send_create_signal('website', ['ApplicationHistory']) # Adding model 'Document' db.create_table('website_document', ( ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('file_name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)), ('jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), ('reviewed', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)), ('accepted', self.gf('django.db.models.fields.BooleanField')(default=False, db_index=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('title', self.gf('django.db.models.fields.CharField')(blank=True, max_length=128, null=True, db_index=True)), ('region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Region'], null=True, blank=True)), ('file_path', self.gf('django.db.models.fields.files.FileField')(max_length=100)), )) db.send_create_signal('website', ['Document']) # Adding model 'UserTutorialHistory' db.create_table('website_usertutorialhistory', ( ('view_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), ('user_email', self.gf('django.db.models.fields.EmailField')(blank=True, max_length=75, null=True, db_index=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)), )) 
db.send_create_signal('website', ['UserTutorialHistory']) # Adding model 'Tutorial' db.create_table('website_tutorial', ( ('start_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('identifier', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)), ('active', self.gf('django.db.models.fields.BooleanField')(default=True)), ('name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=128, null=True, db_index=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('end_datetime', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)), )) db.send_create_signal('website', ['Tutorial']) # Adding model 'DocumentCategory' db.create_table('website_documentcategory', ( ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('name', self.gf('django.db.models.fields.CharField')(max_length=64, null=True, blank=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), )) db.send_create_signal('website', ['DocumentCategory']) # Adding model 'QuestionDependency' db.create_table('website_questiondependency', ( ('required', self.gf('django.db.models.fields.BooleanField')(default=False)), ('answer_text', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('question2', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_questionDependency_question2', to=orm['website.Question'])), ('question1', self.gf('django.db.models.fields.related.ForeignKey')(related_name='_questionDependency_question1', to=orm['website.Question'])), ('strength', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), )) db.send_create_signal('website', ['QuestionDependency']) # Adding model 'ApplicationAnswer' db.create_table('website_applicationanswer', ( ('application', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Application'])), ('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'])), ('file_upload', self.gf('django.db.models.fields.files.FileField')(max_length=100, null=True, blank=True)), ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)), ('question', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Question'])), ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)), ('value', self.gf('django.db.models.fields.TextField')(null=True, blank=True)), ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)), )) db.send_create_signal('website', ['ApplicationAnswer']) # Adding model 'PersonAddress' db.create_table('website_personaddress', ( ('person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Person'], null=True, blank=True)), ('address', 
            self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Address'], null=True, blank=True)),
            ('display_order', self.gf('django.db.models.fields.PositiveSmallIntegerField')(null=True, blank=True)),
            ('address_type', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal('website', ['PersonAddress'])

        # Adding model 'Application'
        db.create_table('website_application', (
            ('jurisdiction', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Jurisdiction'], null=True, blank=True)),
            ('template', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Template'], null=True, blank=True)),
            ('address', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Address'], null=True, blank=True)),
            ('applicant', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'], null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('current_status', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
            ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal('website', ['Application'])

        # Adding model 'Region'
        db.create_table('website_region', (
            ('state', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True)),
            ('description', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('name', self.gf('django.db.models.fields.CharField')(blank=True, max_length=64, null=True, db_index=True)),
            ('latitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True)),
            ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('longitude', self.gf('django.db.models.fields.DecimalField')(null=True, max_digits=10, decimal_places=7, blank=True)),
            ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal('website', ['Region'])

        # Adding model 'UserTutorialPageHistory'
        db.create_table('website_usertutorialpagehistory', (
            ('checked', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
            ('user_email', self.gf('django.db.models.fields.EmailField')(blank=True, max_length=75, null=True, db_index=True)),
            ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('page', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.TutorialPage'], null=True, blank=True)),
            ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal('website', ['UserTutorialPageHistory'])

        # Adding model 'TutorialPage'
        db.create_table('website_tutorialpage', (
            ('selector', self.gf('django.db.models.fields.CharField')(max_length=128, null=True, blank=True)),
            ('display_order', self.gf('django.db.models.fields.SmallIntegerField')(default=0)),
            ('create_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now_add=True, null=True, blank=True)),
            ('tip', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('tutorial', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Tutorial'], null=True, blank=True)),
            ('modify_datetime', self.gf('django.db.models.fields.DateTimeField')(auto_now=True, null=True, blank=True)),
        ))
        db.send_create_signal('website', ['TutorialPage'])

        # Adding field 'Jurisdiction.region'
        db.add_column('website_jurisdiction', 'region', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Region'], null=True, blank=True), keep_default=False)

        # Adding field 'OrganizationAddress.address_type'
        db.add_column('website_organizationaddress', 'address_type', self.gf('django.db.models.fields.CharField')(blank=True, max_length=8, null=True, db_index=True), keep_default=False)

        # Adding field 'OrganizationMember.person'
        db.add_column('website_organizationmember', 'person', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['website.Person'], null=True, blank=True), keep_default=False)

    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'website.action': {
            'Meta': {'object_name': 'Action'},
            'action_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ActionCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
            'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
            'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.actioncategory': {
            'Meta': {'object_name': 'ActionCategory'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
        },
        'website.address': {
            'Meta': {'object_name': 'Address'},
            'address1': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'address2': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'})
        },
        'website.answerchoice': {
            'Meta': {'object_name': 'AnswerChoice'},
            'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']"}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'})
        },
        'website.answerchoicegroup': {
            'Meta': {'object_name': 'AnswerChoiceGroup'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'})
        },
        'website.answerreference': {
            'Meta': {'object_name': 'AnswerReference'},
            'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'file_upload': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_callout': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'migrated_answer_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
            'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
            'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'website.applicability': {
            'Meta': {'object_name': 'Applicability'},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'website.comment': {
            'Meta': {'object_name': 'Comment'},
            'approval_status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'comment_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'parent_comment': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_reference'", 'null': 'True', 'to': "orm['website.Comment']"}),
            'rating': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'rating_status': ('django.db.models.fields.CharField', [], {'default': "'U'", 'max_length': '8', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.entityview': {
            'Meta': {'object_name': 'EntityView'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latest_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'session_key': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.entityviewcount': {
            'Meta': {'object_name': 'EntityViewCount'},
            'count_30_days': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'total_count': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'website.jurisdiction': {
            'Meta': {'object_name': 'Jurisdiction'},
            'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'last_contributed': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'last_contributed_by': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'last_contributed_by_org': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_contributor'", 'null': 'True', 'to': "orm['website.Organization']"}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_org_jurisdiction'", 'null': 'True', 'to': "orm['website.Organization']"}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parent_jurisdiction'", 'null': 'True', 'to': "orm['website.Jurisdiction']"}),
            'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '8', 'null': 'True', 'blank': 'True'})
        },
        'website.jurisdictioncontributor': {
            'Meta': {'object_name': 'JurisdictionContributor'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
            'points': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.migrationhistory': {
            'Meta': {'object_name': 'MigrationHistory'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'notes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'notes2': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'source_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'source_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'target_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'target_table': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'website.organization': {
            'Meta': {'object_name': 'Organization'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.OrganizationCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'fax': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'logo_scaled': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'parent_org': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
            'phone': ('django.contrib.localflavor.us.models.PhoneNumberField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'A'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'status_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'})
        },
        'website.organizationaddress': {
            'Meta': {'object_name': 'OrganizationAddress'},
            'address': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Address']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'})
        },
        'website.organizationcategory': {
            'Meta': {'object_name': 'OrganizationCategory'},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'website.organizationmember': {
            'Meta': {'object_name': 'OrganizationMember'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'invitation_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'invitor': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_invitor'", 'null': 'True', 'to': "orm['auth.User']"}),
            'join_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
            'requested_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'role': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RoleType']", 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.CharField', [], {'default': "'P'", 'max_length': '8', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'_member_user'", 'null': 'True', 'to': "orm['auth.User']"})
        },
        'website.organizationrating': {
            'Meta': {'object_name': 'OrganizationRating'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']", 'null': 'True', 'blank': 'True'}),
            'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
        },
        'website.question': {
            'Meta': {'object_name': 'Question'},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'answer_choice_group': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.AnswerChoiceGroup']", 'null': 'True', 'blank': 'True'}),
            'applicability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Applicability']", 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'default_value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'display_template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'field_attributes': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'field_suffix': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'form_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'has_multivalues': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'instruction': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'js': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'label': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'migration_type': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'qtemplate': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']", 'null': 'True'}),
            'question': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'state_exclusive': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'template': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'terminology': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'validation_class': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'website.questioncategory': {
            'Meta': {'object_name': 'QuestionCategory'},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'website.ratingcategory': {
            'Meta': {'object_name': 'RatingCategory'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'rating_type': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'})
        },
        'website.ratinglevel': {
            'Meta': {'object_name': 'RatingLevel'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'rank': ('django.db.models.fields.PositiveSmallIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'})
        },
        'website.reaction': {
            'Meta': {'object_name': 'Reaction'},
            'action': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Action']", 'null': 'True', 'blank': 'True'}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.ReactionCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'question_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.QuestionCategory']", 'null': 'True', 'blank': 'True'}),
            'reaction_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.reactioncategory': {
            'Meta': {'object_name': 'ReactionCategory'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'points': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'rating_category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'})
        },
        'website.rewardcategory': {
            'Meta': {'object_name': 'RewardCategory'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'website.roletype': {
            'Meta': {'object_name': 'RoleType'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'})
        },
        'website.servervariable': {
            'Meta': {'object_name': 'ServerVariable'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'value': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'})
        },
        'website.template': {
            'Meta': {'object_name': 'Template'},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
            'template_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
        },
        'website.templatequestion': {
            'Meta': {'object_name': 'TemplateQuestion'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
            'template': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Template']"})
        },
        'website.usercommentview': {
            'Meta': {'object_name': 'UserCommentView'},
            'comments_count': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'last_comment': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Comment']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'view_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'})
        },
        'website.userdetail': {
            'Meta': {'object_name': 'UserDetail'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'display_preference': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'migrated_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'notification_preference': ('django.db.models.fields.CharField', [], {'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'old_password': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'reset_password_key': ('django.db.models.fields.CharField', [], {'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'title': ('django.db.models.fields.CharField', [], {'max_length': '124', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.userfavorite': {
            'Meta': {'object_name': 'UserFavorite'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.userpageview': {
            'Meta': {'object_name': 'UserPageView'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_page_view_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.userrating': {
            'Meta': {'object_name': 'UserRating'},
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingCategory']", 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.RatingLevel']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'scale': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'updated_datetime': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.usersearch': {
            'Meta': {'object_name': 'UserSearch'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
            'entity_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'search_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'})
        },
        'website.view': {
            'Meta': {'object_name': 'View'},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'jurisdiction': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Jurisdiction']", 'null': 'True', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'reviewed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'view_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'db_index': 'True', 'blank': 'True'})
        },
        'website.vieworgs': {
            'Meta': {'object_name': 'ViewOrgs'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'organization': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Organization']"}),
            'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
        },
        'website.viewquestions': {
            'Meta': {'object_name': 'ViewQuestions'},
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'display_order': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'question': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.Question']"}),
            'view': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['website.View']"})
        },
        'website.zipcode': {
            'Meta': {'object_name': 'Zipcode'},
            'city': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'county': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '64', 'null': 'True', 'blank': 'True'}),
            'create_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'latitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'longitude': ('django.db.models.fields.DecimalField', [], {'db_index': 'True', 'null': 'True', 'max_digits': '10', 'decimal_places': '7', 'blank': 'True'}),
            'modify_datetime': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'}),
            'state': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '2', 'null': 'True', 'blank': 'True'}),
            'zip_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'db_index': 'True'})
        }
    }

    complete_apps = ['website']
bsd-3-clause
3,161,687,375,410,409,000
87.983117
200
0.573939
false
3.582536
false
false
false
Upande/MaMaSe
apps/event/migrations/0001_initial.py
1
3706
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import wagtail.wagtailcore.fields
from django.conf import settings
import modelcluster.fields


class Migration(migrations.Migration):

    dependencies = [
        ('taggit', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('wagtailcore', '0001_squashed_0016_change_page_url_path_to_text_field'),
    ]

    operations = [
        migrations.CreateModel(
            name='Attendee',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('tickets', models.IntegerField(default=1)),
                ('date', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='EventIndexPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('intro', wagtail.wagtailcore.fields.RichTextField(blank=True)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='EventIndexRelatedLink',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('sort_order', models.IntegerField(null=True, editable=False, blank=True)),
                ('link_external', models.URLField(verbose_name=b'External link', blank=True)),
                ('title', models.CharField(help_text=b'Link title', max_length=255)),
            ],
            options={
                'ordering': ['sort_order'],
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='EventPage',
            fields=[
                ('page_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
                ('start_date', models.DateTimeField(verbose_name=b'Start date')),
                ('end_date', models.DateTimeField(verbose_name=b'End date')),
                ('body', wagtail.wagtailcore.fields.RichTextField(blank=True)),
                ('location', wagtail.wagtailcore.fields.RichTextField(max_length=200)),
                ('maps_url', models.URLField(verbose_name=b'Map Link', blank=True)),
                ('cost', models.IntegerField(default=0)),
            ],
            options={
                'abstract': False,
            },
            bases=('wagtailcore.page',),
        ),
        migrations.CreateModel(
            name='EventPageTag',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('content_object', modelcluster.fields.ParentalKey(related_name='tagged_items', to='event.EventPage')),
                ('tag', models.ForeignKey(related_name='event_eventpagetag_items', to='taggit.Tag')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='watchlist',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('date', models.DateTimeField(auto_now_add=True)),
                ('event', models.ForeignKey(to='event.EventPage')),
                ('user', models.ForeignKey(to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
apache-2.0
5,118,050,073,958,929,000
41.113636
146
0.552887
false
4.470446
false
false
false
marcoscastro/facebook_profile_photo
gui.py
1
1243
from tkinter import *
from face_photo import *
from PIL import Image, ImageTk

window = Tk()  # create a window
window.title('Facebook profile photo')  # set the window title
window.geometry('450x300')  # set the window size

entry = Entry(window, width=25, justify='center')  # create a text entry
entry.insert(0, 'Digite o ID do Facebook')  # set the placeholder text
entry.pack()  # geometry manager
entry.focus_set()  # give focus to the text entry

old_label_image = None

# callback for the button click event
def click_button():
    global old_label_image
    ID = entry.get()  # get the text
    if not ID:  # check whether the text is empty
        entry.insert(0, 'Digite o ID do Facebook')
    else:
        if get_photo(ID):
            # load the image
            img = ImageTk.PhotoImage(Image.open(ID + '.png'))
            # create a panel (label) to hold the image
            label_image = Label(window, image=img)
            label_image.pack_forget()
            label_image.image = img
            label_image.pack()
            if old_label_image is not None:
                old_label_image.destroy()
            old_label_image = label_image
        else:
            pass

# create a button
btn = Button(window, text='Show photo', width=20, command=click_button)
btn.pack()

# application main loop
window.mainloop()
mit
2,414,084,095,627,359,000
26.422222
77
0.70884
false
2.815068
false
false
false
oVirt/ovirt-scheduler-proxy
src/ovirtscheduler/oschedproxyd.py
1
3539
#
# Copyright 2013 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#

import logging
import os
import socketserver
import sys
from logging.handlers import RotatingFileHandler
from time import strftime
from xmlrpc.server import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler

from ovirtscheduler import API
from ovirtscheduler.request_handler import RequestHandler


class SimpleThreadedXMLRPCServer(socketserver.ThreadingMixIn, SimpleXMLRPCServer):
    pass


class XMLPRPCRequestHandler(SimpleXMLRPCRequestHandler):
    if sys.version_info[:2] == (2, 6):
        # Override BaseHTTPServer.BaseRequestHandler implementation to avoid
        # pointless and slow attempt to get the fully qualified host name from
        # the client address. This method is not used any more in Python 2.7.
        def address_string(self):
            return self.client_address[0]


def setup_logging(path):
    file_handler = RotatingFileHandler(path, maxBytes=50*1024, backupCount=6)
    log_formatter = logging.Formatter('%(asctime)s %(levelname)-8s'
                                      ' [process:%(processName)s,'
                                      ' thread:%(threadName)s] '
                                      '%(message)s',
                                      '%a, %d %b %Y %H:%M:%S')
    file_handler.setFormatter(log_formatter)
    logger = logging.getLogger()
    logger.addHandler(file_handler)
    logger.setLevel(logging.DEBUG)


class ProxyServer(object):
    def __init__(self, plugin_path=None):
        self._server = None
        self._handler = None
        if plugin_path is None:
            self._plugin_path = os.path.join(os.getcwd(), "plugins")
        else:
            self._plugin_path = plugin_path

    def setup(self):
        logging.info("Setting up server")
        self._server = SimpleThreadedXMLRPCServer(
            ("localhost", 18781),
            allow_none=True,
            requestHandler=XMLPRPCRequestHandler)
        analyzer_path = os.path.dirname(__file__)
        logging.info("Loading modules from %s" % self._plugin_path)
        logging.info("Loading analyzer from %s" % analyzer_path)
        self._handler = RequestHandler(
            self._plugin_path,
            analyzer_path)

    def run(self):
        logging.info("Publishing API")
        self._server.register_introspection_functions()
        self._server.register_instance(API.API(self._handler))
        self._server.serve_forever()


# for test runs
def main():
    server = ProxyServer(os.environ.get("OSCHEDPROXY_PLUGINS", None))
    server.setup()
    server.run()


if __name__ == "__main__":
    log_filename = '/var/log/ovirt-scheduler-proxy/ovirt-scheduler-proxy.log'
    try:
        setup_logging(log_filename)
    except IOError:
        log_filename = './ovirt-scheduler-proxy.' \
            + strftime("%Y%m%d_%H%M%S") + '.log'
        setup_logging(log_filename)
    main()
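
# Illustrative note (not part of the original file): main() reads the plugin
# directory from the OSCHEDPROXY_PLUGINS environment variable, so a local test
# run could look like the following; the plugin path is hypothetical:
#
#   OSCHEDPROXY_PLUGINS=/tmp/my_plugins python -m ovirtscheduler.oschedproxyd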
apache-2.0
651,295,214,918,901,600
32.386792
78
0.627578
false
4.274155
false
false
false
imapp-pl/golem
golem/ethereum/client.py
1
2118
import logging

import rlp
from eth_rpc_client import Client as EthereumRpcClient

from .node import NodeProcess

log = logging.getLogger('golem.ethereum')


class Client(EthereumRpcClient):
    """ RPC interface client for Ethereum node."""

    STATIC_NODES = ["enode://f1fbbeff7e9777a3a930f1e55a5486476845f799f7d603f71be7b00898df98f2dc2e81b854d2c774c3d266f1fa105d130d4a43bc58e700155c4565726ae6804e@94.23.17.170:30900"]  # noqa

    node = None

    def __init__(self, datadir, nodes=None):
        if not nodes:
            nodes = Client.STATIC_NODES
        if not Client.node:
            Client.node = NodeProcess(nodes, datadir)
        else:
            assert Client.node.datadir == datadir, \
                "Ethereum node's datadir cannot be changed"
        if not Client.node.is_running():
            Client.node.start(rpc=True)
        super(Client, self).__init__(port=Client.node.rpcport)

    @staticmethod
    def _kill_node():
        # FIXME: Keeping the node as a static object might not be the best.
        if Client.node:
            Client.node.stop()
            Client.node = None

    def get_peer_count(self):
        """
        https://github.com/ethereum/wiki/wiki/JSON-RPC#net_peerCount
        """
        response = self.make_request("net_peerCount", [])
        return int(response['result'], 16)

    def is_syncing(self):
        """
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_syncing
        """
        response = self.make_request("eth_syncing", [])
        result = response['result']
        return bool(result)

    def get_transaction_count(self, address):
        """
        https://github.com/ethereum/wiki/wiki/JSON-RPC#eth_gettransactioncount
        """
        response = self.make_request("eth_getTransactionCount", [address, "pending"])
        return int(response['result'], 16)

    def send_raw_transaction(self, data):
        response = self.make_request("eth_sendRawTransaction", [data])
        return response['result']

    def send(self, transaction):
        return self.send_raw_transaction(rlp.encode(transaction).encode('hex'))
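
# Illustrative usage sketch (not part of the original module); the datadir is a
# hypothetical path, and the first Client constructed spawns the shared node:
#
#   client = Client(datadir="/tmp/golem-eth")
#   if not client.is_syncing():
#       log.info("peers: %d", client.get_peer_count())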
gpl-3.0
3,904,709,206,332,040,700
32.09375
186
0.635033
false
3.53
false
false
false
rajul/tvb-framework
tvb/interfaces/command/demos/datatypes/search_and_export.py
1
4973
# -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
#   Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
#   Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
#   The Virtual Brain: a simulator of primate brain network dynamics.
#   Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#

"""
Demo script on how to filter datatypes and later export them.

.. moduleauthor:: Lia Domide <lia.domide@codemart.ro>
"""

if __name__ == "__main__":
    from tvb.basic.profile import TvbProfile
    TvbProfile.set_profile(TvbProfile.COMMAND_PROFILE)

from tvb.basic.filters.chain import FilterChain
from tvb.core.entities.file.files_helper import FilesHelper
from tvb.core.entities.storage import dao
from tvb.core.entities.transient.structure_entities import DataTypeMetaData
from tvb.datatypes.time_series import TimeSeriesRegion
from tvb.datatypes.connectivity import Connectivity
from sys import argv
import os

TVB_EXPORTER = "TVBExporter"


def _retrieve_entities_by_filters(kind, project_id, filters):
    named_tuple_array, counter = dao.get_values_of_datatype(project_id, kind, filters)
    print "Found " + str(counter) + " entities of type " + str(kind)

    result = []
    for named_tuple in named_tuple_array:
        dt_id = named_tuple[0]
        result.append(dao.get_generic_entity(kind, dt_id)[0])

    return result


def search_and_export_ts(project_id, export_folder=os.path.join("~", "TVB")):
    #### This is the simplest filter you could write: filter and entity by Subject
    filter_connectivity = FilterChain(fields=[FilterChain.datatype + '.subject'],
                                      operations=["=="],
                                      values=[DataTypeMetaData.DEFAULT_SUBJECT])

    connectivities = _retrieve_entities_by_filters(Connectivity, project_id, filter_connectivity)

    #### A more complex filter: by linked entity (connectivity), BOLD monitor, sampling, operation param:
    filter_timeseries = FilterChain(fields=[FilterChain.datatype + '._connectivity',
                                            FilterChain.datatype + '._title',
                                            FilterChain.datatype + '._sample_period',
                                            FilterChain.datatype + '._sample_rate',
                                            FilterChain.operation + '.parameters'
                                            ],
                                    operations=["==", "like", ">=", "<=", "like"],
                                    values=[connectivities[0].gid,
                                            "Bold",
                                            "500", "0.002",
                                            '"conduction_speed": "3.0"'
                                            ]
                                    )

    #### If you want to filter another type of TS, change the kind class bellow,
    #### instead of TimeSeriesRegion use TimeSeriesEEG, or TimeSeriesSurface, etc.
    timeseries = _retrieve_entities_by_filters(TimeSeriesRegion, project_id, filter_timeseries)

    for ts in timeseries:
        print "============================="
        print ts.summary_info
        print " Original file: " + str(ts.get_storage_file_path())
        destination_file = os.path.expanduser(os.path.join(export_folder, ts.get_storage_file_name()))
        FilesHelper.copy_file(ts.get_storage_file_path(), destination_file)
        if os.path.exists(destination_file):
            print " TS file copied at: " + destination_file
        else:
            print " Some error happened when trying to copy at destination folder!!"


if __name__ == '__main__':
    if len(argv) < 2:
        PROJECT_ID = 1
    else:
        PROJECT_ID = int(argv[1])

    print "We will try to search datatypes in project with ID:" + str(PROJECT_ID)
    search_and_export_ts(PROJECT_ID)
gpl-2.0
4,476,070,721,329,385,000
40.45
105
0.626986
false
4.010484
false
false
false
andreimaximov/algorithms
leetcode/algorithms/recover-binary-tree/solution.py
1
2334
#!/usr/bin/env python


class Solution(object):
    def inorder(self, root):
        """
        Returns the inorder traversal of nodes in the tree.
        """
        current = root
        stack = []
        inorder = []

        while current is not None or len(stack) > 0:
            # Traverse to the left most (first) node in the tree rooted at the
            # current node.
            while current is not None:
                stack.append(current)
                current = current.left

            # The stack is guaranteed to have at least one node due to the
            # condition of the outer while loop.
            current = stack.pop()
            inorder.append(current)

            # Current might be None after this, but then the algorithm will
            # just continue traversing up.
            current = current.right

        return inorder

    def findOutOfOrder(self, root):
        """
        Finds the first two out of order nodes in a binary tree.
        """
        inorder = self.inorder(root)

        # Impossible for any values to be out of order with 0 or 1 nodes.
        if len(inorder) < 2:
            return ()

        outOfOrder = []  # Stores indices where inorder[i] >= inorder[i + 1]
        for i in range(0, len(inorder) - 1):
            if inorder[i].val >= inorder[i + 1].val:
                outOfOrder.append(i)
            if len(outOfOrder) == 2:
                break

        n = len(outOfOrder)
        if n == 0:
            # No out of order nodes.
            return ()
        elif n == 1:
            # Out of order nodes are next to each other.
            i = outOfOrder[0]
            return (inorder[i], inorder[i + 1])
        elif n == 2:
            # Out of order nodes are not next to each other.
            i = outOfOrder[0]
            j = outOfOrder[1]
            return (inorder[i], inorder[j + 1])

    def recoverTree(self, root):
        nodes = self.findOutOfOrder(root)
        assert len(nodes) == 2

        # Swap the values in the two out of order nodes.
        firstValue = nodes[0].val
        nodes[0].val = nodes[1].val
        nodes[1].val = firstValue


def main():
    print('Please run this solution on LeetCode.')
    print('https://leetcode.com/problems/recover-binary-search-tree/')


if __name__ == '__main__':
    main()
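
# Illustrative walkthrough (not part of the original file): take the valid BST
# root=2, left=1, right=3 and swap the values 1 and 3, giving the inorder value
# sequence [3, 2, 1]. findOutOfOrder records descents at indices 0 and 1, so it
# returns (inorder[0], inorder[2]) == (3, 1), and recoverTree swaps those two
# values back to restore [1, 2, 3].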
mit
140,343,525,249,919,440
29.311688
78
0.532562
false
4.052083
false
false
false
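A quick usage sketch for the solution above. LeetCode injects its own TreeNode type; the stand-in class here exists only so the example runs stand-alone, and it assumes the Solution class from the file above is in scope:

class TreeNode(object):
    def __init__(self, val):
        self.val = val
        self.left = None
        self.right = None

# A BST that should read 1, 2, 3 inorder, with 1 and 3 swapped by mistake.
root = TreeNode(2)
root.left = TreeNode(3)
root.right = TreeNode(1)

Solution().recoverTree(root)
assert (root.left.val, root.val, root.right.val) == (1, 2, 3)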
Rabuske/HeartRatePython
HeartRatePython/HeartRatePython/Token.py
1
1293
import datetime


class Token(object):
    accessToken = None
    refreshToken = None
    generationDate = None
    expiresIn = None  # in seconds
    type = None
    userId = None

    def __init__(self, userId, accessToken, refreshToken, type, expiresIn, generationDate=None):
        self.userId = userId
        self.accessToken = accessToken
        self.refreshToken = refreshToken
        self.type = type
        self.expiresIn = expiresIn
        self.generationDate = generationDate
        if self.generationDate is None:
            self.generationDate = datetime.datetime.utcnow().timestamp()

    def isExpired(self):
        expirationDate = float(self.generationDate) + float(self.expiresIn)
        return datetime.datetime.utcnow().timestamp() > expirationDate

    # Create an object of the type Token based on a response from the Requests package
    @staticmethod
    def createFromDDIC(dictionary):
        try:
            # positional form, e.g. a database row
            return Token(dictionary[0], dictionary[1], dictionary[2], dictionary[3], dictionary[4], dictionary[5])
        except KeyError:
            # named form, e.g. a parsed JSON response
            return Token(dictionary["user_id"], dictionary["access_token"], dictionary["refresh_token"], dictionary["token_type"], dictionary["expires_in"])
gpl-3.0
5,066,700,721,045,928,000
37.058824
164
0.649652
false
4.536842
false
false
false
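A small usage sketch for the Token class above; the credential values are made up. Passing a dict exercises the named-key branch of createFromDDIC, since integer indexing a dict raises KeyError:

payload = {"user_id": "42", "access_token": "abc123",
           "refresh_token": "def456", "token_type": "Bearer",
           "expires_in": 3600}
token = Token.createFromDDIC(payload)
print(token.isExpired())   # False until 3600 seconds have passed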
mpkasp/django-bom
bom/migrations/0031_auto_20200104_1352.py
1
2301
# Generated by Django 2.2.8 on 2020-01-04 13:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('bom', '0030_auto_20200101_2253'), ] operations = [ migrations.AlterField( model_name='partrevision', name='height_units', field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True), ), migrations.AlterField( model_name='partrevision', name='length_units', field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True), ), migrations.AlterField( model_name='partrevision', name='value_units', field=models.CharField(blank=True, choices=[('', '-----'), ('Ohms', 'Ω'), ('mOhms', 'mΩ'), ('kOhms', 'kΩ'), ('F', 'F'), ('pF', 'pF'), ('nF', 'nF'), ('uF', 'μF'), ('V', 'V'), ('uV', 'μV'), ('mV', 'mV'), ('A', 'A'), ('uA', 'μA'), ('mA', 'mA'), ('C', '°C'), ('F', '°F'), ('H', 'H'), ('mH', 'mH'), ('uH', 'μH'), ('Hz', 'Hz'), ('kHz', 'kHz'), ('MHz', 'MHz'), ('GHz', 'GHz'), ('Other', 'Other')], default=None, max_length=5, null=True), ), migrations.AlterField( model_name='partrevision', name='wavelength_units', field=models.CharField(blank=True, choices=[('', '-----'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('A', 'Å'), ('Other', 'Other')], default=None, max_length=5, null=True), ), migrations.AlterField( model_name='partrevision', name='width_units', field=models.CharField(blank=True, choices=[('', '-----'), ('mil', 'mil'), ('in', 'in'), ('ft', 'ft'), ('yd', 'yd'), ('km', 'km'), ('m', 'm'), ('cm', 'cm'), ('mm', 'mm'), ('um', 'μm'), ('nm', 'nm'), ('Other', 'Other')], default=None, max_length=5, null=True), ), ]
gpl-3.0
-3,665,462,270,932,256,000
59.157895
442
0.453193
false
2.996068
false
false
false
fjruizruano/ngs-protocols
mitobim_run.py
1
3707
#!/usr/bin/python import sys import os from subprocess import call from Bio import SeqIO print "Usage: mitobim_run.py NumberOfReads ListOfFiles Reference [miramito/quickmito/seedmito] missmatch" try: nreads = sys.argv[1] except: nreads = raw_input("Introduce number of reads: ") try: lista = sys.argv[2] except: lista = raw_input("Introduce list of files: ") try: ref = sys.argv[3] except: ref = raw_input("Introduce Fasta file as reference: ") try: prot = sys.argv[4] except: prot = raw_input("Introduce protocol name (miramito/quickmito/seedmito): ") try: mism = sys.argv[5] except: mism = "15" manifest = """echo "\n#manifest file for basic mapping assembly with illumina data using MIRA 4\n\nproject = initial-mapping-testpool-to-Salpinus-mt\n\njob=genome,mapping,accurate\n\nparameters = -NW:mrnl=0 -AS:nop=1 SOLEXA_SETTINGS -CO:msr=no\n\nreadgroup\nis_reference\ndata = reference.fa\nstrain = Salpinus-mt-genome\n\nreadgroup = reads\ndata = reads.fastq\ntechnology = solexa\nstrain = testpool\n" > manifest.conf""" miramito = """mira manifest.conf && MITObim_1.8.pl --missmatch %s --clean -start 1 -end 1000 -sample testpool -ref Salpinus_mt_genome -readpool reads.fastq -maf initial-mapping-testpool-to-Salpinus-mt_assembly/initial-mapping-testpool-to-Salpinus-mt_d_results/initial-mapping-testpool-to-Salpinus-mt_out.maf > log""" % mism quickmito = """MITObim_1.8.pl -start 1 -end 1000 -sample testpool -ref Salpinus_mt_genome -readpool reads.fastq --missmatch %s --quick reference.fa --clean > log""" % mism seedmito = """MITObim_1.8.pl -sample testpool -ref Salpinus_mt_genome -readpool reads.fastq --quick reference.fa --missmatch %s -end 1000 --clean > log""" % mism miramitoout = """/testpool-Salpinus_mt_genome_assembly/testpool-Salpinus_mt_genome_d_results/testpool-Salpinus_mt_genome_out_testpool.unpadded.fasta""" pairs = open(lista).readlines() npairs = len(pairs)/2 for npair in range(0,npairs): pairone = pairs[npair*2][:-1] pairtwo = pairs[(npair*2)+1][:-1] name = "" paironesplit = pairone.split(".") if paironesplit[-1] == "gz": name = ".".join(paironesplit[0:-2]) elif paironesplit[-1] == "fastq" or paironesplit[-1] == "fq": name = ".".join(paironesplit[0:-1]) name = name[:-2] foldername = "%s_%s" % (name,prot) call("mkdir %s" % foldername , shell=True) os.chdir(foldername) print "\nStarting with " + name call("seqtk sample -s100 ../%s %s > %s" % (pairone,nreads,name+".fq.subset1"), shell=True) call("seqtk sample -s100 ../%s %s > %s" % (pairtwo,nreads,name+".fq.subset2"), shell=True) call("shuffleSequences_fastq.pl %s %s %s" % (name+".fq.subset1",name+".fq.subset2",name+".shuffled.fastq"), shell=True) call("ln -sf %s reads.fastq" % (name+".shuffled.fastq"), shell=True) call("ln -sf ../%s reference.fa" % ref, shell=True) if prot == "miramito": call(manifest, shell=True) call(miramito, shell=True) elif prot == "quickmito": call(quickmito, shell=True) elif prot == "seedmito": call(seedmito, shell=True) else: break list_dir = os.listdir(".") list_dir.sort() iterations = [] for dir in list_dir: if dir.startswith("iteration"): iterations.append(dir) os.chdir("../") consensus = "%s/%s" % (foldername,iterations[-1]+miramitoout) secus = SeqIO.parse(open(consensus), "fasta") out = open("%s_%s.fa" % (name,prot), "w") i = 0 for secu in secus: i+=1 s = str(secu.seq) s = s.replace("x","n") out.write(">%s_%s_%s\n%s\n" % (name,prot,i, s)) out.close() print name + " finalized!!!"
gpl-3.0
8,431,332,318,362,962,000
37.216495
423
0.647694
false
2.827613
true
false
false
eboreapps/Scikit-Learn-Playground
ScikitlearnPlayground/CrossValidationKNN.py
1
1171
#Copyright 2016 EBORE APPS (http://www.eboreapps.com) #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at #http://www.apache.org/licenses/LICENSE-2.0 #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. from sklearn.cross_validation import cross_val_score from sklearn.neighbors import KNeighborsClassifier from sklearn.datasets import load_iris import matplotlib.pyplot as plt iris = load_iris() X = iris.data y = iris.target k_range = range(1, 50) k_scores = [] for k in k_range: knn = KNeighborsClassifier(n_neighbors=k) scores = cross_val_score(knn, X, y, scoring='accuracy', cv=10) k_scores.append(scores.mean()) # visualize plt.plot(k_range, k_scores) plt.xlabel("Value of K for KNN") plt.ylabel("Cross-Validated accuracy") plt.show() # best model is with n_neighbors = 20
apache-2.0
-6,109,181,488,564,701,000
27.585366
73
0.760034
false
3.384393
false
false
false
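The loop above is a one-parameter grid search written by hand. The same sweep expressed through GridSearchCV, assuming a scikit-learn version of the same era (where the old sklearn.cross_validation import above still works) and reusing X, y and KNeighborsClassifier from the script:

from sklearn.grid_search import GridSearchCV

param_grid = {'n_neighbors': list(range(1, 50))}
grid = GridSearchCV(KNeighborsClassifier(), param_grid, cv=10, scoring='accuracy')
grid.fit(X, y)
print(grid.best_params_, grid.best_score_)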
skosukhin/spack
var/spack/repos/builtin/packages/everytrace/package.py
1
2163
############################################################################## # Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC. # Produced at the Lawrence Livermore National Laboratory. # # This file is part of Spack. # Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved. # LLNL-CODE-647188 # # For details, see https://github.com/spack/spack # Please also see the NOTICE and LICENSE files for our notice and the LGPL. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License (as # published by the Free Software Foundation) version 2.1, February 1999. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and # conditions of the GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public # License along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ############################################################################## from spack import * class Everytrace(CMakePackage): """Get stack trace EVERY time a program exits.""" homepage = "https://github.com/citibeth/everytrace" url = "https://github.com/citibeth/everytrace/tarball/0.2.0" version('0.2.0', '2af0e5b6255064d5191accebaa70d222') version('develop', git='https://github.com/citibeth/everytrace.git', branch='develop') variant('mpi', default=True, description='Enables MPI parallelism') variant('fortran', default=True, description='Enable use with Fortran programs') depends_on('mpi', when='+mpi') def cmake_args(self): spec = self.spec return [ '-DUSE_MPI=%s' % ('YES' if '+mpi' in spec else 'NO'), '-DUSE_FORTRAN=%s' % ('YES' if '+fortran' in spec else 'NO')] def setup_environment(self, spack_env, run_env): run_env.prepend_path('PATH', join_path(self.prefix, 'bin'))
lgpl-2.1
4,433,843,952,145,421,000
41.411765
79
0.652797
false
3.8625
false
false
false
platsch/OctoPNP
octoprint_OctoPNP/ImageProcessing.py
1
11846
# -*- coding: utf-8 -*- """ This file is part of OctoPNP OctoPNP is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. OctoPNP is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with OctoPNP. If not, see <http://www.gnu.org/licenses/>. Main author: Florens Wasserfall <wasserfall@kalanka.de> """ import cv2 import numpy as np import math import os import shutil class ImageProcessing: def __init__(self, box_size, bed_cam_binary_thresh, head_cam_binary_thresh): self.box_size=box_size self.bed_binary_thresh = bed_cam_binary_thresh self.head_binary_thresh = head_cam_binary_thresh #self.lower_mask_color = np.array([22,28,26]) # green default #self.upper_mask_color = np.array([103,255,255]) self.lower_mask_color = np.array([0,85,76]) self.upper_mask_color = np.array([100,255,255]) self._img_path = "" self._last_saved_image_path = None self._last_error = "" self._interactive=False self._debug = True # Locates a part in a box. Box size must be given to constructor. Image must contain only # one box with white background. # Returns displacement with respect to the center of the box if a part is detected, False otherwise. # boolean relative_to_camera sets wether the offset should be relative to the box or to the camera. #=================================================================================================== def locatePartInBox(self,img_path, relative_to_camera): result = False self._img_path = img_path # open image file img=cv2.imread(img_path,cv2.IMREAD_COLOR) #detect box boundaries rotated_crop_rect = self._rotatedBoundingBox(img, self.head_binary_thresh, 0.6, 0.95) if(rotated_crop_rect): rotated_box = cv2.boxPoints(rotated_crop_rect) left_x = int(min(rotated_box[0][0],rotated_box[1][0])) right_x = int(max(rotated_box[2][0],rotated_box[3][0])) upper_y = int(min(rotated_box[1][1],rotated_box[2][1])) lower_y = int(max(rotated_box[0][1],rotated_box[3][1])) # workaround for bounding boxes that are bigger then the image if(left_x < 0): left_x = 0 if(upper_y < 0): upper_y = 0 if(right_x < 0): right_x = img.shape[1] if(lower_y < 0): lower_y = img.shape[0] #Crop image img_crop=img[upper_y:lower_y, left_x:right_x] # now find part inside the box cm_rect = self._rotatedBoundingBox(img_crop, self.head_binary_thresh, 0.001, 0.7) if(cm_rect): cm_x = cm_rect[0][0] cm_y = cm_rect[0][1] res_x = img_crop.shape[1] res_y = img_crop.shape[0] displacement_x=(cm_x-res_x/2)*self.box_size/res_x displacement_y=((res_y-cm_y)-res_y/2)*self.box_size/res_y if relative_to_camera: #incorporate the position of the tray box in relation to the image displacement_x += (left_x - (img.shape[1]-right_x))/2 * self.box_size/res_x displacement_y -= (upper_y - (img.shape[0]-(lower_y)))/2 * self.box_size/res_y result = displacement_x,displacement_y # Generate result image and return cv2.circle(img_crop,(int(cm_x),int(cm_y)), 5, (0,255,0), -1) filename="/finalcm_"+os.path.basename(self._img_path) finalcm_path=os.path.dirname(self._img_path)+filename cv2.imwrite(finalcm_path,img_crop) self._last_saved_image_path = finalcm_path if self._interactive: cv2.imshow("Part in box: ",img_crop) if self._interactive: cv2.waitKey(0) else: 
                self._last_error = "Unable to find part in box"
        else:
            self._last_error = "Unable to locate box"

        return result


    # Get part orientation by computing a rotated bounding box around contours
    # and determining the main orientation of this box
    # Returns the angle of main edges relative to the
    # next main axis [-45°:45°]
    def getPartOrientation(self,img_path, pxPerMM, offset=0):
        self._img_path = img_path
        result = False

        # open image file
        img=cv2.imread(img_path,cv2.IMREAD_COLOR)

        mask = self._maskBackground(img)

        # we should use actual object size here
        min_area_factor = pxPerMM**2 / (img.shape[1] * img.shape[0]) # 1mm²
        rect = self._rotatedBoundingBox(img, 50, 0.005, 0.7, mask)

        if(rect):
            # draw rotated bounding box for visualization
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(img,[box],0,(0,0,255),2)

            # compute rotation offset
            rotation = rect[2] - offset
            # normalize to positive PI range
            if rotation < 0:
                rotation = (rotation % -180) + 180

            rotation = rotation % 90
            result = -rotation if rotation < 45 else 90-rotation

            if self._debug: print("Part deviation measured by bed camera: " + str(result))
        else:
            self._last_error = "Unable to locate part for finding the orientation"
            if self._debug: print(self._last_error)
            result = False

        if self._interactive: cv2.imshow("contours",img)
        if self._interactive: cv2.waitKey(0)

        #save result as image for GUI
        filename="/orientation_"+os.path.basename(self._img_path)
        orientation_img_path=os.path.dirname(self._img_path)+filename
        cv2.imwrite(orientation_img_path, img)
        self._last_saved_image_path = orientation_img_path

        return result


    # Find the position of a (already rotated) part. Returns the offset between the
    # center of the image and the parts center of mass, 0,0 if no part is detected.
    #==============================================================================
    def getPartPosition(self, img_path, pxPerMM):
        self._img_path = img_path
        result = False

        # open image file
        img=cv2.imread(img_path,cv2.IMREAD_COLOR)

        mask = self._maskBackground(img)

        res_x = img.shape[1]
        res_y = img.shape[0]

        # we should use actual object size here
        min_area_factor = pxPerMM**2 / (res_x * res_y) # 1mm²
        rect = self._rotatedBoundingBox(img, 50, min_area_factor, 0.7, mask)

        if(rect):
            cm_x = rect[0][0]
            cm_y = rect[0][1]
            displacement_x=(cm_x-res_x/2)/pxPerMM
            displacement_y=((res_y-cm_y)-res_y/2)/pxPerMM
            result = [displacement_x, -displacement_y]
            # mark the detected center of mass in the image written for the UI;
            # only draw when a part was actually found, cm_x/cm_y are undefined otherwise
            cv2.circle(img,(int(cm_x),int(cm_y)),5,(0,255,0),-1)
        else:
            self._last_error = "Unable to locate part for correcting the position"
            if self._debug: print(self._last_error)
            result = False

        # write image for UI
        filename="/final_"+os.path.basename(self._img_path)
        final_img_path=os.path.dirname(self._img_path)+filename
        cv2.imwrite(final_img_path,img)
        self._last_saved_image_path = final_img_path

        if self._interactive: cv2.imshow("Center of Mass",img)
        if self._interactive: cv2.waitKey(0)

        return result


    #==============================================================================
    def getLastSavedImagePath(self):
        if self._last_saved_image_path:
            return self._last_saved_image_path
        else:
            return False


    #==============================================================================
    def getLastErrorMessage(self):
        return self._last_error


    #==============================================================================
    def _rotatedBoundingBox(self, img, binary_thresh, min_area_factor, max_area_factor, binary_img = ()):
        result = False

        if (len(binary_img) == 0):
            #convert image to grey and blur
            gray_img=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
            gray_img=cv2.blur(gray_img, (3,3))

            ret, binary_img = cv2.threshold(gray_img, binary_thresh, 255, cv2.THRESH_BINARY)

        # depending on the OpenCV Version findContours returns 2 or 3 objects...
        #contours, hierarchy = cv2.findContours(binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE, (0, 0));
        contours = cv2.findContours(binary_img, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE, offset=(0, 0))[0]

        #cv2.drawContours(img, contours, -1, (0,255,0), 3) # draw basic contours

        minArea = binary_img.shape[0] * binary_img.shape[1] * min_area_factor # how to find a better value??? input from part description?
        maxArea = binary_img.shape[0] * binary_img.shape[1] * max_area_factor # Y*X | don't detect full image

        rectPoints = []

        for contour in contours:
            rect = cv2.minAreaRect(contour)
            rectArea = rect[1][0] * rect[1][1]
            if(rectArea > minArea and rectArea < maxArea):
                box = cv2.boxPoints(rect)
                for point in box:
                    rectPoints.append(np.array(point, dtype=np.int32))
                if self._interactive: box = np.int0(box)
                if self._interactive: cv2.drawContours(img,[box],0,(0,0,255),2)

            #cv2.imshow("contours",binary_img)
            #cv2.waitKey(0)

        if self._interactive: cv2.imshow("Binarized image",binary_img)
        if self._interactive: cv2.waitKey(0)
        if self._interactive: cv2.imshow("contours",img)
        if self._interactive: cv2.waitKey(0)

        if (len(rectPoints) >= 4):
            rectArray = np.array(rectPoints)
            rect = cv2.minAreaRect(rectArray)

            # draw rotated bounding box for visualization
            box = cv2.boxPoints(rect)
            box = np.int0(box)
            cv2.drawContours(img,[box],0,(0,0,255),2)
            result = rect
        else:
            self._last_error = "Unable to find contour in image"

        return result

    # Compute a binary image / mask by removing all pixels in the given color range
    # mask_corners: remove all pixels outside a circle touching the image boundaries
    # to crop badly illuminated corners
    #==============================================================================
    def _maskBackground(self, img, mask_corners = True):
        h,w,c = np.shape(img)

        blur_img=cv2.blur(img, (5,5))
        hsv = cv2.cvtColor(blur_img, cv2.COLOR_BGR2HSV)

        lower_color = np.array([22,28,26])
        upper_color = np.array([103,255,255])

        # create binary mask by finding background color range
        mask = cv2.inRange(hsv, self.lower_mask_color, self.upper_mask_color)

        # remove the corners from mask since they are prone to illumination problems
        if(mask_corners):
            circle_mask = np.zeros((h, w), np.uint8)
            circle_mask[:, :] = 255
            cv2.circle(circle_mask,(int(w/2), int(h/2)), min(int(w/2), int(h/2)), 0, -1)
            mask = cv2.bitwise_or(mask,circle_mask)

        # invert mask to get white objects on black background
        #inverse_mask = 255 - mask

        if self._interactive: cv2.imshow("binary mask", mask)
        if self._interactive: cv2.waitKey(0)

        return mask
agpl-3.0
-1,268,269,883,371,438,300
39.006757
139
0.574565
false
3.588485
false
false
false
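The part detection above reduces to thresholding plus cv2.minAreaRect over contours. A self-contained sketch on a synthetic image; the [-2] index is a common trick to pick the contour list across OpenCV 2/3/4 return signatures:

import cv2
import numpy as np

img = np.zeros((100, 100), np.uint8)
cv2.rectangle(img, (20, 30), (70, 60), 255, -1)   # a white "part" on black
ret, binary = cv2.threshold(img, 50, 255, cv2.THRESH_BINARY)
contours = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2]
rect = cv2.minAreaRect(contours[0])               # ((cx, cy), (w, h), angle)
print(rect[0], rect[2])                           # center and rotation angle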
pgmillon/ansible
lib/ansible/modules/database/postgresql/postgresql_ping.py
1
3674
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2018, Andrew Klychkov (@Andersson007) <aaklychkov@mail.ru> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type ANSIBLE_METADATA = { 'metadata_version': '1.1', 'status': ['preview'], 'supported_by': 'community' } DOCUMENTATION = r''' --- module: postgresql_ping short_description: Check remote PostgreSQL server availability description: - Simple module to check remote PostgreSQL server availability. version_added: '2.8' options: db: description: - Name of a database to connect to. type: str aliases: - login_db author: - Andrew Klychkov (@Andersson007) extends_documentation_fragment: postgres ''' EXAMPLES = r''' # PostgreSQL ping dbsrv server from the shell: # ansible dbsrv -m postgresql_ping # In the example below you need to generate certificates previously. # See https://www.postgresql.org/docs/current/libpq-ssl.html for more information. - name: PostgreSQL ping dbsrv server using not default credentials and ssl postgresql_ping: db: protected_db login_host: dbsrv login_user: secret login_password: secret_pass ca_cert: /root/root.crt ssl_mode: verify-full ''' RETURN = r''' is_available: description: PostgreSQL server availability. returned: always type: bool sample: true server_version: description: PostgreSQL server version. returned: always type: dict sample: { major: 10, minor: 1 } ''' try: from psycopg2.extras import DictCursor except ImportError: # psycopg2 is checked by connect_to_db() # from ansible.module_utils.postgres pass from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.postgres import ( connect_to_db, exec_sql, get_conn_params, postgres_common_argument_spec, ) # =========================================== # PostgreSQL module specific support methods. # class PgPing(object): def __init__(self, module, cursor): self.module = module self.cursor = cursor self.is_available = False self.version = {} def do(self): self.get_pg_version() return (self.is_available, self.version) def get_pg_version(self): query = "SELECT version()" raw = exec_sql(self, query, add_to_executed=False)[0][0] if raw: self.is_available = True raw = raw.split()[1].split('.') self.version = dict( major=int(raw[0]), minor=int(raw[1]), ) # =========================================== # Module execution. # def main(): argument_spec = postgres_common_argument_spec() argument_spec.update( db=dict(type='str', aliases=['login_db']), ) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=True, ) # Set some default values: cursor = False db_connection = False result = dict( changed=False, is_available=False, server_version=dict(), ) conn_params = get_conn_params(module, module.params) db_connection = connect_to_db(module, conn_params, fail_on_conn=False) if db_connection is not None: cursor = db_connection.cursor(cursor_factory=DictCursor) # Do job: pg_ping = PgPing(module, cursor) if cursor: # If connection established: result["is_available"], result["server_version"] = pg_ping.do() db_connection.rollback() module.exit_json(**result) if __name__ == '__main__': main()
gpl-3.0
6,587,041,869,324,349,000
23.657718
92
0.633642
false
3.699899
false
false
false
chrisspen/homebot
src/test/google_speech/vad_example.py
1
3455
#!/usr/bin/env python import collections import contextlib import sys import wave import webrtcvad def read_wave(path): with contextlib.closing(wave.open(path, 'rb')) as wf: num_channels = wf.getnchannels() assert num_channels == 1 sample_width = wf.getsampwidth() assert sample_width == 2 sample_rate = wf.getframerate() assert sample_rate in (8000, 16000, 32000), 'Sample rate is: %s' % sample_rate pcm_data = wf.readframes(wf.getnframes()) return pcm_data, sample_rate def write_wave(path, audio, sample_rate): with contextlib.closing(wave.open(path, 'wb')) as wf: wf.setnchannels(1) wf.setsampwidth(2) wf.setframerate(sample_rate) wf.writeframes(audio) class Frame(object): def __init__(self, bytes, timestamp, duration): self.bytes = bytes self.timestamp = timestamp self.duration = duration def frame_generator(frame_duration_ms, audio, sample_rate): n = int(sample_rate * (frame_duration_ms / 1000.0) * 2) offset = 0 timestamp = 0.0 duration = (float(n) / sample_rate) / 2.0 while offset + n < len(audio): yield Frame(audio[offset:offset + n], timestamp, duration) timestamp += duration offset += n def vad_collector(sample_rate, frame_duration_ms, padding_duration_ms, vad, frames): num_padding_frames = int(padding_duration_ms / frame_duration_ms) ring_buffer = collections.deque(maxlen=num_padding_frames) triggered = False voiced_frames = [] for frame in frames: sys.stdout.write( '1' if vad.is_speech(frame.bytes, sample_rate) else '0') if not triggered: ring_buffer.append(frame) num_voiced = len([f for f in ring_buffer if vad.is_speech(f.bytes, sample_rate)]) if num_voiced > 0.9 * ring_buffer.maxlen: sys.stdout.write('+(%s)' % (ring_buffer[0].timestamp,)) triggered = True voiced_frames.extend(ring_buffer) ring_buffer.clear() else: voiced_frames.append(frame) ring_buffer.append(frame) num_unvoiced = len([f for f in ring_buffer if not vad.is_speech(f.bytes, sample_rate)]) if num_unvoiced > 0.9 * ring_buffer.maxlen: sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration)) triggered = False yield b''.join([f.bytes for f in voiced_frames]) ring_buffer.clear() voiced_frames = [] if triggered: sys.stdout.write('-(%s)' % (frame.timestamp + frame.duration)) sys.stdout.write('\n') if voiced_frames: yield b''.join([f.bytes for f in voiced_frames]) def main(args): if len(args) != 2: sys.stderr.write( 'Usage: example.py <aggressiveness> <path to wav file>\n') sys.exit(1) audio, sample_rate = read_wave(args[1]) vad = webrtcvad.Vad(int(args[0])) frames = frame_generator(30, audio, sample_rate) frames = list(frames) segments = vad_collector(sample_rate, 30, 300, vad, frames) for i, segment in enumerate(segments): path = 'chunk-%002d.wav' % (i,) print(' Writing %s' % (path,)) write_wave(path, segment, sample_rate) if __name__ == '__main__': main(sys.argv[1:])
mit
2,947,711,968,839,872,500
33.207921
86
0.582923
false
3.561856
false
false
false
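The byte arithmetic inside frame_generator above: 16-bit mono PCM means 2 bytes per sample, so a frame of frame_duration_ms milliseconds holds sample_rate * duration * 2 bytes. A worked check:

sample_rate = 16000          # Hz, one of the rates read_wave accepts
frame_duration_ms = 30
n = int(sample_rate * (frame_duration_ms / 1000.0) * 2)
assert n == 960              # 30 ms of 16 kHz 16-bit mono audio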
dhanababum/dj-wkhtmltopdf
djwkhtmltopdf/url_helper.py
1
2993
import re

from django.core.exceptions import ViewDoesNotExist
from django.core.urlresolvers import RegexURLPattern, RegexURLResolver
from django.conf import settings


def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
    """
    Return a list of views from a list of urlpatterns.

    Each object in the returned list is a three-tuple:
    (view_func, regex, name)

    :param urlpatterns: the url patterns to traverse.
    :type urlpatterns: list
    :param base: regex prefix accumulated while recursing into resolvers.
    :type base: str or unicode
    :param namespace: namespace prefix, used to avoid name collisions.
    :type namespace: str or unicode
    :returns: list of (view_func, regex, name) tuples
    :raise ViewDoesNotExist: if a view doesn't exist
    :raise ImportError: if an included urlconf module cannot be imported
    """
    views = []
    for p in urlpatterns:
        if isinstance(p, RegexURLPattern):
            try:
                if not p.name:
                    name = p.name
                elif namespace:
                    name = '{0}:{1}'.format(namespace, p.name)
                else:
                    name = p.name
                views.append((p.callback, base + p.regex.pattern, name))
            except ViewDoesNotExist:
                continue
        elif isinstance(p, RegexURLResolver):
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(
                patterns,
                base + p.regex.pattern,
                namespace=(namespace or p.namespace)))
        elif hasattr(p, '_get_callback'):
            try:
                views.append(
                    (p._get_callback(), base + p.regex.pattern, p.name))
            except ViewDoesNotExist:
                continue
        elif hasattr(p, 'url_patterns') or hasattr(p, '_get_url_patterns'):
            try:
                patterns = p.url_patterns
            except ImportError:
                continue
            views.extend(extract_views_from_urlpatterns(
                patterns,
                base + p.regex.pattern,
                namespace=namespace))
        else:
            raise TypeError("%s does not appear to be a urlpattern object" % p)
    return views


def get_all_views():
    """
    Collect all views from the top level project urlconf.
    """
    views = []
    try:
        urlconf = __import__(settings.ROOT_URLCONF, {}, {}, [''])
    except Exception as e:
        print(e)
    view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
    for (func, regex, url_name) in view_functions:
        if hasattr(func, '__name__'):
            func_name = func.__name__
        elif hasattr(func, '__class__'):
            func_name = '%s()' % func.__class__.__name__
        else:
            func_name = re.sub(r' at 0x[0-9a-f]+', '', repr(func))
        module = '{0}.{1}'.format(func.__module__, func_name)
        views.append((module, module))
    return views
bsd-2-clause
-2,542,504,369,017,222,000
33.802326
79
0.546275
false
4.460507
false
false
false
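A minimal sketch of feeding extract_views_from_urlpatterns a hand-built urlconf, using the Django 1.x-era url() helper that produces the RegexURLPattern objects the module above expects (the view and route here are illustrative):

from django.conf.urls import url

def ping_view(request):
    pass

urlpatterns = [url(r'^ping/$', ping_view, name='ping')]
for func, regex, name in extract_views_from_urlpatterns(urlpatterns):
    print(name, regex, func.__name__)   # ping ^ping/$ ping_view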
abdelhalim/gits3
src/gits3/amazon_s3_transport.py
1
5828
# Copyright (C) 2009 Abdelhalim Ragab <abdelhalim@gmail.com> # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; version 3 # of the License or (at your option) any later version of # the License. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. ''' @author: abdelhalim ''' from boto.s3.connection import S3Connection from boto.s3.key import Key from git_config import GitConfig import re import os class S3Transport(object): URL_PATTERN = re.compile( r'(?P<protocol>[^:]+)://' r'(?P<config>[^@]+)@' r'(?P<bucket>[^/]+)/' r'(?P<prefix>.*)' ) def __init__(self, url): self.url = url o = self.URL_PATTERN.match(self.url) if o: bucket_name = o.group('bucket') self.prefix = o.group('prefix') if self.prefix.endswith('/'): self.prefix = self.prefix[:-1] # read the jgit config file to access S3 config_file = o.group('config') homedir = os.path.expanduser('~') config_path = homedir + '/' + config_file # print config_path props = self.open_properties(config_path) accesskey = props['accesskey'] secretkey = props['secretkey'] # print 'accesskey=',accesskey # print 'secretkey=',secretkey self.s3Conn = S3Connection(accesskey,secretkey) self.bucket = self.s3Conn.get_bucket(bucket_name, False) # print self.bucket def open_properties(self, properties_file): propFile= file( properties_file, "rU" ) propDict= dict() for propLine in propFile: propDef= propLine.strip() if len(propDef) == 0: continue if propDef[0] in ( '!', '#' ): continue punctuation= [ propDef.find(c) for c in ':= ' ] + [ len(propDef) ] found= min( [ pos for pos in punctuation if pos != -1 ] ) name= propDef[:found].rstrip() value= propDef[found:].lstrip(":= ").rstrip() propDict[name]= value propFile.close() return propDict def upload_pack(self, file_name): pack_full_path = self.prefix + '/objects/pack/' self.upload_file(pack_full_path, file_name) def upload_file(self, prefix, file_name): new_key = self.bucket.new_key(prefix + file_name) new_key.set_contents_from_file(open(file_name)) new_key.set_acl('public-read') pass def upload_string(self, path, contents): key_path = self.prefix + '/' + path key = self.bucket.get_key(key_path) if not key: key = self.bucket.new_key(key_path) key.set_contents_from_string(contents) key.set_acl('public-read') def get_pack_names(self): if self.bucket: path = self.prefix + '/objects/pack' keys = self.bucket.list(path) packs = [] for key in keys: if key.name.endswith('.pack'): if key.name.startswith(path): packs.append(key.name[len(path)+1:len(key.name)]) return packs def get_advertised_refs(self): refs = {} if self.bucket: # get loose refs path = self.prefix + '/refs' keys = self.bucket.list(path) for key in keys: name = key.name[len(self.prefix + '/'):] s = key.get_contents_as_string() ref = self.get_ref(s, refs) refs[name] = {name:ref} # read HEAD path = self.prefix + '/HEAD' key = self.bucket.get_key(path) if key: s = key.get_contents_as_string() ref = self.get_ref(s, refs) refs['HEAD'] = {'HEAD':ref} return refs def get_ref(self, s, refs): if s.startswith('ref: '): target = s[len('ref: '):] target = target.strip() try: target_ref = refs[target] except KeyError: 
target_ref = None if target_ref: return target_ref[target] return s def create_new_repo(self, refs): if self.bucket: # .git/config file config_str = '[core]\n' + '\trepositoryformatversion = 0\n' key = self.bucket.new_key(self.prefix + '/config') key.set_contents_from_string(config_str) key.set_acl('public-read') # .git/HEAD if refs.startswith('refs/heads'): head_str = 'ref: ' + refs + '\n' else: head_str = 'refs: refs/heads/' + refs + '\n' key = self.bucket.new_key(self.prefix + '/HEAD') key.set_contents_from_string(head_str) key.set_acl('public-read')
gpl-2.0
1,939,522,590,412,960,500
31.20442
78
0.498284
false
4.098453
true
false
false
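How URL_PATTERN above carves up a jgit-style S3 remote; a quick check against a made-up URL, assuming the module above is importable (only the compiled regex is touched, no S3 connection is made):

url = 'amazon-s3://.jgit@mybucket/projects/demo.git'
o = S3Transport.URL_PATTERN.match(url)
print(o.group('config'))   # .jgit
print(o.group('bucket'))   # mybucket
print(o.group('prefix'))   # projects/demo.git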
CoderDojoPL/minecraft-python
rzeka.py
1
1568
# -*- coding: utf-8 -*-
# Author: Wojtek Gembalczyk w.gembalczyk@coderdojo.org.pl

import mcpi.minecraft as minecraft
import mcpi.block as block
import time
import math

def woda3x3(x, z):
    mc.setBlocks(x, 0, z-6, x, 2, z+6, block.AIR.id)     #air above the river
    mc.setBlocks(x, 0, z-5, x, 0, z+5, block.DIRT.id)    #flood embankments
    mc.setBlocks(x, 0, z-4, x, 1, z+4, block.DIRT.id)    #flood embankments
    mc.setBlocks(x, -1, z-2, x, 1, z+2, block.WATER.id)  #water

def fragment_mostu(x, y, z):
    mc.setBlocks(x-2, y, z, x+2, y+1, z, block.GLASS.id)
    mc.setBlocks(x-1, y, z, x+1, y, z, block.STONE.id)
    mc.setBlocks(x-1, y+1, z, x+1, y+1, z, block.AIR.id)

def most(x, z):
    fragment_mostu(x, 0, z-10)
    fragment_mostu(x, 1, z-9)
    fragment_mostu(x, 2, z-8)
    for i in range(-7, 8):
        fragment_mostu(x, 3, z+i)
    fragment_mostu(x, 2, z+8)
    fragment_mostu(x, 1, z+9)
    fragment_mostu(x, 0, z+10)

mc = minecraft.Minecraft.create()

dlugosc = 100
mosty_fr = 40
mc.setBlocks(-50, 0, 0, dlugosc+50, 50, 100, block.AIR.id)
mc.setBlocks(-50, -5, 0, dlugosc+50, -1, 100, block.DIRT.id)
time.sleep(2)

mc.setBlocks(-2, -1, 45, -1, 1, 55, block.DIRT.id)
for i in range(0, dlugosc*10, 7):
    z = float(i)/100
    woda3x3(int(z*10), int(math.sin(z)*7+50))
    time.sleep(0.02)
    if i%(7*mosty_fr)==0 and i != 0:
        most(int(z*10), int(math.sin(z)*7+50))

ostatni_x = int(z*10)
ostatni_z = int(math.sin(z)*7+50)
mc.setBlocks(ostatni_x-1, -1, ostatni_z-5, ostatni_x, 1, ostatni_z+5, block.DIRT.id)
gpl-2.0
-6,279,255,793,626,819,000
29.72549
84
0.596682
false
2.035065
false
false
false
EventTeam/beliefs
src/beliefs/referent.py
1
4770
""" This file defines two classes, TaxonomyCell and Referent, which work in tandem to provide the 'kind' property to all referents, and the generalization structure. The generalization structure (a taxonomy-- a directed acyclic graph of IS-A relationships) is automatically constructed using the object-oriented inheritance structures of classes that inherit from Referents. Referent is a sub-class of DictCell and contains an instance of TaxonomyCell, which is initialized to the base class of whatever subclasses Referent. In the file that loads all Referent subclasses, usually __init__.py, after all classes are loaded, there must be a special call to initialize TaxonomyCell's domain: >> import sys >> from beliefs.referent import * >> >> TaxonomyCell.initialize(sys.modules[__name__]) """ import inspect import networkx as nx import numpy as np from beliefs.cells import * class TaxonomyCell(PartialOrderedCell): """ A taxonomy of all DictCell subclasses.""" def __init__(self, initial_value=None): if not self.has_domain(): # only initialize once #raise Exception("TaxonomyCell.initialize(sys.modules[__name__]) must be called after importing classes") print "initializing" # represents IS-A relationships PartialOrderedCell.__init__(self, None) if initial_value: self.merge(initial_value) @classmethod def initialize(clz, modules): taxonomy = TaxonomyCell.build_class_graph(modules) clz.set_domain(taxonomy) @staticmethod def build_class_graph(modules, klass=None, graph=None): """ Builds up a graph of the DictCell subclass structure """ if klass is None: class_graph = nx.DiGraph() for name, classmember in inspect.getmembers(modules, inspect.isclass): if issubclass(classmember, Referent) and classmember is not Referent: TaxonomyCell.build_class_graph(modules, classmember, class_graph) return class_graph else: parents = getattr(klass, '__bases__') for parent in parents: if parent != Referent: graph.add_edge(parent.__name__, klass.__name__) # store pointer to classes in property 'class' graph.node[parent.__name__]['class'] = parent graph.node[klass.__name__]['class'] = klass if issubclass(parent, Referent): TaxonomyCell.build_class_graph(modules, parent, graph) class Referent(DictCell): """ Thin DictCell subclass to inject the TaxonomyCell property after initialization """ def __init__(self, *args, **kwargs): DictCell.__init__(self, *args, **kwargs) self.kind = TaxonomyCell(self.__class__.__name__) self.num = IntervalCell(0, 100) @classmethod def cells_from_defaults(clz, jsonobj): """ Creates a referent instance of type `json.kind` and initializes it to default values. 
""" # convert strings to dicts if isinstance(jsonobj, (str, unicode)): jsonobj = json.loads(jsonobj) assert 'cells' in jsonobj, "No cells in object" domain = TaxonomyCell.get_domain() cells = [] for num, cell_dna in enumerate(jsonobj['cells']): assert 'kind' in cell_dna, "No type definition" classgenerator = domain.node[cell_dna['kind']]['class'] cell = classgenerator() cell['num'].merge(num) for attr, val in cell_dna.items(): if not attr in ['kind']: cell[attr].merge(val) cells.append(cell) return cells @classmethod def from_defaults(clz, defaults): """ Given a dictionary of defaults, ie {attribute: value}, this classmethod constructs a new instance of the class and merges the defaults""" if isinstance(defaults, (str, unicode)): defaults = json.loads(defaults) c = clz() for attribute in defaults.keys(): if attribute in c: value = defaults[attribute] c[attribute].merge(value) # in case any values were not specified, attempt to merge them with # the settings provided by clz.random() cr = clz.random() for attribute, value in cr: try: c[attribute].merge(value) except Contradiction: pass return c class Nameable(Referent): """ A referent with a name """ def __init__(self): Referent.__init__(self) self.name = NameCell()
gpl-2.0
2,732,122,672,072,562,700
36.857143
117
0.607757
false
4.360146
false
false
false
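The taxonomy construction above is plain introspection: walk each class's __bases__ and record IS-A edges. A toy stand-alone version of the same idea, with networkx assumed installed as in the module above:

import networkx as nx

class Thing(object): pass
class Animal(Thing): pass
class Dog(Animal): pass

graph = nx.DiGraph()
for klass in (Animal, Dog):
    for parent in klass.__bases__:
        graph.add_edge(parent.__name__, klass.__name__)

print(list(nx.topological_sort(graph)))   # ['Thing', 'Animal', 'Dog']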
leonth/private-configs
sublime-text-3/Packages/SublimePythonIDE/server/linter.py
1
6132
# -*- coding: utf-8 -*- import sys import os import os.path sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../lib")) import _ast import pep8 import pyflakes.checker as pyflakes pyflakes.messages.Message.__str__ = ( lambda self: self.message % self.message_args ) class PyflakesLoc: """ Error location data for pyflakes. pyflakes 0.7 wants loc as {lineno, col_offset} object we ducktype it here. Apparently AST code has been upgraded in some point? Online lineno attribute is required. """ def __init__(self, lineno): self.lineno = lineno class PythonLintError(pyflakes.messages.Message): def __init__( self, filename, loc, level, message, message_args, offset=0, text=None): super(PythonLintError, self).__init__(filename, PyflakesLoc(loc)) self.level = level self.message = message self.message_args = message_args self.offset = offset if text is not None: self.text = text class Pep8Error(PythonLintError): def __init__(self, filename, loc, offset, code, text): # PEP 8 Errors are downgraded to "warnings" super(Pep8Error, self).__init__( filename, loc, 'W', '[W] PEP 8 (%s): %s', (code, text), offset=offset, text=text ) class Pep8Warning(PythonLintError): def __init__(self, filename, loc, offset, code, text): # PEP 8 Warnings are downgraded to "violations" super(Pep8Warning, self).__init__( filename, loc, 'V', '[V] PEP 8 (%s): %s', (code, text), offset=offset, text=text ) class OffsetError(PythonLintError): def __init__(self, filename, loc, text, offset): super(OffsetError, self).__init__( filename, loc, 'E', '[E] %r', (text,), offset=offset + 1, text=text ) class PythonError(PythonLintError): def __init__(self, filename, loc, text): super(PythonError, self).__init__( filename, loc, 'E', '[E] %r', (text,), text=text ) def pyflakes_check(code, encoding, filename, ignore=None): try: tree = compile(code.encode(encoding), filename, "exec", _ast.PyCF_ONLY_AST) except (SyntaxError, IndentationError) as value: msg = value.args[0] (lineno, offset, text) = value.lineno, value.offset, value.text # If there's an encoding problem with the file, the text is None. if text is None: # Avoid using msg, since for the only known case, it contains a # bogus message that claims the encoding the file declared was # unknown. if msg.startswith('duplicate argument'): arg = msg.split('duplicate argument ', 1)[1].split(' ', 1)[0] arg = arg.strip('\'"') error = pyflakes.messages.DuplicateArgument( filename, lineno, arg ) else: error = PythonError(filename, lineno, msg) else: line = text.splitlines()[-1] if offset is not None: offset = offset - (len(text) - len(line)) if offset is not None: error = OffsetError(filename, lineno, msg, offset) else: error = PythonError(filename, lineno, msg) return [error] except ValueError as e: return [PythonError(filename, 1, e.args[0])] else: # Okay, it's syntactically valid. Now check it. 
w = pyflakes.Checker(tree, filename, builtins=ignore) return w.messages def pep8_check(code, filename, ignore=None, max_line_length=pep8.MAX_LINE_LENGTH): messages = [] _lines = code.split('\n') if _lines: class SublimeLinterReport(pep8.BaseReport): def error(self, line_number, offset, text, check): """Report an error, according to options.""" code = text[:4] message = text[5:] if self._ignore_code(code): return if code in self.counters: self.counters[code] += 1 else: self.counters[code] = 1 self.messages[code] = message # Don't care about expected errors or warnings if code in self.expected: return self.file_errors += 1 self.total_errors += 1 if code.startswith('E'): messages.append(Pep8Error( filename, line_number, offset, code, message) ) else: messages.append(Pep8Warning( filename, line_number, offset, code, message) ) return code _ignore = ignore + pep8.DEFAULT_IGNORE.split(',') options = pep8.StyleGuide( reporter=SublimeLinterReport, ignore=_ignore).options options.max_line_length = max_line_length good_lines = [l + '\n' for l in _lines] good_lines[-1] = good_lines[-1].rstrip('\n') if not good_lines[-1]: good_lines = good_lines[:-1] try: pep8.Checker(filename, good_lines, options=options).check_all() except Exception as e: print("An exception occured when running pep8 checker: %s" % e) return messages def do_linting(lint_settings, code, encoding, filename): errors = [] if lint_settings.get("pep8", True): params = { 'ignore': lint_settings.get('pep8_ignore', []), 'max_line_length': lint_settings.get( 'pep8_max_line_length', None) or pep8.MAX_LINE_LENGTH, } errors.extend(pep8_check( code, filename, **params) ) pyflakes_ignore = lint_settings.get('pyflakes_ignore', None) pyflakes_disabled = lint_settings.get('pyflakes_disabled', False) if not pyflakes_disabled: errors.extend(pyflakes_check(code, encoding, filename, pyflakes_ignore)) return errors
mit
9,175,192,041,530,635,000
29.81407
83
0.558056
false
4.015717
false
false
false
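A sketch of driving do_linting directly; the settings dict shape is inferred from the lint_settings.get calls above, and the sample code is chosen to trip both checkers (an unused import for pyflakes, missing whitespace around an operator for pep8):

code = "import os\nx=1\n"
settings = {"pep8": True, "pep8_ignore": [],
            "pyflakes_ignore": None, "pyflakes_disabled": False}
for problem in do_linting(settings, code, "utf-8", "example.py"):
    print(problem.level, problem.lineno, str(problem))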
cedriclaunay/gaffer
python/GafferImageTest/MergeTest.py
1
6664
########################################################################## # # Copyright (c) 2013, Image Engine Design Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are # met: # # * Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # # * Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials provided with # the distribution. # # * Neither the name of John Haddon nor the names of # any other contributors to this software may be used to endorse or # promote products derived from this software without specific prior # written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS # IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR # PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR # CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR # PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF # LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ########################################################################## import unittest import IECore import GafferImage import os class MergeTest( unittest.TestCase ) : rPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/redWithDataWindow.100x100.exr" ) gPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/greenWithDataWindow.100x100.exr" ) bPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/blueWithDataWindow.100x100.exr" ) checkerPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/checkerboard.100x100.exr" ) checkerRGBPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/rgbOverChecker.100x100.exr" ) rgbPath = os.path.expandvars( "$GAFFER_ROOT/python/GafferTest/images/rgb.100x100.exr" ) # Do several tests to check the cache is working correctly: def testHashes( self ) : r1 = GafferImage.ImageReader() r1["fileName"].setValue( self.checkerPath ) r2 = GafferImage.ImageReader() r2["fileName"].setValue( self.gPath ) ########################################## # Test to see if the hash changes. ########################################## merge = GafferImage.Merge() merge["operation"].setValue(8) # 8 is the Enum value of the over operation. merge["in"].setInput(r1["out"]) merge["in1"].setInput(r2["out"]) h1 = merge["out"].image().hash() # Switch the inputs. merge["in1"].setInput(r1["out"]) merge["in"].setInput(r2["out"]) h2 = merge["out"].image().hash() self.assertNotEqual( h1, h2 ) ########################################## # Test to see if the hash remains the same # when the output should be the same but the # input plugs used are not. ########################################## merge = GafferImage.Merge() merge["operation"].setValue(8) # 8 is the Enum value of the over operation. expectedHash = h1 # Connect up a load of inputs ... 
merge["in"].setInput(r1["out"]) merge["in1"].setInput(r1["out"]) merge["in2"].setInput(r1["out"]) merge["in3"].setInput(r2["out"]) # but then disconnect two so that the result should still be the same... merge["in"].setInput( None ) merge["in2"].setInput( None ) h1 = merge["out"].image().hash() self.assertEqual( h1, expectedHash ) def testHashPassThrough( self ) : r1 = GafferImage.ImageReader() r1["fileName"].setValue( self.checkerPath ) ########################################## # Test to see if the input has is always passed # through if only the first input is connected. ########################################## merge = GafferImage.Merge() merge["operation"].setValue(8) # 8 is the Enum value of the over operation. expectedHash = r1["out"].image().hash() merge["in"].setInput(r1["out"]) h1 = merge["out"].image().hash() self.assertEqual( h1, expectedHash ) ########################################## # Test that if we disable the node the hash gets passed through. ########################################## merge["enabled"].setValue(False) h1 = merge["out"].image().hash() self.assertEqual( h1, expectedHash ) # Overlay a red, green and blue tile of different data window sizes and check the data window is expanded on the result and looks as we expect. def testOverRGBA( self ) : r = GafferImage.ImageReader() r["fileName"].setValue( self.rPath ) g = GafferImage.ImageReader() g["fileName"].setValue( self.gPath ) b = GafferImage.ImageReader() b["fileName"].setValue( self.bPath ) merge = GafferImage.Merge() merge["operation"].setValue(8) # 8 is the Enum value of the over operation. merge["in"].setInput(r["out"]) merge["in1"].setInput(g["out"]) merge["in2"].setInput(b["out"]) mergeResult = merge["out"].image() expected = IECore.Reader.create( self.rgbPath ).read() self.assertTrue( not IECore.ImageDiffOp()( imageA = expected, imageB = mergeResult, skipMissingChannels = False, maxError = 0.001 ).value ) # Overlay a red, green and blue tile of different data window sizes and check the data window is expanded on the result and looks as we expect. def testOverRGBAonRGB( self ) : c = GafferImage.ImageReader() c["fileName"].setValue( self.checkerPath ) r = GafferImage.ImageReader() r["fileName"].setValue( self.rPath ) g = GafferImage.ImageReader() g["fileName"].setValue( self.gPath ) b = GafferImage.ImageReader() b["fileName"].setValue( self.bPath ) merge = GafferImage.Merge() merge["operation"].setValue(8) # 8 is the Enum value of the over operation. merge["in"].setInput(c["out"]) merge["in1"].setInput(r["out"]) merge["in2"].setInput(g["out"]) merge["in3"].setInput(b["out"]) mergeResult = merge["out"].image() expected = IECore.Reader.create( self.checkerRGBPath ).read() self.assertTrue( not IECore.ImageDiffOp()( imageA = expected, imageB = mergeResult, skipMissingChannels = False, maxError = 0.001 ).value ) if __name__ == "__main__": unittest.main()
bsd-3-clause
524,376,069,707,290,940
36.438202
144
0.656212
false
3.633588
true
false
false
softcert/vsroom
vsroom/common/historian4.py
1
10451
import os import time import math import errno import marshal import collections import idiokit from idiokit import timer, xmlcore from idiokit.xmpp.jid import JID from abusehelper.core import bot, events, taskfarm, services from vsroom.common import eventdb NS = "vsr#historian" try: import json JSONDecodeError = ValueError except ImportError: import simplejson as json JSONDecodeError = getattr(json, "JSONDecodeError", ValueError) def current_time(multiplier=10**6): return time.time() * multiplier def is_valid(event): """Return whether an event contains values for keys other than "id". >>> event = events.Event() >>> is_valid(event) False >>> event.add("id", "1") >>> is_valid(event) False >>> event.add("other", "2") >>> is_valid(event) True """ contains_other = False for key in event.keys(): if key != "id": contains_other = True break return event.contains("id") and contains_other class Timeout(Exception): pass class EventDB(object): def __init__(self, filename): self.db = eventdb.Writer(filename) self.jids = dict() self.ids = dict() def commit(self): self.db.commit(current_time()) def close(self): self.db.close(current_time()) def add_event(self, jid, event): timestamp = current_time() ids = event.values("id") if not ids: obj = dict((x, list(event.values(x))) for x in event.keys()) self.db.append_obj(timestamp, timestamp, marshal.dumps(obj)) return for id in ids: copy = events.Event(event) copy.clear("id") copy.add("id", id) if is_valid(copy): obj = dict((x, list(copy.values(x))) for x in copy.keys()) self._open(timestamp, id, jid, marshal.dumps(obj)) else: self._close(timestamp, id) def purge_jid(self, jid): ids = list(self.jids.get(jid, ())) for id in ids: self._close(current_time(), id) def _close(self, timestamp, id): if id in self.ids: jid = self.ids.pop(id) ids = self.jids.get(jid, set()) ids.discard(id) if not ids: self.jids.pop(jid, None) self.db.set_obj(timestamp, id, None) def _open(self, timestamp, id, jid, obj): self._close(timestamp, id) self.ids[id] = jid self.jids.setdefault(jid, set()).add(id) self.db.set_obj(timestamp, id, obj) def query(self, start=None, end=None): start = None if start is None else start * 10**6 end = None if end is None else end * 10**6 self.commit() for start, end, obj in self.db.query(start, end): start = int(start * 10**-6) end = None if end is None else int(end * 10**-6) yield start, end, marshal.loads(obj) def histogram(self, id, h_start, h_end, step): self.commit() step = 2**max(0, int(math.ceil(math.log(step, 2)))) h_start = step * math.floor(h_start / step) h_end = step * math.ceil(h_end / step) deltas = dict() deltas[h_start] = 0 for start, end, obj in self.db.query(h_start * 10**6, h_end * 10**6): start *= 10**-6 start -= start % step deltas[start] = deltas.get(start, 0) + 1 if end is not None: end *= 10**-6 end += step - end % step deltas[end] = deltas.get(end, 0) - 1 data = list() count = 0 for time, delta in sorted(deltas.items()): count += delta if h_start <= time < h_end: if not data or data[-1]["value"] != count: data.append(dict(offset=int(time-h_start), value=count)) result = dict(id=id, start=h_start, end=h_end, step=step, values=data) element = xmlcore.Element("histogram", xmlns=NS) element.text = json.dumps(result) return element class QuerySet(object): def __init__(self): self.ids = dict() def __nonzero__(self): for query, amounts in self.ids.itervalues(): if amounts: return True return False def start(self, jid, id, query): self.ids[(jid, id)] = query, collections.deque() def load(self, jid, id, size): if (jid, id) in self.ids: 
            query, sizes = self.ids[(jid, id)]
            sizes.append(size)

    def cancel(self, jid, id):
        # Called by HistorianService.parse() when a client sends a
        # "cancel" query; drops the pending query and its queued sizes.
        self.ids.pop((jid, id), None)

    def discard_jid(self, discarded_jid):
        for (jid, id) in list(self.ids):
            if jid == discarded_jid:
                del self.ids[(jid, id)]

    def __iter__(self):
        for (jid, id), (query, sizes) in list(self.ids.iteritems()):
            if not sizes:
                continue

            events = list()
            result = dict(id=id)

            while sizes[0] > 0 and len(events) < 10:
                sizes[0] -= 1
                try:
                    start, end, event_dict = query.next()
                except StopIteration:
                    result.update(done=True)
                    del self.ids[(jid, id)]
                    break
                else:
                    event_info = dict(start=start, event=event_dict)
                    if end is not None:
                        event_info.update(end=end)
                    events.append(event_info)
            result.update(events=events)

            result.update(remains=sizes[0])
            if sizes[0] <= 0:
                sizes.popleft()

            element = xmlcore.Element("dump", xmlns=NS)
            element.text = json.dumps(result)
            yield jid, element

class HistorianService(bot.ServiceBot):
    bot_state_file = bot.Param()

    def __init__(self, bot_state_file=None, **keys):
        bot.ServiceBot.__init__(self, bot_state_file=None, **keys)
        self.rooms = taskfarm.TaskFarm(self.handle_room)

        self.db_dir = bot_state_file
        try:
            os.makedirs(self.db_dir)
        except OSError, ose:
            if errno.EEXIST != ose.errno:
                raise ose

    @idiokit.stream
    def session(self, state, src_room):
        try:
            yield self.rooms.inc(src_room)
        except services.Stop:
            idiokit.stop()

    @idiokit.stream
    def handle_room(self, name):
        db_file = os.path.join(self.db_dir, name)
        db = EventDB(db_file)
        try:
            self.log.info("Joining room %r", name)
            room = yield self.xmpp.muc.join(name, self.bot_name)
            self.log.info("Joined room %r", name)
            try:
                yield room | self.parse(db) | self.commit(db)
            finally:
                self.log.info("Left room %r", name)
        finally:
            db.close()

    @idiokit.stream
    def _timeout(self, timeout):
        yield timer.sleep(timeout)
        raise Timeout()

    @idiokit.stream
    def parse(self, db):
        queries = QuerySet()

        while True:
            next = idiokit.next()
            if queries:
                idiokit.pipe(self._timeout(0.0), next)

            try:
                elements = yield next
            except Timeout:
                pass
            else:
                for element in elements.with_attrs("from"):
                    sender = JID(element.get_attr("from"))
                    if element.named("presence").with_attrs(type="unavailable"):
                        db.purge_jid(sender)
                        queries.discard_jid(sender)

                    for message in element.named("message"):
                        if not message.with_attrs(type="groupchat"):
                            continue
                        for event in events.Event.from_elements(message):
                            db.add_event(sender, event)

                    for query in element.named("message").children(ns=NS):
                        try:
                            args = json.loads(query.text)
                        except JSONDecodeError:
                            self.log.error("Invalid query data from %r: %r", sender, query.text)
                            continue

                        if "id" not in args:
                            self.log.error("Query without an ID from %r: %r", sender, args)
                            continue
                        id = args.get("id")

                        if query.named("start"):
                            start = args.get("start", None)
                            end = args.get("end", None)
                            queries.start(sender, id, db.query(start, end))
                            self.log.info("Start from %r: %r", sender, args)
                        elif query.named("load"):
                            if "size" in args:
                                queries.load(sender, id, args.get("size"))
                                self.log.debug("Load from %r: %r", sender, args)
                            else:
                                self.log.error("Load without a size from %r: %r", sender, args)
                        elif query.named("histogram"):
                            start = args.get("start", None)
                            end = args.get("end", None)
                            step = args.get("step", None)
                            if None not in (start, end, step):
                                element = db.histogram(id, start, end, step)
                                self.xmpp.core.message(sender, element)
                                self.log.debug("Histogram from %r: %r", sender, args)
                        elif query.named("cancel"):
                            queries.cancel(sender, id)
                            self.log.info("Cancel from %r: %r", sender, args)

            for sender, element in queries:
                yield self.xmpp.core.message(sender, element)

    @idiokit.stream
    def commit(self, db, commit_interval=1.0):
        while True:
            yield timer.sleep(commit_interval)
            db.commit()

if __name__ == "__main__":
    HistorianService.from_command_line().execute()
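# Illustrative sketch (not part of the upstream module): EventDB.histogram()
# above counts how many events are "open" per bucket with a delta/sweep
# technique: +1 at each event's rounded-down start, -1 after its rounded-up
# end, then a running sum over the sorted deltas. The standalone function
# below reproduces that technique on plain (start, end) tuples; the name and
# the assumption that `step` is already the final bucket width are ours.

def sweep_histogram(intervals, h_start, h_end, step):
    deltas = {h_start: 0}
    for start, end in intervals:
        start -= start % step
        deltas[start] = deltas.get(start, 0) + 1
        if end is not None:
            end += step - end % step
            deltas[end] = deltas.get(end, 0) - 1

    data = []
    count = 0
    for time, delta in sorted(deltas.items()):
        count += delta
        if h_start <= time < h_end:
            if not data or data[-1]["value"] != count:
                data.append(dict(offset=int(time - h_start), value=count))
    return data

# Example: two overlapping closed events and one still open (end=None).
# sweep_histogram([(0, 10), (4, 12), (8, None)], 0, 16, 4)
# -> [{'offset': 0, 'value': 1}, {'offset': 4, 'value': 2},
#     {'offset': 8, 'value': 3}, {'offset': 12, 'value': 2}]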
mit
4,675,445,567,437,754,000
31.557632
80
0.48847
false
4.121057
false
false
false
mesutcang/mongodb-word2vec-doc2vec
main.py
1
1814
# -*- encoding: utf-8 -*-
from glob import glob

from pymongo import MongoClient
from gensim import models
from sklearn.metrics.pairwise import cosine_similarity
import numpy as np


def fillMongo(db):
    """
    gets the mongodb connection and fills the database.
    """
    for index, file in enumerate(glob('./**/*.txt', recursive=True)):
        db.deneme.insert_one(
            {
                "id": index + 1,
                "filename": file,
                "class": file.split("/")[-2],
                "text": open(file, encoding="iso-8859-9").read().strip()
            })


def mongoDocumentsSplitted(db):
    # Word2Vec expects an iterable of sentences, i.e. one token list per
    # document, not a single flat token list.
    splitted_records = []
    for record in db["deneme"].find():
        splitted_records.append(record["text"].split())
    return splitted_records


def mongoDocuments2Sentences(db):
    sentences = []
    for record in db["deneme"].find():
        # tags must be a list of labels, not a bare string
        sentence = models.doc2vec.LabeledSentence(words=record["text"].split(),
                                                  tags=[record["class"]])
        sentences.append(sentence)
    return sentences


def main():
    """
    Main application execution.
    """
    db = MongoClient('localhost', 27017).test
    fillMongo(db)
    sentences = mongoDocumentsSplitted(db)

    w2v_model = models.Word2Vec(sentences, workers=4)
    w2v_model.save("word2vec.bin")

    d2v_model = models.Doc2Vec(mongoDocuments2Sentences(db), workers=4)
    d2v_model.save("doc2vec.bin")

    random_records = db.deneme.aggregate([{"$sample": {"size": 10}}])

    infer_vectors = []
    vectors = []
    for record in random_records:
        vectors.append(record["text"])
        # reshape(1, -1) keeps each inferred vector as a single row, so
        # cosine_similarity compares whole documents rather than
        # individual components.
        infer_vectors.append(np.array(d2v_model.infer_vector(
            record['text'].split(), alpha=0.025, min_alpha=0.025,
            steps=20)).reshape(1, -1))

    for i in range(len(infer_vectors) - 1):
        print("vector1: ", vectors[i])
        print("vector2: ", vectors[i + 1])
        print("cosine: ", cosine_similarity(infer_vectors[i],
                                            infer_vectors[i + 1])[0][0])


if __name__ == "__main__":
    main()
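# Illustrative sketch (not part of the original script): a dependency-light
# way to sanity-check the similarity numbers above is cosine similarity of
# two 1-D vectors with plain numpy; the sample vectors are made up.

def cosine(u, v):
    u, v = np.asarray(u, dtype=float), np.asarray(v, dtype=float)
    return float(np.dot(u, v) / (np.linalg.norm(u) * np.linalg.norm(v)))

# Example: cosine([1, 0, 1], [1, 1, 0]) == 0.5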
gpl-3.0
2,473,236,960,632,347,600
26.074627
135
0.675854
false
2.944805
false
false
false
HewlettPackard/python-hpOneView
tests/unit/resources/networking/test_connection_templates.py
2
3312
# -*- coding: utf-8 -*- ### # (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ### import unittest import mock from hpOneView.connection import connection from hpOneView.resources.networking.connection_templates import ConnectionTemplates from hpOneView.resources.resource import ResourceClient class ConnectionTemplatesTest(unittest.TestCase): def setUp(self): self.host = '127.0.0.1' self.connection = connection(self.host) self._connection_templates = ConnectionTemplates(self.connection) @mock.patch.object(ResourceClient, 'get') def test_get_called_once(self, mock_get): self._connection_templates.get('7a9f7d09-3c24-4efe-928f-50a1af411120') mock_get.assert_called_once_with( '7a9f7d09-3c24-4efe-928f-50a1af411120') @mock.patch.object(ResourceClient, 'get_all') def test_get_all_called_once(self, mock_get_all): filter = 'name=TestName' sort = 'name:ascending' self._connection_templates.get_all(2, 500, filter, sort) mock_get_all.assert_called_once_with(2, 500, filter=filter, sort=sort) @mock.patch.object(ResourceClient, 'get_by') def test_get_by_called_once(self, mock_get_by): self._connection_templates.get_by( 'name', 'name1128673347-1465916352647') mock_get_by.assert_called_once_with( 'name', 'name1128673347-1465916352647') @mock.patch.object(ResourceClient, 'get') def test_get_default_called_once(self, mock_get): self._connection_templates.get_default() uri = '/rest/connection-templates/defaultConnectionTemplate' mock_get.assert_called_once_with(uri) @mock.patch.object(ResourceClient, 'update') def test_update_called_once(self, mock_update): con_template = { "type": "connection-templates", "bandwidth": { "maximumBandwidth": 10000, "typicalBandwidth": 2000 }, "name": "CT-23" } self._connection_templates.update(con_template, 70) mock_update.assert_called_once_with(con_template, timeout=70, default_values=self._connection_templates.DEFAULT_VALUES)
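# Optional entry point (an addition, not in the upstream file) so the suite
# can also be run directly with `python test_connection_templates.py`; it
# uses only the standard unittest runner already imported above.
if __name__ == '__main__':
    unittest.main()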
mit
-7,821,508,190,935,121,000
39.390244
101
0.694143
false
3.82448
true
false
false
whyflyru/django-cacheops
cacheops/query.py
1
20852
# -*- coding: utf-8 -*- import sys import json import six from funcy import select_keys, cached_property, once, once_per, monkey, wraps from funcy.py2 import mapcat, map from .cross import pickle, md5 import django from django.utils.encoding import smart_str from django.core.exceptions import ImproperlyConfigured from django.db import DEFAULT_DB_ALIAS from django.db.models import Manager, Model from django.db.models.query import QuerySet from django.db.models.sql.datastructures import EmptyResultSet from django.db.models.signals import pre_save, post_save, post_delete, m2m_changed try: from django.db.models.query import MAX_GET_RESULTS except ImportError: MAX_GET_RESULTS = None from .conf import model_profile, redis_client, handle_connection_failure, LRU, ALL_OPS from .utils import monkey_mix, get_model_name, stamp_fields, load_script, \ func_cache_key, cached_view_fab, get_thread_id, family_has_profile from .tree import dnfs from .invalidation import invalidate_obj, invalidate_dict __all__ = ('cached_as', 'cached_view_as', 'install_cacheops') _local_get_cache = {} @handle_connection_failure def cache_thing(cache_key, data, cond_dnfs, timeout): """ Writes data to cache and creates appropriate invalidators. """ load_script('cache_thing', LRU)( keys=[cache_key], args=[ pickle.dumps(data, -1), json.dumps(cond_dnfs, default=str), timeout ] ) def cached_as(*samples, **kwargs): """ Caches results of a function and invalidates them same way as given queryset. NOTE: Ignores queryset cached ops settings, just caches. """ timeout = kwargs.get('timeout') extra = kwargs.get('extra') key_func = kwargs.get('key_func', func_cache_key) # If we unexpectedly get list instead of queryset return identity decorator. # Paginator could do this when page.object_list is empty. # TODO: think of better way doing this. if len(samples) == 1 and isinstance(samples[0], list): return lambda func: func def _get_queryset(sample): if isinstance(sample, Model): queryset = sample.__class__.objects.inplace().filter(pk=sample.pk) elif isinstance(sample, type) and issubclass(sample, Model): queryset = sample.objects.all() else: queryset = sample queryset._require_cacheprofile() return queryset querysets = map(_get_queryset, samples) cond_dnfs = mapcat(dnfs, querysets) key_extra = [qs._cache_key() for qs in querysets] key_extra.append(extra) if not timeout: timeout = min(qs._cacheconf['timeout'] for qs in querysets) def decorator(func): @wraps(func) def wrapper(*args, **kwargs): cache_key = 'as:' + key_func(func, args, kwargs, key_extra) cache_data = redis_client.get(cache_key) if cache_data is not None: return pickle.loads(cache_data) result = func(*args, **kwargs) cache_thing(cache_key, result, cond_dnfs, timeout) return result return wrapper return decorator def cached_view_as(*samples, **kwargs): return cached_view_fab(cached_as)(*samples, **kwargs) class QuerySetMixin(object): @cached_property def _cacheprofile(self): profile = model_profile(self.model) if profile: self._cacheconf = profile.copy() self._cacheconf['write_only'] = False return profile @cached_property def _cloning(self): return 1000 def _require_cacheprofile(self): if self._cacheprofile is None: raise ImproperlyConfigured( 'Cacheops is not enabled for %s.%s model.\n' 'If you don\'t want to cache anything by default ' 'you can configure it with empty ops.' 
% (self.model._meta.app_label, get_model_name(self.model))) def _cache_key(self, extra=''): """ Compute a cache key for this queryset """ md = md5() md.update('%s.%s' % (self.__class__.__module__, self.__class__.__name__)) # Vary cache key for proxy models md.update('%s.%s' % (self.model.__module__, self.model.__name__)) # Protect from field list changes in model md.update(stamp_fields(self.model)) # Use query SQL as part of a key try: sql, params = self.query.get_compiler(self._db or DEFAULT_DB_ALIAS).as_sql() try: sql_str = sql % params except UnicodeDecodeError: sql_str = sql % map(smart_str, params) md.update(smart_str(sql_str)) except EmptyResultSet: pass # If query results differ depending on database if self._cacheprofile and not self._cacheprofile['db_agnostic']: md.update(self.db) if extra: md.update(str(extra)) # Thing only appeared in Django 1.8 and was renamed in Django 1.9 it_class = getattr(self, '_iterator_class', None) or getattr(self, '_iterable_class', None) if it_class: md.update('%s.%s' % (it_class.__module__, it_class.__name__)) # 'flat' attribute changes results formatting for values_list() in Django 1.8 and earlier if hasattr(self, 'flat'): md.update(str(self.flat)) return 'q:%s' % md.hexdigest() def _cache_results(self, cache_key, results): cond_dnfs = dnfs(self) cache_thing(cache_key, results, cond_dnfs, self._cacheconf['timeout']) def cache(self, ops=None, timeout=None, write_only=None): """ Enables caching for given ops ops - a subset of {'get', 'fetch', 'count', 'exists'}, ops caching to be turned on, all enabled by default timeout - override default cache timeout write_only - don't try fetching from cache, still write result there NOTE: you actually can disable caching by omiting corresponding ops, .cache(ops=[]) disables caching for this queryset. 
""" self._require_cacheprofile() if ops is None or ops == 'all': ops = ALL_OPS if isinstance(ops, str): ops = [ops] self._cacheconf['ops'] = set(ops) if timeout is not None: self._cacheconf['timeout'] = timeout if write_only is not None: self._cacheconf['write_only'] = write_only return self def nocache(self): """ Convinience method, turns off caching for this queryset """ # cache profile not present means caching is not enabled for this model if self._cacheprofile is None: return self else: return self.cache(ops=[]) def cloning(self, cloning=1000): self._cloning = cloning return self def inplace(self): return self.cloning(0) if django.VERSION >= (1, 9): def _clone(self, **kwargs): if self._cloning: return self.clone(**kwargs) else: self.__dict__.update(kwargs) return self def clone(self, **kwargs): kwargs.setdefault('_cacheprofile', self._cacheprofile) if hasattr(self, '_cacheconf'): kwargs.setdefault('_cacheconf', self._cacheconf) clone = self._no_monkey._clone(self, **kwargs) clone._cloning = self._cloning - 1 if self._cloning else 0 return clone else: def _clone(self, klass=None, setup=False, **kwargs): if self._cloning: return self.clone(klass, setup, **kwargs) elif klass is not None: # HACK: monkey patch self.query.clone for single call # to return itself instead of cloning original_query_clone = self.query.clone def query_clone(): self.query.clone = original_query_clone return self.query self.query.clone = query_clone return self.clone(klass, setup, **kwargs) else: self.__dict__.update(kwargs) return self def clone(self, klass=None, setup=False, **kwargs): kwargs.setdefault('_cacheprofile', self._cacheprofile) if hasattr(self, '_cacheconf'): kwargs.setdefault('_cacheconf', self._cacheconf) clone = self._no_monkey._clone(self, klass, setup, **kwargs) clone._cloning = self._cloning - 1 if self._cloning else 0 return clone def iterator(self): # TODO: do not cache empty queries in Django 1.6 superiter = self._no_monkey.iterator cache_this = self._cacheprofile and 'fetch' in self._cacheconf['ops'] if cache_this: cache_key = self._cache_key() if not self._cacheconf['write_only'] and not self._for_write: # Trying get data from cache cache_data = redis_client.get(cache_key) if cache_data is not None: results = pickle.loads(cache_data) for obj in results: yield obj raise StopIteration # Cache miss - fallback to overriden implementation results = [] for obj in superiter(self): if cache_this: results.append(obj) yield obj if cache_this: self._cache_results(cache_key, results) raise StopIteration def count(self): if self._cacheprofile and 'count' in self._cacheconf['ops']: # Optmization borrowed from overriden method: # if queryset cache is already filled just return its len # NOTE: there is no self._iter in Django 1.6+, so we use getattr() for compatibility if self._result_cache is not None and not getattr(self, '_iter', None): return len(self._result_cache) return cached_as(self)(lambda: self._no_monkey.count(self))() else: return self._no_monkey.count(self) def get(self, *args, **kwargs): # .get() uses the same .iterator() method to fetch data, # so here we add 'fetch' to ops if self._cacheprofile and 'get' in self._cacheconf['ops']: # NOTE: local_get=True enables caching of simple gets in local memory, # which is very fast, but not invalidated. # Don't bother with Q-objects, select_related and previous filters, # simple gets - thats what we are really up to here. 
if self._cacheprofile['local_get'] \ and not args \ and not self.query.select_related \ and not self.query.where.children: # NOTE: We use simpler way to generate a cache key to cut costs. # Some day it could produce same key for diffrent requests. key = (self.__class__, self.model) + tuple(sorted(kwargs.items())) try: return _local_get_cache[key] except KeyError: _local_get_cache[key] = self._no_monkey.get(self, *args, **kwargs) return _local_get_cache[key] except TypeError: # If some arg is unhashable we can't save it to dict key, # we just skip local cache in that case pass if 'fetch' in self._cacheconf['ops']: qs = self else: qs = self._clone().cache() else: qs = self return qs._no_monkey.get(qs, *args, **kwargs) if django.VERSION >= (1, 6): def exists(self): if self._cacheprofile and 'exists' in self._cacheconf['ops']: if self._result_cache is not None: return bool(self._result_cache) return cached_as(self)(lambda: self._no_monkey.exists(self))() else: return self._no_monkey.exists(self) if django.VERSION >= (1, 5): def bulk_create(self, objs, batch_size=None): objs = self._no_monkey.bulk_create(self, objs, batch_size=batch_size) if family_has_profile(self.model): for obj in objs: invalidate_obj(obj) return objs elif django.VERSION >= (1, 4): def bulk_create(self, objs): objs = self._no_monkey.bulk_create(self, objs) if family_has_profile(self.model): for obj in objs: invalidate_obj(obj) return objs def connect_first(signal, receiver, sender): old_receivers = signal.receivers signal.receivers = [] signal.connect(receiver, sender=sender) signal.receivers += old_receivers # We need to stash old object before Model.save() to invalidate on its properties _old_objs = {} class ManagerMixin(object): @once_per('cls') def _install_cacheops(self, cls): cls._cacheprofile = model_profile(cls) if family_has_profile(cls): # Set up signals connect_first(pre_save, self._pre_save, sender=cls) connect_first(post_save, self._post_save, sender=cls) connect_first(post_delete, self._post_delete, sender=cls) # Install auto-created models as their module attributes to make them picklable module = sys.modules[cls.__module__] if not hasattr(module, cls.__name__): setattr(module, cls.__name__, cls) def contribute_to_class(self, cls, name): self._no_monkey.contribute_to_class(self, cls, name) # Django 1.7+ migrations create lots of fake models, just skip them # NOTE: we make it here rather then inside _install_cacheops() # because we don't want @once_per() to hold refs to all of them. if cls.__module__ != '__fake__': self._install_cacheops(cls) def _pre_save(self, sender, instance, **kwargs): if instance.pk is not None: try: _old_objs[get_thread_id(), sender, instance.pk] = sender.objects.get(pk=instance.pk) except sender.DoesNotExist: pass def _post_save(self, sender, instance, **kwargs): # Invoke invalidations for both old and new versions of saved object old = _old_objs.pop((get_thread_id(), sender, instance.pk), None) if old: invalidate_obj(old) invalidate_obj(instance) # NOTE: it's possible for this to be a subclass, e.g. proxy, without cacheprofile, # but its base having one. Or vice versa. # We still need to invalidate in this case, but cache on save better be skipped. if not instance._cacheprofile: return # Enabled cache_on_save makes us write saved object to cache. # Later it can be retrieved with .get(<cache_on_save_field>=<value>) # <cache_on_save_field> is pk unless specified. # This sweet trick saves a db request and helps with slave lag. 
        cache_on_save = instance._cacheprofile.get('cache_on_save')
        if cache_on_save:
            # HACK: We get this object "from field" so it can contain
            # some undesirable attributes or other objects attached.
            # RelatedField accessors do that, for example.
            #
            # So we strip down any _*_cache attrs before saving
            # and later reassign them
            unwanted_dict = select_keys(r'^_.*_cache$', instance.__dict__)
            for k in unwanted_dict:
                del instance.__dict__[k]

            key = 'pk' if cache_on_save is True else cache_on_save
            # Django doesn't allow filters like related_id = 1337.
            # So we just hackily strip _id from the end of a key
            # TODO: make it right, _meta.get_field() should help
            filter_key = key[:-3] if key.endswith('_id') else key

            cond = {filter_key: getattr(instance, key)}
            qs = sender.objects.inplace().filter(**cond).order_by()
            if MAX_GET_RESULTS:
                qs = qs[:MAX_GET_RESULTS + 1]
            qs._cache_results(qs._cache_key(), [instance])

            # Reverting stripped attributes
            instance.__dict__.update(unwanted_dict)

    def _post_delete(self, sender, instance, **kwargs):
        """
        Invalidation upon object deletion.
        """
        # NOTE: this will behave incorrectly if someone changed object fields
        # before deletion (though why would anyone do that?)
        invalidate_obj(instance)

    # Django 1.5- compatibility
    if not hasattr(Manager, 'get_queryset'):
        def get_queryset(self):
            return self.get_query_set()

    def inplace(self):
        return self.get_queryset().inplace()

    def cache(self, *args, **kwargs):
        return self.get_queryset().cache(*args, **kwargs)

    def nocache(self):
        return self.get_queryset().nocache()


def invalidate_m2m(sender=None, instance=None, model=None, action=None, pk_set=None, reverse=None, **kwargs):
    """
    Invoke invalidation on m2m changes.
    """
    # Skip this machinery for explicit through tables,
    # since post_save and post_delete events are triggered for them
    if not sender._meta.auto_created:
        return

    if action not in ('pre_clear', 'post_add', 'pre_remove'):
        return

    m2m = next(m2m for m2m in instance._meta.many_to_many + model._meta.many_to_many
               if m2m.rel.through == sender)

    # TODO: optimize several invalidate_objs/dicts at once
    if action == 'pre_clear':
        # TODO: always use column names here once Django 1.3 is dropped
        instance_field = m2m.m2m_reverse_field_name() if reverse else m2m.m2m_field_name()
        objects = sender.objects.filter(**{instance_field: instance.pk})
        for obj in objects:
            invalidate_obj(obj)
    elif action in ('post_add', 'pre_remove'):
        instance_column, model_column = m2m.m2m_column_name(), m2m.m2m_reverse_name()
        if reverse:
            instance_column, model_column = model_column, instance_column
        # NOTE: we don't need to query through objects here,
        # because we already know all their meaningful attributes.
for pk in pk_set: invalidate_dict(sender, { instance_column: instance.pk, model_column: pk }) @once def install_cacheops(): """ Installs cacheops by numerous monkey patches """ monkey_mix(Manager, ManagerMixin) monkey_mix(QuerySet, QuerySetMixin) QuerySet._cacheprofile = QuerySetMixin._cacheprofile QuerySet._cloning = QuerySetMixin._cloning # DateQuerySet existed in Django 1.7 and earlier # Values*QuerySet existed in Django 1.8 and earlier from django.db.models import query for cls_name in ('ValuesQuerySet', 'ValuesListQuerySet', 'DateQuerySet'): if hasattr(query, cls_name): cls = getattr(query, cls_name) monkey_mix(cls, QuerySetMixin, ['iterator']) try: # Use app registry in Django 1.7 from django.apps import apps admin_used = apps.is_installed('django.contrib.admin') get_models = apps.get_models except ImportError: # Introspect INSTALLED_APPS in older djangos from django.conf import settings admin_used = 'django.contrib.admin' in settings.INSTALLED_APPS from django.db.models import get_models # Install profile and signal handlers for any earlier created models for model in get_models(include_auto_created=True): model._default_manager._install_cacheops(model) # Turn off caching in admin if admin_used: from django.contrib.admin.options import ModelAdmin # Renamed queryset to get_queryset in Django 1.6 method_name = 'get_queryset' if hasattr(ModelAdmin, 'get_queryset') else 'queryset' @monkey(ModelAdmin, name=method_name) def get_queryset(self, request): return get_queryset.original(self, request).nocache() # Bind m2m changed handler m2m_changed.connect(invalidate_m2m) # Make buffers/memoryviews pickleable to serialize binary field data if six.PY2: import copy_reg copy_reg.pickle(buffer, lambda b: (buffer, (bytes(b),))) if six.PY3: import copyreg copyreg.pickle(memoryview, lambda b: (memoryview, (bytes(b),)))
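# Illustrative sketch (not part of the upstream module): how the public API
# above typically fits together. It assumes a configured Django project with
# cacheops installed and a hypothetical `myapp.Article` model; nothing here
# runs at import time.
def _example_usage():  # pragma: no cover
    from myapp.models import Article  # hypothetical model

    @cached_as(Article, timeout=60)
    def article_titles():
        # Cached until any Article changes, then invalidated automatically.
        return list(Article.objects.values_list('title', flat=True))

    titles = article_titles()
    cached_qs = Article.objects.filter(rating=5).cache()    # opt one queryset in
    fresh_qs = Article.objects.filter(rating=5).nocache()   # or bypass the cache
    return titles, cached_qs, fresh_qs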
bsd-3-clause
-3,087,637,271,154,831,400
37.543438
100
0.590255
false
4.13484
false
false
false
timlau/dnf-daemon
daemon/dnfdaemon-session.py
1
13512
#!/usr/bin/python3 # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA # 02110-1301, USA. # (C) 2013-2014 Tim Lauridsen <timlau@fedoraproject.org> # # dnf session bus dBus service (Readonly) # from dnfdaemon.server import Logger import argparse import dbus import dbus.service import dbus.mainloop.glib import dnfdaemon.server import logging DAEMON_ORG = 'org.baseurl.DnfSession' DAEMON_INTERFACE = DAEMON_ORG logger = logging.getLogger('dnfdaemon.session') #--------------------------------------------------------------- DBus Exception class AccessDeniedError(dbus.DBusException): _dbus_error_name = DAEMON_ORG + '.AccessDeniedError' class LockedError(dbus.DBusException): _dbus_error_name = DAEMON_ORG + '.LockedError' class NotImplementedError(dbus.DBusException): _dbus_error_name = DAEMON_ORG + '.NotImplementedError' #------------------------------------------------------------------- Main class class DnfDaemon(dnfdaemon.server.DnfDaemonBase): def __init__(self): dnfdaemon.server.DnfDaemonBase.__init__(self) bus_name = dbus.service.BusName(DAEMON_ORG, bus=dbus.SessionBus()) dbus.service.Object.__init__(self, bus_name, '/') #========================================================================= # DBus Methods #========================================================================= @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='', out_signature='i') def GetVersion(self): ''' Get the daemon version ''' return dnfdaemon.server.API_VERSION @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='', out_signature='b', sender_keyword='sender') def Exit(self, sender=None): ''' Exit the daemon :param sender: ''' if self._can_quit: self.mainloop_quit() return True else: return False @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='', out_signature='b', sender_keyword='sender') def Lock(self, sender=None): ''' Get the yum lock :param sender: ''' if not self._lock: self._lock = sender logger.info('LOCK: Locked by : %s' % sender) return True return False @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='b', out_signature='b', sender_keyword='sender') def SetWatchdogState(self, state, sender=None): ''' Set the Watchdog state :param state: True = Watchdog active, False = Watchdog disabled :type state: boolean (b) ''' self._watchdog_disabled = not state return state @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='', out_signature='b', sender_keyword='sender') def ExpireCache(self, sender=None): ''' Enabled a list of repositories, disabled all other repos :param repo_ids: list of repo ids to enable :param sender: ''' self.working_start(sender) rc = self.expire_cache() return self.working_ended(rc) @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='s', out_signature='as', sender_keyword='sender') def GetRepositories(self, filter, sender=None): ''' Get the value a list of repo ids :param filter: filter to limit the listed 
repositories :param sender: ''' self.working_start(sender) repos = self.get_repositories(filter) return self.working_ended(repos) @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='as', out_signature='', sender_keyword='sender') def SetEnabledRepos(self, repo_ids, sender=None): ''' Enabled a list of repositories, disabled all other repos :param repo_ids: list of repo ids to enable :param sender: ''' self.working_start(sender) self.set_enabled_repos(repo_ids) return self.working_ended() @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='s', out_signature='s', sender_keyword='sender') def GetConfig(self, setting, sender=None): ''' Get the value of a yum config setting it will return a JSON string of the config :param setting: name of setting (debuglevel etc..) :param sender: ''' self.working_start(sender) value = self.get_config(setting) return self.working_ended(value) @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='s', out_signature='s', sender_keyword='sender') def GetRepo(self, repo_id, sender=None): ''' Get information about a give repo_id the repo setting will be returned as dictionary in JSON format :param repo_id: :param sender: ''' self.working_start(sender) value = self.get_repo(repo_id) return self.working_ended(value) @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='sas', out_signature='s', sender_keyword='sender') def GetPackages(self, pkg_filter, fields, sender=None): ''' Get a list of package ids, based on a package pkg_filterer :param pkg_filter: pkg pkg_filter string ('installed','updates' etc) :param sender: ''' self.working_start(sender) value = self.get_packages(pkg_filter, fields) return self.working_ended(value) @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='sasb', out_signature='s', sender_keyword='sender') def GetPackagesByName(self, name, attrs, newest_only, sender=None): ''' Get a list of packages from a name pattern :param name: name pattern :param newest_only: True = get newest packages only :param attrs: list of package attributes to get :param sender: ''' self.working_start(sender) values = self.get_packages_by_name_with_attr(name, attrs, newest_only) return self.working_ended(values) @Logger @dbus.service.method(DAEMON_INTERFACE, in_signature='ss', out_signature='s', sender_keyword='sender') def GetAttribute(self, id, attr, sender=None): ''' Get an attribute from a yum package id it will return a python repr string of the attribute :param id: yum package id :param attr: name of attribute (summary, size, description, changelog etc..) 
        :param sender:
        '''
        self.working_start(sender)
        value = self.get_attribute(id, attr)
        return self.working_ended(value)

    @Logger
    @dbus.service.method(DAEMON_INTERFACE,
                         in_signature='',
                         out_signature='b',
                         sender_keyword='sender')
    def Unlock(self, sender=None):
        ''' release the lock'''
        if self.check_lock(sender):
            logger.info('UNLOCK: Lock Release by %s' % self._lock)
            self._lock = None
            self._reset_base()
            return True

    @Logger
    @dbus.service.method(DAEMON_INTERFACE,
                         in_signature='asasasbbb',
                         out_signature='s',
                         sender_keyword='sender')
    def Search(self, fields, keys, attrs, match_all, newest_only, tags, sender=None):
        '''
        Search for packages where the given fields contain the given key words

        :param fields: list of fields to search in
        :param keys: list of keywords to search for
        :param attrs: list of extra attributes to get
        :param match_all: match all flag, if True return only packages matching all keys
        :param newest_only: return only the newest version of a package
        :param tags: search pkgtags
        '''
        self.working_start(sender)
        result = self.search_with_attr(
            fields, keys, attrs, match_all, newest_only, tags)
        return self.working_ended(result)

    @Logger
    @dbus.service.method(DAEMON_INTERFACE,
                         in_signature='',
                         out_signature='s',
                         sender_keyword='sender')
    def GetGroups(self, sender=None):
        '''
        Return a category/group tree
        '''
        self.working_start(sender)
        value = self.get_groups()
        return self.working_ended(value)

    @Logger
    @dbus.service.method(DAEMON_INTERFACE,
                         in_signature='ssas',
                         out_signature='s',
                         sender_keyword='sender')
    def GetGroupPackages(self, grp_id, grp_flt, fields, sender=None):
        '''
        Get packages in a group by grp_id and grp_flt

        :param grp_id: The Group id
        :param grp_flt: Group Filter (all or default)
        :param fields: list of package attributes to include in list
        :param sender:
        '''
        self.working_start(sender)
        value = self.get_group_pkgs(grp_id, grp_flt, fields)
        return self.working_ended(value)

    #
    # Template for new method
    #
    # @dbus.service.method(DAEMON_INTERFACE,
    #                      in_signature='',
    #                      out_signature='',
    #                      sender_keyword='sender')
    # def NewMethod(self, sender=None ):
    #     '''
    #
    #     '''
    #     self.working_start(sender)
    #     value = True
    #     return self.working_ended(value)
    #

    #=========================================================================
    # DBus signals
    #=========================================================================
    # Parallel Download Progress signals
    @dbus.service.signal(DAEMON_INTERFACE)
    def ErrorMessage(self, error_msg):
        ''' Send an error message '''
        pass

    @dbus.service.signal(DAEMON_INTERFACE)
    def DownloadStart(self, num_files, num_bytes):
        ''' Starting a new parallel download batch '''
        pass

    @dbus.service.signal(DAEMON_INTERFACE)
    def DownloadProgress(self, name, frac, total_frac, total_files):
        ''' Progress for a single instance in the batch '''
        pass

    @dbus.service.signal(DAEMON_INTERFACE)
    def DownloadEnd(self, name, status, msg):
        ''' Download of a single instance ended '''
        pass

    @dbus.service.signal(DAEMON_INTERFACE)
    def RepoMetaDataProgress(self, name, frac):
        ''' Repository Metadata Download progress '''

    #=========================================================================
    # Helper methods
    #=========================================================================
    def working_start(self, sender):
        self.check_lock(sender)
        self._is_working = True
        self._watchdog_count = 0

    def working_ended(self, value=None):
        self._is_working = False
        return value

    def check_lock(self, sender):
        '''
        Check that the current sender owns the dnf lock

        :param sender:
        '''
        if self._lock == sender:
            return True
        else:
            raise LockedError('dnf is locked by another application')


def main():
    parser = argparse.ArgumentParser(description='Dnf D-Bus Session Daemon')
    parser.add_argument('-v', '--verbose', action='store_true')
    parser.add_argument('-d', '--debug', action='store_true')
    parser.add_argument('--notimeout', action='store_true')
    args = parser.parse_args()
    if args.verbose:
        if args.debug:
            dnfdaemon.server.doTextLoggerSetup(logroot='dnfdaemon',
                                               loglvl=logging.DEBUG)
        else:
            dnfdaemon.server.doTextLoggerSetup(logroot='dnfdaemon')

    # setup the DBus mainloop
    dbus.mainloop.glib.DBusGMainLoop(set_as_default=True)
    yd = DnfDaemon()
    if not args.notimeout:
        yd._setup_watchdog()
    yd.mainloop_run()


if __name__ == '__main__':
    main()
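# Illustrative sketch (not part of the upstream daemon): a minimal client for
# the service above using plain dbus-python. It assumes the daemon is already
# running on the session bus; the 'installed' filter and field list passed to
# GetPackages are just example arguments.
def _example_client():  # pragma: no cover
    import dbus
    bus = dbus.SessionBus()
    proxy = bus.get_object(DAEMON_ORG, '/')
    iface = dbus.Interface(proxy, DAEMON_INTERFACE)
    print('API version:', iface.GetVersion())
    if iface.Lock():
        try:
            # Returns a JSON string, matching the daemon's out_signature='s'.
            return iface.GetPackages('installed', ['name', 'version'])
        finally:
            iface.Unlock()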
gpl-2.0
1,706,098,923,312,927,200
32.864662
79
0.544627
false
4.411361
false
false
false
westpark/wallball
docs/steps/code/s2d.py
1
1630
WIDTH = 640 HEIGHT = 480 class Ball(ZRect): pass # # The ball is a red square halfway across the game screen # ball = Ball(0, 0, 30, 30) ball.center = WIDTH / 2, HEIGHT / 2 ball.colour = "red" # # The ball moves one step right and one step down each tick # ball.direction = 1, 1 # # The ball moves at a speed of 3 steps each tick # ball.speed = 3 class Bat(ZRect): pass # # The bat is a green oblong which starts just along the bottom # of the screen and halfway across. # BAT_W = 150 BAT_H = 15 bat = Bat(WIDTH / 2, HEIGHT - BAT_H, BAT_W, BAT_H) bat.colour = "green" def draw(): # # Clear the screen and place the ball at its current position # screen.clear() screen.draw.filled_rect(ball, ball.colour) screen.draw.filled_rect(bat, bat.colour) def on_mouse_move(pos): # # Make the bat follow the horizontal movement of the mouse. # x, y = pos bat.centerx = x def update(): # # Move the ball along its current direction at its current speed # dx, dy = ball.direction ball.move_ip(ball.speed * dx, ball.speed * dy) # # Bounce the ball off the bat # if ball.colliderect(bat): ball.direction = dx, -dy # # Bounce the ball off the left or right walls # if ball.right >= WIDTH or ball.left <= 0: ball.direction = -dx, dy # # If the ball hits the bottom of the screen, you lose # if ball.bottom >= HEIGHT: exit() # # Bounce the ball off the top wall # if ball.top <= 0: ball.direction = dx, -dy
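# Illustrative helper (not part of the original game): every bounce in
# update() just negates one component of the direction vector; horizontal
# surfaces (the bat, the top wall) flip dy, vertical surfaces (the side
# walls) flip dx. The tiny function below expresses that rule on its own.

def reflect(direction, horizontal_surface):
    dx, dy = direction
    return (dx, -dy) if horizontal_surface else (-dx, dy)

# Example: reflect((1, 1), horizontal_surface=True)  == (1, -1)  # bat / top
#          reflect((1, 1), horizontal_surface=False) == (-1, 1)  # side walls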
mit
6,844,163,819,388,710,000
20.054054
68
0.590798
false
3.012939
false
false
false
glottobank/pycldf
src/pycldf/cli_util.py
1
2128
from clldutils.clilib import PathType from pycldf import Dataset, Database # # Copied from distutils.util - because we don't want to deal with deprecation warnings. # def strtobool(val): # pragma: no cover """Convert a string representation of truth to true (1) or false (0). True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if 'val' is anything else. """ val = val.lower() if val in ('y', 'yes', 't', 'true', 'on', '1'): return 1 elif val in ('n', 'no', 'f', 'false', 'off', '0'): return 0 else: raise ValueError("invalid truth value %r" % (val,)) class FlagOrPathType(PathType): def __call__(self, string): try: return bool(strtobool(string)) except ValueError: return super().__call__(string) def add_dataset(parser): parser.add_argument( 'dataset', metavar='DATASET', help="Dataset specification (i.e. path to a CLDF metadata file or to the data file)", type=PathType(type='file'), ) def get_dataset(args): if args.dataset.suffix == '.json': return Dataset.from_metadata(args.dataset) return Dataset.from_data(args.dataset) def add_database(parser, must_exist=True): add_dataset(parser) parser.add_argument( 'db', metavar='SQLITE_DB_PATH', help='Path to the SQLite db file', type=PathType(type='file', must_exist=must_exist), ) parser.add_argument('--infer-primary-keys', action='store_true', default=False) def get_database(args): return Database(get_dataset(args), fname=args.db, infer_primary_keys=args.infer_primary_keys) def add_catalog_spec(parser, name): parser.add_argument( '--' + name, metavar=name.upper(), type=PathType(type='dir'), help='Path to repository clone of {0} data'.format(name.capitalize())) parser.add_argument( '--{0}-version'.format(name), help='Version of {0} data to checkout'.format(name.capitalize()), default=None)
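# Illustrative sketch (not part of the upstream module): how the helpers
# above are typically combined into a small command line tool. The program
# name and what is done with the dataset are ours.
def _example_cli():  # pragma: no cover
    import argparse

    parser = argparse.ArgumentParser(prog='cldf-dump')
    add_dataset(parser)
    args = parser.parse_args()

    # Dispatches to Dataset.from_metadata() or Dataset.from_data()
    # depending on whether a .json metadata file was passed.
    dataset = get_dataset(args)
    print(dataset)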
apache-2.0
8,012,565,799,063,346,000
28.555556
97
0.610902
false
3.546667
false
false
false
azumimuo/family-xbmc-addon
plugin.video.salts/scrapers/yifystreaming_scraper.py
1
4784
""" SALTS XBMC Addon Copyright (C) 2014 tknorris This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ import re import urllib import urlparse from salts_lib import dom_parser from salts_lib import kodi from salts_lib import log_utils from salts_lib import scraper_utils from salts_lib.constants import FORCE_NO_MATCH from salts_lib.constants import VIDEO_TYPES from salts_lib.constants import QUALITIES import scraper BASE_URL = 'http://yss.rocks' GK_URL = '/plugins/gkpluginsphp.php' CATEGORIES = {VIDEO_TYPES.MOVIE: 'category-movies', VIDEO_TYPES.EPISODE: 'category-tv-series'} LOCAL_USER_AGENT = 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.106 Safari/537.36' class YifyStreaming_Scraper(scraper.Scraper): base_url = BASE_URL def __init__(self, timeout=scraper.DEFAULT_TIMEOUT): self.timeout = timeout self.base_url = kodi.get_setting('%s-base_url' % (self.get_name())) @classmethod def provides(cls): return frozenset([VIDEO_TYPES.MOVIE]) @classmethod def get_name(cls): return 'yify-streaming' def resolve_link(self, link): return link def format_source_label(self, item): return '[%s] %s' % (item['quality'], item['host']) def get_sources(self, video): source_url = self.get_url(video) hosters = [] if source_url and source_url != FORCE_NO_MATCH: url = urlparse.urljoin(self.base_url, source_url) html = self._http_get(url, cache_limit=.5) match = re.search('<iframe[^>]+src="([^"]+watch=([^"]+))', html) if match: iframe_url, link_id = match.groups() data = {'link': link_id} headers = {'Referer': iframe_url} headers['User-Agent'] = LOCAL_USER_AGENT gk_url = urlparse.urljoin(self.base_url, GK_URL) html = self._http_get(gk_url, data=data, headers=headers, cache_limit=.5) js_data = scraper_utils.parse_json(html, gk_url) if 'link' in js_data: if isinstance(js_data['link'], list): sources = dict((link['link'], scraper_utils.height_get_quality(link['label'])) for link in js_data['link']) direct = True else: sources = {js_data['link']: QUALITIES.HIGH} direct = False for source in sources: source = source.replace('\\/', '/') if direct: host = self._get_direct_hostname(source) else: host = urlparse.urlparse(source).hostname hoster = {'multi-part': False, 'url': source, 'class': self, 'quality': sources[source], 'host': host, 'rating': None, 'views': None, 'direct': direct} hosters.append(hoster) return hosters def get_url(self, video): return self._default_get_url(video) def search(self, video_type, title, year): search_url = urlparse.urljoin(self.base_url, '/?s=') search_url += urllib.quote_plus(title) html = self._http_get(search_url, cache_limit=.25) elements = dom_parser.parse_dom(html, 'li', {'class': '[^"]*post-\d+[^"]*'}) results = [] for element in elements: match = re.search('href="([^"]+)[^>]+>\s*([^<]+)', element, re.DOTALL) if match: url, match_title_year = match.groups() match = re.search('(.*?)(?:\s+\(?(\d{4})\)?)', match_title_year) if match: match_title, match_year = match.groups() else: match_title = 
match_title_year match_year = '' if not year or not match_year or year == match_year: result = {'title': match_title, 'year': match_year, 'url': scraper_utils.pathify_url(url)} results.append(result) return results
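# Illustrative sketch (not part of the original scraper): a standalone
# demonstration of the title/year pattern used in search() above, which
# splits "<title> (<year>)" strings into a title and an optional year.
def _example_title_year():  # pragma: no cover
    match = re.search('(.*?)(?:\s+\(?(\d{4})\)?)', 'The Matrix (1999)')
    return match.groups()  # -> ('The Matrix', '1999')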
gpl-2.0
7,210,410,083,957,632,000
40.241379
175
0.567099
false
3.908497
false
false
false
ua-snap/downscale
snap_scripts/epscor_sc/compare_downscaling_versions_plots_cmip5_epscor_sc_pr_compare_withNOFIX.py
1
9093
# # # # # compare tasmin, tas, tasmax in a timeseries of GeoTiff files # # # # def transform_from_latlon( lat, lon ): ''' simple way to make an affine transform from lats and lons coords ''' from affine import Affine lat = np.asarray( lat ) lon = np.asarray(lon) trans = Affine.translation(lon[0], lat[0]) scale = Affine.scale(lon[1] - lon[0], lat[1] - lat[0]) return trans * scale def rasterize( shapes, coords, latitude='latitude', longitude='longitude', fill=None, **kwargs ): ''' Rasterize a list of (geometry, fill_value) tuples onto the given xarray coordinates. This only works for 1d latitude and longitude arrays. ''' from rasterio import features if fill == None: fill = np.nan transform = transform_from_latlon( coords[ latitude ], coords[ longitude ] ) out_shape = ( len( coords[ latitude ] ), len( coords[ longitude ] ) ) raster = features.rasterize(shapes, out_shape=out_shape, fill=fill, transform=transform, dtype=float, **kwargs) spatial_coords = {latitude: coords[latitude], longitude: coords[longitude]} return xr.DataArray(raster, coords=spatial_coords, dims=(latitude, longitude)) def sort_files( files, split_on='_', elem_month=-2, elem_year=-1 ): ''' sort a list of files properly using the month and year parsed from the filename. This is useful with SNAP data since the standard is to name files like '<prefix>_MM_YYYY.tif'. If sorted using base Pythons sort/sorted functions, things will be sorted by the first char of the month, which makes thing go 1, 11, ... which sucks for timeseries this sorts it properly following SNAP standards as the default settings. ARGUMENTS: ---------- files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob. split_on = [str] `str` character to split the filename on. default:'_', SNAP standard. elem_month = [int] slice element from resultant split filename list. Follows Python slicing syntax. default:-2. For SNAP standard. elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax. default:-1. For SNAP standard. RETURNS: -------- sorted `list` by month and year ascending. ''' import pandas as pd months = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_month]) for fn in files ] years = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_year]) for fn in files ] df = pd.DataFrame( {'fn':files, 'month':months, 'year':years} ) df_sorted = df.sort_values( ['year', 'month' ] ) return df_sorted.fn.tolist() def only_years( files, begin=1901, end=2100, split_on='_', elem_year=-1 ): ''' return new list of filenames where they are truncated to begin:end ARGUMENTS: ---------- files = [list] list of `str` pathnames to be sorted by month and year. usually from glob.glob. begin = [int] four digit integer year of the begin time default:1901 end = [int] four digit integer year of the end time default:2100 split_on = [str] `str` character to split the filename on. default:'_', SNAP standard. elem_year = [int] slice element from resultant split filename list. Follows Python slicing syntax. default:-1. For SNAP standard. RETURNS: -------- sliced `list` to begin and end year. 
''' import pandas as pd years = [ int(os.path.basename( fn ).split('.')[0].split( split_on )[elem_year]) for fn in files ] df = pd.DataFrame( { 'fn':files, 'year':years } ) df_slice = df[ (df.year >= begin ) & (df.year <= end ) ] return df_slice.fn.tolist() def masked_mean( fn, bounds=None ): ''' get mean of the full domain since the data are already clipped mostly used for processing lots of files in parallel.''' import numpy as np import rasterio with rasterio.open( fn ) as rst: if bounds: window = rst.window( *bounds ) else: window = rst.window( *rst.bounds ) mask = (rst.read_masks( 1 ) == 0) arr = np.ma.masked_array( rst.read( 1, window=window ), mask=mask ) return np.mean( arr ) if __name__ == '__main__': import os, glob import geopandas as gpd import numpy as np import xarray as xr import matplotlib matplotlib.use( 'agg' ) from matplotlib import pyplot as plt from pathos.mp_map import mp_map import pandas as pd import geopandas as gpd # args / set working dir base_dir = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data' os.chdir( base_dir ) # scenarios = ['rcp60', 'rcp85'] scenarios = ['historical'] shp_fn = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/SCTC_studyarea/Kenai_StudyArea.shp' shp = gpd.read_file( shp_fn ) bounds = shp.bounds # models = ['5ModelAvg','CRU_TS323','GFDL-CM3','GISS-E2-R','IPSL-CM5A-LR','MRI-CGCM3','NCAR-CCSM4'] # models = ['GFDL-CM3','GISS-E2-R','IPSL-CM5A-LR','MRI-CGCM3','NCAR-CCSM4'] models = ['ts323'] variables_list = [['pr']]# ['tasmax', 'tas', 'tasmin']]#, # models = ['CRU_TS323'] # begin_end_groups = [[2016,2016],[2010,2020],[2095, 2100]] begin_end_groups = [[1916, 1916],[1950, 1960],[1995, 2000]] for scenario in scenarios: for variables in variables_list: for m in models: for begin, end in begin_end_groups: # not fully wired-up yet if m == 'ts323': old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/historical/CRU/CRU_TS32' # begin = 1950 # end = 1965 else: if scenario == 'historical': old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/historical/AR5_CMIP5_models' # begin = 1950 # end = 1965 else: old_dir = '/Data/Base_Data/Climate/AK_CAN_2km/projected/AR5_CMIP5_models' # begin = 2095 # end = 2100 figsize = (16,9) out = {} for v in variables: path = os.path.join( base_dir,'downscaled', m, scenario, v ) print( path ) files = glob.glob( os.path.join( path, '*.tif' ) ) files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) ) out[ v ] = mp_map( masked_mean, files, nproc=4 ) if v == 'tas' or v == 'pr': if m == 'ts323': path = os.path.join( old_dir, v ) print( path ) else: path = os.path.join( old_dir, scenario, m, v ) files = glob.glob( os.path.join( path, '*.tif' ) ) files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) ) out[ v+'_old' ] = mp_map( masked_mean, files, nproc=4 ) # nofix path = os.path.join( base_dir,'downscaled_pr_nofix', m, scenario, v ) print( path ) files = glob.glob( os.path.join( path, '*.tif' ) ) files = sort_files( only_years( files, begin=begin, end=end, split_on='_', elem_year=-1 ) ) out[ v+'_nofix' ] = mp_map( masked_mean, files, nproc=4 ) plot_df = pd.DataFrame( out ) plot_df.index = pd.date_range( start=str(begin), end=str(end+1), freq='M' ) # sort the columns for output plotting cleanliness: if 'tas' in variables: col_list = ['tasmax', 'tas_old', 'tas', 'tasmin'] elif 'pr' in variables: col_list = ['pr', 'pr_old', 'pr_nofix'] plot_df = plot_df[ col_list ] # get em in the order for plotting if v == 'pr': plot_df = 
plot_df.round()[['pr','pr_old']] # now plot the dataframe if begin == end: title = 'EPSCoR SC AOI Temp Metrics {} {} {}'.format( m, scenario, begin ) else: title = 'EPSCoR SC AOI Temp Metrics {} {} {} - {}'.format( m, scenario, begin, end ) if 'tas' in variables: colors = ['red', 'black', 'blue', 'red' ] else: colors = [ 'blue', 'black', 'darkred' ] ax = plot_df.plot( kind='line', title=title, figsize=figsize, color=colors ) output_dir = os.path.join( base_dir, 'compare_downscaling_versions_PR_no_fix' ) if not os.path.exists( output_dir ): os.makedirs( output_dir ) # now plot the dataframe out_metric_fn = 'temps' if 'pr' in variables: out_metric_fn = 'prec' if begin == end: output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}.png'.format( out_metric_fn, m, scenario, begin ) ) else: output_filename = os.path.join( output_dir,'mean_{}_epscor_sc_{}_{}_{}_{}.png'.format( out_metric_fn, m, scenario, begin, end ) ) plt.savefig( output_filename, dpi=400 ) plt.close() # # # PRISM TEST VERSION DIFFERENCES # # # # # # # # import rasterio # import numpy as np # import os, glob, itertools # base_path = '/workspace/Shared/Tech_Projects/EPSCoR_Southcentral/project_data/prism/raw_prism' # variables = [ 'tmax', 'tmin' ] # for variable in variables: # ak_olds = sorted( glob.glob( os.path.join( base_path, 'prism_raw_older', 'ak', variable, '*.asc' ) ) ) # ak_news = sorted( glob.glob( os.path.join( base_path, 'prism_raw_2016', 'ak', variable, '*.asc' ) ) ) # olds = np.array([ rasterio.open( i ).read( 1 ) for i in ak_olds if '_14' not in i ]) # news = np.array([ rasterio.open( i ).read( 1 ) *.10 for i in ak_news if '_14' not in i ]) # out = olds - news # out[ (olds == -9999.0) | (news == -9999.0) ] = 0 # uniques = np.unique( out ) # uniques[ uniques > 0.01 ]
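# Illustrative check (not part of the original script): sort_files() and
# only_years() parse SNAP-style '<prefix>_MM_YYYY.tif' names, so a quick
# self-contained test with fabricated filenames looks like this:
#
# names = ['tas_mean_C_ar5_GFDL-CM3_rcp60_11_2016.tif',
#          'tas_mean_C_ar5_GFDL-CM3_rcp60_2_2016.tif',
#          'tas_mean_C_ar5_GFDL-CM3_rcp60_1_2016.tif']
# sort_files(names)  # -> months ordered 1, 2, 11 (not lexicographic 1, 11, 2)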
mit
-2,754,196,695,637,991,000
37.858974
135
0.634114
false
2.894017
false
false
false
Ape/sc2skills
main.py
1
2472
#!/usr/bin/env python3 import argparse import collections import enum import trueskill from ladder import Ladder DRAW_PROBABILITY = 0.001 OPPONENT_SIGMA = 0.1 Result = enum.Enum("Result", "win loss") Game = collections.namedtuple("Game", "result mmr label") def load_games(games_file): games = [] with open(games_file, "r") as f: lines = f.readlines() for line in lines: data = line.strip().split("#")[0] if data: result, mmr, label = data.split() games.append(Game(Result[result], int(mmr), label)) return games def print_board(ladder, ratings): if len(ratings) == 0: print("No ratings.") return def sort_by_score(items): return reversed(sorted(items, key=lambda x: x[1].mu)) def max_name_width(items): return max(len(x[0]) for x in items) items = ratings.items() items = sort_by_score(items) items = list(items) name_width = max_name_width(items) for name, rating in items: league = ladder.get_league(rating.mu) print("{name:{width}s} {mu:.0f} ± {sigma:.0f} ({league})" .format(name=name, width=name_width, mu=rating.mu, sigma=2*rating.sigma, league=league)) def rate(ladder, rating, game): opponent = trueskill.Rating(mu=game.mmr, sigma=OPPONENT_SIGMA * ladder.sigma) if game.result is Result.win: return trueskill.rate_1vs1(rating, opponent)[0] else: return trueskill.rate_1vs1(opponent, rating)[1] def main(): parser = argparse.ArgumentParser() parser.add_argument("region") parser.add_argument("games_file") args = parser.parse_args() try: ladder = Ladder(args.region) except KeyError: print("Error: Region '{}' not recognized".format(args.region)) return trueskill.setup(mu=ladder.mu, sigma=ladder.sigma, beta=0.5 * ladder.sigma, tau=0.01 * ladder.sigma, draw_probability=DRAW_PROBABILITY) ratings = collections.defaultdict(lambda: trueskill.Rating()) try: games = load_games(args.games_file) except OSError as e: print("Error: Cannot read the provided games file:") print(" {}".format(e)) return for game in games: ratings[game.label] = rate(ladder, ratings[game.label], game) print_board(ladder, ratings) if __name__ == "__main__": main()
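# Illustrative input (not part of the original program): load_games() expects
# whitespace-separated "result mmr label" records, with '#' starting a
# comment. A hypothetical games file might look like:
#
#   win  3500 ladder     # beat a ~3500 MMR opponent
#   loss 3650 ladder
#   win  3400 unranked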
mit
-6,900,255,387,101,599,000
25.569892
79
0.604614
false
3.408276
false
false
false
srkukarni/heron
integration_test/src/python/integration_test/topology/one_bolt_multi_tasks.py
1
1322
# copyright 2016 twitter. all rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # pylint: disable=missing-docstring from heron.api.src.python.stream import Grouping from integration_test.src.python.integration_test.core import TestTopologyBuilder from integration_test.src.python.integration_test.common.bolt import IdentityBolt from integration_test.src.python.integration_test.common.spout import ABSpout def one_bolt_multi_tasks_builder(topology_name, http_server_url): builder = TestTopologyBuilder(topology_name, http_server_url) ab_spout = builder.add_spout("ab-spout", ABSpout, 1) builder.add_bolt("identity-bolt", IdentityBolt, inputs={ab_spout: Grouping.SHUFFLE}, par=3, optional_outputs=['word']) return builder.create_topology()
apache-2.0
2,974,093,918,747,431,000
41.645161
81
0.748109
false
3.831884
true
false
false
yaybu/touchdown
touchdown/tests/stubs/aws/rest_api.py
1
1606
# Copyright 2016 Isotoma Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from .service import ServiceStubber class RestApiStubber(ServiceStubber): client_service = "apigateway" def add_get_rest_apis_empty_response(self): return self.add_response( "get_rest_apis", service_response={}, expected_params={} ) def add_get_rest_apis_one_response(self): return self.add_response( "get_rest_apis", service_response={ "items": [ {"name": self.resource.name, "id": self.make_id(self.resource.name)} ] }, expected_params={}, ) def add_create_rest_api(self): return self.add_response( "create_rest_api", service_response={}, expected_params={"name": self.resource.name}, ) def add_delete_rest_api(self): return self.add_response( "delete_rest_api", service_response={}, expected_params={"restApiId": self.make_id(self.resource.name)}, )
apache-2.0
-3,704,852,946,402,137,000
31.12
88
0.62142
false
4.076142
false
false
false
gedaskir/qmeq
qmeq/approach/base/redfield.py
1
6053
"""Module containing python functions, which generate first order Redfield kernel. For docstrings see documentation of module neumann1.""" import numpy as np import itertools from ...wrappers.mytypes import doublenp from ...wrappers.mytypes import complexnp from ..aprclass import Approach from .neumann1 import Approach1vN # --------------------------------------------------------------------------------------------------- # Redfield approach # --------------------------------------------------------------------------------------------------- class ApproachRedfield(Approach): kerntype = 'pyRedfield' def prepare_arrays(self): Approach1vN.prepare_arrays(self) def clean_arrays(self): Approach1vN.clean_arrays(self) def generate_fct(self): Approach1vN.generate_fct(self) def generate_coupling_terms(self, b, bp, bcharge): Tba, phi1fct = self.leads.Tba, self.phi1fct si, kh = self.si, self.kernel_handler nleads, statesdm = si.nleads, si.statesdm acharge = bcharge-1 ccharge = bcharge+1 # -------------------------------------------------- for a, ap in itertools.product(statesdm[acharge], statesdm[acharge]): if kh.is_included(a, ap, acharge): bpap = si.get_ind_dm1(bp, ap, acharge) ba = si.get_ind_dm1(b, a, acharge) fct_aap = 0 for l in range(nleads): fct_aap += (+ Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, bpap, 0].conjugate() - Tba[l, b, a]*Tba[l, ap, bp]*phi1fct[l, ba, 0]) kh.set_matrix_element(fct_aap, b, bp, bcharge, a, ap, acharge) # -------------------------------------------------- for bpp in statesdm[bcharge]: if kh.is_included(bpp, bp, bcharge): fct_bppbp = 0 for a in statesdm[acharge]: bppa = si.get_ind_dm1(bpp, a, acharge) for l in range(nleads): fct_bppbp += +Tba[l, b, a]*Tba[l, a, bpp]*phi1fct[l, bppa, 1].conjugate() for c in statesdm[ccharge]: cbpp = si.get_ind_dm1(c, bpp, bcharge) for l in range(nleads): fct_bppbp += +Tba[l, b, c]*Tba[l, c, bpp]*phi1fct[l, cbpp, 0] kh.set_matrix_element(fct_bppbp, b, bp, bcharge, bpp, bp, bcharge) # -------------------------------------------------- if kh.is_included(b, bpp, bcharge): fct_bbpp = 0 for a in statesdm[acharge]: bppa = si.get_ind_dm1(bpp, a, acharge) for l in range(nleads): fct_bbpp += -Tba[l, bpp, a]*Tba[l, a, bp]*phi1fct[l, bppa, 1] for c in statesdm[ccharge]: cbpp = si.get_ind_dm1(c, bpp, bcharge) for l in range(nleads): fct_bbpp += -Tba[l, bpp, c]*Tba[l, c, bp]*phi1fct[l, cbpp, 0].conjugate() kh.set_matrix_element(fct_bbpp, b, bp, bcharge, b, bpp, bcharge) # -------------------------------------------------- for c, cp in itertools.product(statesdm[ccharge], statesdm[ccharge]): if kh.is_included(c, cp, ccharge): cpbp = si.get_ind_dm1(cp, bp, bcharge) cb = si.get_ind_dm1(c, b, bcharge) fct_ccp = 0 for l in range(nleads): fct_ccp += (+ Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cpbp, 1] - Tba[l, b, c]*Tba[l, cp, bp]*phi1fct[l, cb, 1].conjugate()) kh.set_matrix_element(fct_ccp, b, bp, bcharge, c, cp, ccharge) # -------------------------------------------------- def generate_current(self): E, Tba = self.qd.Ea, self.leads.Tba phi1fct, phi1fct_energy = self.phi1fct, self.phi1fct_energy si = self.si ncharge, nleads, statesdm = si.ncharge, si.nleads, si.statesdm phi1 = self.phi1 current = self.current energy_current = self.energy_current kh = self.kernel_handler for charge in range(ncharge-1): ccharge = charge+1 bcharge = charge for c, b in itertools.product(statesdm[ccharge], statesdm[bcharge]): cb = si.get_ind_dm1(c, b, bcharge) for l in range(nleads): current_l, energy_current_l = 0, 0 for bp in statesdm[bcharge]: if not kh.is_included(bp, b, 
bcharge): continue phi0bpb = kh.get_phi0_element(bp, b, bcharge) cbp = si.get_ind_dm1(c, bp, bcharge) fct1 = phi1fct[l, cbp, 0] fct1h = phi1fct_energy[l, cbp, 0] phi1[l, cb] += Tba[l, c, bp]*phi0bpb*fct1 current_l += Tba[l, b, c]*Tba[l, c, bp]*phi0bpb*fct1 energy_current_l += Tba[l, b, c]*Tba[l, c, bp]*phi0bpb*fct1h for cp in statesdm[ccharge]: if not kh.is_included(c, cp, ccharge): continue phi0ccp = kh.get_phi0_element(c, cp, ccharge) cpb = si.get_ind_dm1(cp, b, bcharge) fct2 = phi1fct[l, cpb, 1] fct2h = phi1fct_energy[l, cpb, 1] phi1[l, cb] += Tba[l, cp, b]*phi0ccp*fct2 current_l += Tba[l, b, c]*phi0ccp*Tba[l, cp, b]*fct2 energy_current_l += Tba[l, b, c]*phi0ccp*Tba[l, cp, b]*fct2h current[l] += -2*current_l.imag energy_current[l] += -2*energy_current_l.imag self.heat_current[:] = energy_current - current*self.leads.mulst # ---------------------------------------------------------------------------------------------------
bsd-2-clause
-8,804,896,393,798,070,000
43.507353
101
0.452999
false
3.325824
false
false
false
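
The generate_coupling_terms method in the record above is a nested reduction: for every state pair in the neighbouring charge sectors it sums a lead-resolved product of tunneling amplitudes and energy-integral factors, then stores the result through a kernel handler. A self-contained sketch of that accumulation pattern (toy sizes and stand-in names, not the qmeq API):

import itertools
import numpy as np

statesdm = {0: [0], 1: [1, 2]}       # toy many-body states grouped by charge
nleads = 2
Tba = np.random.rand(nleads, 3, 3)   # tunneling amplitudes (nleads, nstates, nstates)

kernel = {}
for a, ap in itertools.product(statesdm[1], statesdm[1]):
    # sum over leads, as in the fct_aap loop of generate_coupling_terms
    fct = sum(Tba[l, 0, a] * Tba[l, ap, 0] for l in range(nleads))
    kernel[(a, ap)] = kernel.get((a, ap), 0.0) + fct

print(kernel)
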
invenia/Arbiter
setup.py
1
1178
""" to install: python setup.py install """ from setuptools import setup setup( name="arbiter", description="A task-dependency solver", long_description=open('README.rst').read(), version="0.4.0", author="Brendan Curran-Johnson", author_email="brendan.curran.johnson@invenia.ca", license="MIT License", url="https://github.com/invenia/Arbiter", packages=( "arbiter", ), install_requires=( 'enum34', 'futures', ), tests_require=( 'coverage', 'nose', 'python-coveralls', ), classifiers=( "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ), )
mit
2,089,709,813,790,238,000
24.06383
70
0.573854
false
4.147887
false
true
false
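
The install_requires in this record pins enum34 and futures unconditionally, although both are Python 2 backports. On a setuptools new enough to understand PEP 508 environment markers, the same intent could be scoped per interpreter version — a sketch under that assumption, not the project's actual packaging:

from setuptools import setup

setup(
    name="arbiter",
    version="0.4.0",
    packages=("arbiter",),
    install_requires=(
        'enum34; python_version < "3.4"',   # stdlib enum exists from 3.4 on
        'futures; python_version < "3"',    # concurrent.futures is stdlib on 3.x
    ),
)
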
ak15199/rop
art/panimage.py
1
2671
from ._baseclass import ArtBaseClass

import requests

try:
    from thread import start_new_thread, allocate_lock
except:
    from _thread import start_new_thread, allocate_lock

import logging

from opc.image import Image


def position(matrix, image):
    x = 0
    y = 0
    while y < (image.height-matrix.height):
        while x < image.width-matrix.width:
            yield x, y
            x += 1
        dy = 0
        while dy < matrix.height and y < (image.height-matrix.height):
            yield x, y
            y += 1
            dy += 1
        while x > 0 and y < (image.height-matrix.height):
            yield x, y
            x -= 1
        dy = 0
        while dy < matrix.height and y < (image.height-matrix.height):
            yield x, y
            y += 1
            dy += 1


class Art(ArtBaseClass):

    description = "Grab random images from the internet and pan over them"

    def __init__(self, matrix, config):
        w = matrix.width*16
        h = matrix.height*16
        self.url = "http://lorempixel.com/%s/%d/" % (w, h)

        self.image_active = None
        self._load()

    def start(self, matrix):
        matrix.clear()

    def _load(self):
        self.image_loaded = None
        start_new_thread(Art._loadthread, (self,))

    def _consume(self, matrix):
        if not self.image_loaded:
            return False

        self.image_active = self.image_loaded
        self._load()

        self.position = position(matrix, self.image_active)

        return True

    def _loadthread(self):
        logging.info("_loadthread begin")
        try:
            r = requests.get(self.url)
            if r.status_code == 200:
                self.image_loaded = Image(bytestream=r.content)
            else:
                logging.error("_loadthread code %d, using fallback"%r.status_code)
                self.image_loaded = Image(filename="assets/images/lena.jpg")
        except Exception as e:
            logging.error("_loadthread exception '%s', using fallback"%str(e))
            self.image_loaded = Image(filename="assets/images/lena.jpg")

    def refresh(self, matrix):
        if not self.image_active:  # no image is active
            if not self._consume(matrix):  # can we get a fresh image?
                return  # return and re-try next cycle if still pending

        try:
            x, y = next(self.position)
        except:
            self.image_active = False  # borked over image end
            return  # try and load new image next cycle

        buf = self.image_active.translate(matrix, scale=1, x=x, y=y)
        matrix.copyBuffer(buf)

    def interval(self):
        return 40
gpl-3.0
-6,688,805,793,518,084,000
25.979798
84
0.561587
false
3.893586
false
false
false
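
refresh() in the record above never blocks on the network because _load() hands the slow fetch to a worker thread and _consume() swaps the finished image into the active slot. A self-contained sketch of that double-buffer handoff (stand-in names, no LED matrix required):

import threading
import time

class DoubleBuffer(object):
    def __init__(self, producer):
        self.active = None
        self.loaded = None
        self._producer = producer
        self._spawn()

    def _spawn(self):
        self.loaded = None
        threading.Thread(target=self._fill).start()

    def _fill(self):
        self.loaded = self._producer()   # slow work happens off the main loop

    def consume(self):
        if self.loaded is None:
            return False                 # not ready; caller retries next cycle
        self.active, self.loaded = self.loaded, None
        self._spawn()                    # start fetching the next item right away
        return True

buf = DoubleBuffer(lambda: time.sleep(0.1) or "image")
while not buf.consume():
    time.sleep(0.05)
print(buf.active)
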
nuagenetworks/vspk-python
vspk/v6/nuallredundancygroup.py
1
24239
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc, 2017 Nokia
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#     * Redistributions of source code must retain the above copyright
#       notice, this list of conditions and the following disclaimer.
#     * Redistributions in binary form must reproduce the above copyright
#       notice, this list of conditions and the following disclaimer in the
#       documentation and/or other materials provided with the distribution.
#     * Neither the name of the copyright holder nor the names of its
#       contributors may be used to endorse or promote products derived from
#       this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

from .fetchers import NUPermissionsFetcher
from .fetchers import NUMetadatasFetcher
from .fetchers import NUGlobalMetadatasFetcher

from bambou import NURESTObject


class NUAllRedundancyGroup(NURESTObject):
    """ Represents a AllRedundancyGroup in the VSD

        Notes:
            A read only API to get all redundancy gateway objects in the VSD
            environment. Use the ID field to then actually manage the redundancy
            gateway using the redundancy gateway API entity.
    """

    __rest_name__ = "allredundancygroup"
    __resource_name__ = "allredundancygroups"

    ## Constants
    CONST_PERSONALITY_EVDFB = "EVDFB"
    CONST_PERSONALITY_EVDF = "EVDF"
    CONST_PERSONALITY_NUAGE_210_WBX_32_Q = "NUAGE_210_WBX_32_Q"
    CONST_PERSONALITY_NSGDUC = "NSGDUC"
    CONST_PERSONALITY_OTHER = "OTHER"
    CONST_PERSONALITY_VDFG = "VDFG"
    CONST_PERSONALITY_NSG = "NSG"
    CONST_PERMITTED_ACTION_EXTEND = "EXTEND"
    CONST_PERMITTED_ACTION_INSTANTIATE = "INSTANTIATE"
    CONST_PERSONALITY_DC7X50 = "DC7X50"
    CONST_REDUNDANT_GATEWAY_STATUS_FAILED = "FAILED"
    CONST_PERSONALITY_HARDWARE_VTEP = "HARDWARE_VTEP"
    CONST_PERSONALITY_VSA = "VSA"
    CONST_PERMITTED_ACTION_USE = "USE"
    CONST_PERSONALITY_VSG = "VSG"
    CONST_PERMITTED_ACTION_READ = "READ"
    CONST_PERSONALITY_VRSB = "VRSB"
    CONST_REDUNDANT_GATEWAY_STATUS_SUCCESS = "SUCCESS"
    CONST_PERSONALITY_NETCONF_7X50 = "NETCONF_7X50"
    CONST_PERSONALITY_NUAGE_210_WBX_48_S = "NUAGE_210_WBX_48_S"
    CONST_PERSONALITY_VRSG = "VRSG"
    CONST_ENTITY_SCOPE_ENTERPRISE = "ENTERPRISE"
    CONST_ENTITY_SCOPE_GLOBAL = "GLOBAL"
    CONST_PERSONALITY_NETCONF_THIRDPARTY_HW_VTEP = "NETCONF_THIRDPARTY_HW_VTEP"
    CONST_PERMITTED_ACTION_ALL = "ALL"
    CONST_PERMITTED_ACTION_DEPLOY = "DEPLOY"
    CONST_PERSONALITY_NSGBR = "NSGBR"

    def __init__(self, **kwargs):
        """ Initializes a AllRedundancyGroup instance

            Notes:
                You can specify all parameters while calling this methods.
                A special argument named `data` will enable you to load the
                object from a Python dictionary

            Examples:
                >>> allredundancygroup = NUAllRedundancyGroup(id=u'xxxx-xxx-xxx-xxx', name=u'AllRedundancyGroup')
                >>> allredundancygroup = NUAllRedundancyGroup(data=my_dict)
        """

        super(NUAllRedundancyGroup, self).__init__()

        # Read/Write Attributes
        self._name = None
        self._last_updated_by = None
        self._last_updated_date = None
        self._gateway_peer1_autodiscovered_gateway_id = None
        self._gateway_peer1_connected = None
        self._gateway_peer1_id = None
        self._gateway_peer1_name = None
        self._gateway_peer2_autodiscovered_gateway_id = None
        self._gateway_peer2_connected = None
        self._gateway_peer2_name = None
        self._redundant_gateway_status = None
        self._permitted_action = None
        self._personality = None
        self._description = None
        self._embedded_metadata = None
        self._enterprise_id = None
        self._entity_scope = None
        self._creation_date = None
        self._vtep = None
        self._owner = None
        self._external_id = None

        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_by", remote_name="lastUpdatedBy", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="last_updated_date", remote_name="lastUpdatedDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer1_autodiscovered_gateway_id", remote_name="gatewayPeer1AutodiscoveredGatewayID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer1_connected", remote_name="gatewayPeer1Connected", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer1_id", remote_name="gatewayPeer1ID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer1_name", remote_name="gatewayPeer1Name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer2_autodiscovered_gateway_id", remote_name="gatewayPeer2AutodiscoveredGatewayID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer2_connected", remote_name="gatewayPeer2Connected", attribute_type=bool, is_required=False, is_unique=False)
        self.expose_attribute(local_name="gateway_peer2_name", remote_name="gatewayPeer2Name", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="redundant_gateway_status", remote_name="redundantGatewayStatus", attribute_type=str, is_required=False, is_unique=False, choices=[u'FAILED', u'SUCCESS'])
        self.expose_attribute(local_name="permitted_action", remote_name="permittedAction", attribute_type=str, is_required=False, is_unique=False, choices=[u'ALL', u'DEPLOY', u'EXTEND', u'INSTANTIATE', u'READ', u'USE'])
        self.expose_attribute(local_name="personality", remote_name="personality", attribute_type=str, is_required=False, is_unique=False, choices=[u'DC7X50', u'EVDF', u'EVDFB', u'HARDWARE_VTEP', u'NETCONF_7X50', u'NETCONF_THIRDPARTY_HW_VTEP', u'NSG', u'NSGBR', u'NSGDUC', u'NUAGE_210_WBX_32_Q', u'NUAGE_210_WBX_48_S', u'OTHER', u'VDFG', u'VRSB', u'VRSG', u'VSA', u'VSG'])
        self.expose_attribute(local_name="description", remote_name="description", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="embedded_metadata", remote_name="embeddedMetadata", attribute_type=list, is_required=False, is_unique=False)
        self.expose_attribute(local_name="enterprise_id", remote_name="enterpriseID", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="entity_scope", remote_name="entityScope", attribute_type=str, is_required=False, is_unique=False, choices=[u'ENTERPRISE', u'GLOBAL'])
        self.expose_attribute(local_name="creation_date", remote_name="creationDate", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="vtep", remote_name="vtep", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="owner", remote_name="owner", attribute_type=str, is_required=False, is_unique=False)
        self.expose_attribute(local_name="external_id", remote_name="externalID", attribute_type=str, is_required=False, is_unique=True)

        # Fetchers
        self.permissions = NUPermissionsFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.metadatas = NUMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")
        self.global_metadatas = NUGlobalMetadatasFetcher.fetcher_with_object(parent_object=self, relationship="child")

        self._compute_args(**kwargs)

    # Properties

    @property
    def name(self):
        """ Get name value.

            Notes:
                Name of the Redundancy Group
        """
        return self._name

    @name.setter
    def name(self, value):
        """ Set name value.

            Notes:
                Name of the Redundancy Group
        """
        self._name = value

    @property
    def last_updated_by(self):
        """ Get last_updated_by value.

            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        return self._last_updated_by

    @last_updated_by.setter
    def last_updated_by(self, value):
        """ Set last_updated_by value.

            Notes:
                ID of the user who last updated the object.
                This attribute is named `lastUpdatedBy` in VSD API.
        """
        self._last_updated_by = value

    @property
    def last_updated_date(self):
        """ Get last_updated_date value.

            Notes:
                Time stamp when this object was last updated.
                This attribute is named `lastUpdatedDate` in VSD API.
        """
        return self._last_updated_date

    @last_updated_date.setter
    def last_updated_date(self, value):
        """ Set last_updated_date value.

            Notes:
                Time stamp when this object was last updated.
                This attribute is named `lastUpdatedDate` in VSD API.
        """
        self._last_updated_date = value

    @property
    def gateway_peer1_autodiscovered_gateway_id(self):
        """ Get gateway_peer1_autodiscovered_gateway_id value.

            Notes:
                The Auto Discovered Gateway configuration owner in this Redundant Group.
                This attribute is named `gatewayPeer1AutodiscoveredGatewayID` in VSD API.
        """
        return self._gateway_peer1_autodiscovered_gateway_id

    @gateway_peer1_autodiscovered_gateway_id.setter
    def gateway_peer1_autodiscovered_gateway_id(self, value):
        """ Set gateway_peer1_autodiscovered_gateway_id value.

            Notes:
                The Auto Discovered Gateway configuration owner in this Redundant Group.
                This attribute is named `gatewayPeer1AutodiscoveredGatewayID` in VSD API.
        """
        self._gateway_peer1_autodiscovered_gateway_id = value

    @property
    def gateway_peer1_connected(self):
        """ Get gateway_peer1_connected value.

            Notes:
                Indicates status of the authoritative gateway of this Redundancy Group.
                This attribute is named `gatewayPeer1Connected` in VSD API.
        """
        return self._gateway_peer1_connected

    @gateway_peer1_connected.setter
    def gateway_peer1_connected(self, value):
        """ Set gateway_peer1_connected value.

            Notes:
                Indicates status of the authoritative gateway of this Redundancy Group.
                This attribute is named `gatewayPeer1Connected` in VSD API.
        """
        self._gateway_peer1_connected = value

    @property
    def gateway_peer1_id(self):
        """ Get gateway_peer1_id value.

            Notes:
                The gateway configuration owner in this Redundant Group. when Redundant
                Group is deleted this gateway will recieve vport associations
                This attribute is named `gatewayPeer1ID` in VSD API.
        """
        return self._gateway_peer1_id

    @gateway_peer1_id.setter
    def gateway_peer1_id(self, value):
        """ Set gateway_peer1_id value.

            Notes:
                The gateway configuration owner in this Redundant Group. when Redundant
                Group is deleted this gateway will recieve vport associations
                This attribute is named `gatewayPeer1ID` in VSD API.
        """
        self._gateway_peer1_id = value

    @property
    def gateway_peer1_name(self):
        """ Get gateway_peer1_name value.

            Notes:
                The gateway configuration owner name in this Redundant Group
                This attribute is named `gatewayPeer1Name` in VSD API.
        """
        return self._gateway_peer1_name

    @gateway_peer1_name.setter
    def gateway_peer1_name(self, value):
        """ Set gateway_peer1_name value.

            Notes:
                The gateway configuration owner name in this Redundant Group
                This attribute is named `gatewayPeer1Name` in VSD API.
        """
        self._gateway_peer1_name = value

    @property
    def gateway_peer2_autodiscovered_gateway_id(self):
        """ Get gateway_peer2_autodiscovered_gateway_id value.

            Notes:
                The Auto Discovered Gateway peer in this Redundant Group
                This attribute is named `gatewayPeer2AutodiscoveredGatewayID` in VSD API.
        """
        return self._gateway_peer2_autodiscovered_gateway_id

    @gateway_peer2_autodiscovered_gateway_id.setter
    def gateway_peer2_autodiscovered_gateway_id(self, value):
        """ Set gateway_peer2_autodiscovered_gateway_id value.

            Notes:
                The Auto Discovered Gateway peer in this Redundant Group
                This attribute is named `gatewayPeer2AutodiscoveredGatewayID` in VSD API.
        """
        self._gateway_peer2_autodiscovered_gateway_id = value

    @property
    def gateway_peer2_connected(self):
        """ Get gateway_peer2_connected value.

            Notes:
                Indicates status of the secondary gateway of this Redundancy Group.
                This attribute is named `gatewayPeer2Connected` in VSD API.
        """
        return self._gateway_peer2_connected

    @gateway_peer2_connected.setter
    def gateway_peer2_connected(self, value):
        """ Set gateway_peer2_connected value.

            Notes:
                Indicates status of the secondary gateway of this Redundancy Group.
                This attribute is named `gatewayPeer2Connected` in VSD API.
        """
        self._gateway_peer2_connected = value

    @property
    def gateway_peer2_name(self):
        """ Get gateway_peer2_name value.

            Notes:
                The gateway peer name in this Redundant Group
                This attribute is named `gatewayPeer2Name` in VSD API.
        """
        return self._gateway_peer2_name

    @gateway_peer2_name.setter
    def gateway_peer2_name(self, value):
        """ Set gateway_peer2_name value.

            Notes:
                The gateway peer name in this Redundant Group
                This attribute is named `gatewayPeer2Name` in VSD API.
        """
        self._gateway_peer2_name = value

    @property
    def redundant_gateway_status(self):
        """ Get redundant_gateway_status value.

            Notes:
                The status of Redundant Group, possible values are FAILED, SUCCESS
                Possible values are FAILED, SUCCESS, .
                This attribute is named `redundantGatewayStatus` in VSD API.
        """
        return self._redundant_gateway_status

    @redundant_gateway_status.setter
    def redundant_gateway_status(self, value):
        """ Set redundant_gateway_status value.

            Notes:
                The status of Redundant Group, possible values are FAILED, SUCCESS
                Possible values are FAILED, SUCCESS, .
                This attribute is named `redundantGatewayStatus` in VSD API.
        """
        self._redundant_gateway_status = value

    @property
    def permitted_action(self):
        """ Get permitted_action value.

            Notes:
                The permitted action to USE/EXTEND this Gateway
                Possible values are USE, READ, ALL, INSTANTIATE, EXTEND, DEPLOY, .
                This attribute is named `permittedAction` in VSD API.
        """
        return self._permitted_action

    @permitted_action.setter
    def permitted_action(self, value):
        """ Set permitted_action value.

            Notes:
                The permitted action to USE/EXTEND this Gateway
                Possible values are USE, READ, ALL, INSTANTIATE, EXTEND, DEPLOY, .
                This attribute is named `permittedAction` in VSD API.
        """
        self._permitted_action = value

    @property
    def personality(self):
        """ Get personality value.

            Notes:
                derived personality of the Redundancy Group - VSG,VRSG,NSG,OTHER
                Possible values are VSG, VSA, VRSG, VDFG, DC7X50, NSG, HARDWARE_VTEP, OTHER, .
        """
        return self._personality

    @personality.setter
    def personality(self, value):
        """ Set personality value.

            Notes:
                derived personality of the Redundancy Group - VSG,VRSG,NSG,OTHER
                Possible values are VSG, VSA, VRSG, VDFG, DC7X50, NSG, HARDWARE_VTEP, OTHER, .
        """
        self._personality = value

    @property
    def description(self):
        """ Get description value.

            Notes:
                Description of the Redundancy Group
        """
        return self._description

    @description.setter
    def description(self, value):
        """ Set description value.

            Notes:
                Description of the Redundancy Group
        """
        self._description = value

    @property
    def embedded_metadata(self):
        """ Get embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of
                Metadata objects if the API request is made using the special flag to enable
                the embedded Metadata feature. Only a maximum of Metadata objects is returned
                based on the value set in the system configuration.
                This attribute is named `embeddedMetadata` in VSD API.
        """
        return self._embedded_metadata

    @embedded_metadata.setter
    def embedded_metadata(self, value):
        """ Set embedded_metadata value.

            Notes:
                Metadata objects associated with this entity. This will contain a list of
                Metadata objects if the API request is made using the special flag to enable
                the embedded Metadata feature. Only a maximum of Metadata objects is returned
                based on the value set in the system configuration.
                This attribute is named `embeddedMetadata` in VSD API.
        """
        self._embedded_metadata = value

    @property
    def enterprise_id(self):
        """ Get enterprise_id value.

            Notes:
                The enterprise associated with this Redundant Group. This is a read only attribute
                This attribute is named `enterpriseID` in VSD API.
        """
        return self._enterprise_id

    @enterprise_id.setter
    def enterprise_id(self, value):
        """ Set enterprise_id value.

            Notes:
                The enterprise associated with this Redundant Group. This is a read only attribute
                This attribute is named `enterpriseID` in VSD API.
        """
        self._enterprise_id = value

    @property
    def entity_scope(self):
        """ Get entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        return self._entity_scope

    @entity_scope.setter
    def entity_scope(self, value):
        """ Set entity_scope value.

            Notes:
                Specify if scope of entity is Data center or Enterprise level
                This attribute is named `entityScope` in VSD API.
        """
        self._entity_scope = value

    @property
    def creation_date(self):
        """ Get creation_date value.

            Notes:
                Time stamp when this object was created.
                This attribute is named `creationDate` in VSD API.
        """
        return self._creation_date

    @creation_date.setter
    def creation_date(self, value):
        """ Set creation_date value.

            Notes:
                Time stamp when this object was created.
                This attribute is named `creationDate` in VSD API.
        """
        self._creation_date = value

    @property
    def vtep(self):
        """ Get vtep value.

            Notes:
                Represent the system ID or the Virtual IP of a service used by a Gateway
                (VSG for now) to establish a tunnel with a remote VSG or hypervisor.
                The format of this field is consistent with an IP address.
        """
        return self._vtep

    @vtep.setter
    def vtep(self, value):
        """ Set vtep value.

            Notes:
                Represent the system ID or the Virtual IP of a service used by a Gateway
                (VSG for now) to establish a tunnel with a remote VSG or hypervisor.
                The format of this field is consistent with an IP address.
        """
        self._vtep = value

    @property
    def owner(self):
        """ Get owner value.

            Notes:
                Identifies the user that has created this object.
        """
        return self._owner

    @owner.setter
    def owner(self, value):
        """ Set owner value.

            Notes:
                Identifies the user that has created this object.
        """
        self._owner = value

    @property
    def external_id(self):
        """ Get external_id value.

            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        return self._external_id

    @external_id.setter
    def external_id(self, value):
        """ Set external_id value.

            Notes:
                External object ID. Used for integration with third party systems
                This attribute is named `externalID` in VSD API.
        """
        self._external_id = value
bsd-3-clause
8,053,294,178,348,660,000
31.890095
372
0.601551
false
4.371326
false
false
false
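
Each attribute in the generated class above follows the same triple: a private slot, an expose_attribute registration, and a property pair. A minimal sketch of what such a registration-based REST object can look like (a stand-in, not bambou's NURESTObject implementation):

class RESTObject(object):
    def __init__(self):
        self._exposed = {}

    def expose_attribute(self, local_name, remote_name, attribute_type, is_required=False, is_unique=False):
        # remember the local->remote mapping used to (de)serialize payloads
        self._exposed[local_name] = remote_name

    def to_dict(self):
        return {remote: getattr(self, '_' + local, None)
                for local, remote in self._exposed.items()}

class Group(RESTObject):
    def __init__(self, name=None):
        super(Group, self).__init__()
        self._name = name
        self.expose_attribute(local_name="name", remote_name="name", attribute_type=str)

print(Group(name="rg1").to_dict())   # {'name': 'rg1'}
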
waisuan/minblog_af
minblog2/helpers/databaseManager.py
1
6726
from pymongo import MongoClient
import time
from bson import ObjectId


class DatabaseManager:
    # DB set-up
    def __init__(self):
        client = MongoClient()
        db = client.flaskr
        self.entries_col = db.entries

    # sort by [default] descending/latest order
    def get_all_entries(self, sort_by=-1):
        all_entries = self.entries_col.find().sort([('_id', sort_by)])
        entries_as_dict = [
            dict(
                entry_id=str(entry.get('_id', '9999')),
                creator=entry.get('creator', '????'),
                created_on_date=entry.get('created_on_date', '????'),
                created_on_time=entry.get('created_on_time', '????'),
                entry_title=entry.get('entry_title', '????'),
                entry_text=entry.get('entry_text', '????'),
                quick_text=entry.get('quick_text', entry.get('entry_text')),
                modified_on_date=entry.get('modified_on_date', '????'),
                modified_on_time=entry.get('modified_on_time', '????'),
                is_modified=entry.get('is_modified', False)
            )
            for entry in all_entries
        ]
        return entries_as_dict

    def get_entry_count(self):
        all_entries = self.entries_col.find()
        entries_as_list = [entry for entry in all_entries]
        return len(entries_as_list)

    def get_entries_by_page(self, direction, last_entry_id, limit, sort_by=-1):
        if direction == '+':
            direction = '$gt'
        else:
            direction = '$lt'
        print direction, last_entry_id, sort_by

        # $natural == natural order
        curr_entries = self.entries_col.find({'_id': {direction: ObjectId(last_entry_id)}}) \
                                       .sort([('$natural', int(sort_by))]) \
                                       .limit(int(limit))
        entries_as_dict = [
            dict(
                entry_id=str(entry.get('_id', '9999')),
                creator=entry.get('creator', '????'),
                created_on_date=entry.get('created_on_date', '????'),
                created_on_time=entry.get('created_on_time', '????'),
                entry_title=entry.get('entry_title', '????'),
                entry_text=entry.get('entry_text', '????'),
                quick_text=entry.get('quick_text', entry.get('entry_text')),
                modified_on_date=entry.get('modified_on_date', '????'),
                modified_on_time=entry.get('modified_on_time', '????'),
                is_modified=entry.get('is_modified', False)
            )
            for entry in curr_entries
        ]
        return entries_as_dict

    def get_entry_by_id(self, entry_id):
        entry = self.entries_col.find_one({'_id': ObjectId(entry_id)})
        if len(entry) == 0:
            return {}

        entry = dict(
            entry_id=str(entry.get('_id', '9999')),
            creator=entry.get('creator', '????'),
            created_on_date=entry.get('created_on_date', '????'),
            created_on_time=entry.get('created_on_time', '????'),
            entry_title=entry.get('entry_title', '????'),
            entry_text=entry.get('entry_text', '????'),
            quick_text=entry.get('quick_text', entry.get('entry_text')),
            modified_on_date=entry.get('modified_on_date', '????'),
            modified_on_time=entry.get('modified_on_time', '????'),
            is_modified=entry.get('is_modified', False)
        )
        return entry

    def create_new_entry(self, newEntryTitle, newEntryText, newQuickText):
        now_date = time.strftime("%d/%m/%Y")
        now_time = time.strftime("%I:%M %p")
        insert_result = self.entries_col.insert_one({
            'creator': 'admin',
            'created_on_date': now_date,
            'created_on_time': now_time,
            'entry_title': newEntryTitle,
            'entry_text': newEntryText,
            'quick_text': newQuickText,
            'modified_on_date': now_date,
            'modified_on_time': now_time,
            'is_modified': False
        })
        # Original _id type is ObjectId
        return str(insert_result.inserted_id)

    def update_entry(self, entry_id, updatedEntryTitle, updatedEntryText, updatedQuickText):
        now_date = time.strftime("%d/%m/%Y")
        now_time = time.strftime("%I:%M %p")
        update_result = self.entries_col.update_one({'_id': ObjectId(entry_id)},
                                                    {'$set': {
                                                        'entry_title': updatedEntryTitle,
                                                        'entry_text': updatedEntryText,
                                                        'quick_text': updatedQuickText,
                                                        'modified_on_date': now_date,
                                                        'modified_on_time': now_time,
                                                        'is_modified': True
                                                    }})
        return update_result.modified_count

    def delete_entry(self, entry_id):
        del_result = self.entries_col.delete_one({'_id': ObjectId(entry_id)})
        return del_result.deleted_count
mit
3,785,670,568,057,706,500
56.487179
144
0.381505
false
4.852814
false
false
false
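
get_entries_by_page above pages by ObjectId comparison ($gt/$lt on _id) rather than skip(), so a page fetch stays cheap no matter how deep the reader scrolls. A hedged usage sketch (assumes a running mongod on the default port; written to run on Python 2, matching the record):

dbm = DatabaseManager()
newest_id = dbm.create_new_entry('Title', 'Full text...', 'Teaser')

# next page: up to 10 entries older than the one just created
page = dbm.get_entries_by_page('-', newest_id, limit=10, sort_by=-1)
for entry in page:
    print(entry['entry_id'] + ' ' + entry['entry_title'])
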
haku86/happyowlweb
happyowlweb/happyowlweb/urls.py
1
1074
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.conf import settings
from django.views.generic import TemplateView

# Uncomment the next two lines to enable the admin:
from django.contrib import admin
admin.autodiscover()

urlpatterns = patterns('',
    url(r'^$', TemplateView.as_view(template_name='index.html')),
    # Examples:
    # url(r'^$', 'happyowlweb.views.home', name='home'),
    # url(r'^happyowlweb/', include('happyowlweb.foo.urls')),

    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # Uncomment the next line to enable the admin:
    url(r'^admin/', include(admin.site.urls)),
)

# Uncomment the next line to serve media files in dev.
# urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    import debug_toolbar
    urlpatterns += patterns('',
        url(r'^__debug__/', include(debug_toolbar.urls)),
    )
mit
1,547,584,701,885,924,400
33.645161
78
0.675978
false
3.78169
false
true
false
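
patterns() and string-based view references are the Django <= 1.9 idiom; the same routing on a modern Django (2.0+) would be a plain list using path() — a sketch only, since the record clearly targets the older API:

from django.contrib import admin
from django.urls import path
from django.views.generic import TemplateView

urlpatterns = [
    path('', TemplateView.as_view(template_name='index.html')),
    path('admin/', admin.site.urls),
]
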
bgroff/kala-app
django_kala/api/basecamp_classic/projects/serializers.py
1
1207
from rest_framework import serializers

from projects.models import Project, Category


class ProjectSerializer(serializers.ModelSerializer):
    class Meta:
        model = Project
        fields = [
            'id',
            'name',
            'organization'
        ]

    def create(self, validated_data):
        return Project.objects.create(**validated_data)

    def validate_name(self, value):
        # At least try to dedup names
        if Project.objects.filter(name__iexact=value):
            raise serializers.ValidationError('Name is already in use.')
        return value


class CategorySerializer(serializers.ModelSerializer):
    def __init__(self, *args, **kwargs):
        self.project = kwargs.pop('project')
        super(CategorySerializer, self).__init__(*args, **kwargs)

    class Meta:
        model = Category
        fields = [
            'id',
            'name',
            'project'
        ]

    def validate_project(self, project):
        if self.project.pk != project.pk:
            raise serializers.ValidationError('The project primary key cannot be different from the current project')
        return project

    def create(self, validated_data):
        return Category.objects.create(**validated_data)
mit
-3,842,473,095,268,577,300
29.948718
117
0.641259
false
4.642308
false
false
false
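
CategorySerializer above receives the current project through a constructor kwarg so validate_project can refuse writes that point at a different project. A hedged usage sketch from a view (the request and project objects are assumed to exist):

serializer = CategorySerializer(data=request.data, project=project)
if serializer.is_valid():
    category = serializer.save()   # runs create() above
else:
    errors = serializer.errors     # e.g. mismatched project pk or duplicate name
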
Wolfterro/SVD
src/old/1.0/GlobalVars.py
1
1651
# -*- coding: utf-8 -*-
'''
The MIT License (MIT)

Copyright (c) 2017 Wolfgang Almeida <wolfgang.almeida@yahoo.com>

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
'''

# Global variables class
# ======================
class GlobalVars:
    # Global variables
    # ----------------
    Version = "1.0"
    Ui = None
    MainWindow = None
    IconName = "Icon.ico"
    IconPath = "Icon.ico"
    SaveFolder = "SVD"
    BinFolder = "bin"
    Youtube_dl = ""
    PossibleSaveOptions = ["MP3 (Conversão)", "WAV (Conversão)", "MP4",
                           "WEBM", "MKV", "3GP", "MP4 (Conversão)",
                           "WEBM (Conversão)", "MKV (Conversão)"]
    AudioFormats = ["mp3", "wav"]
mit
-2,166,897,040,673,686,500
34.577778
80
0.707421
false
3.573913
false
false
false
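
GlobalVars above is never instantiated; it is a class used as a mutable, process-wide namespace, read and reassigned directly on the class. A short illustrative sketch of that access pattern (the binary path below is hypothetical):

GlobalVars.Youtube_dl = '/usr/local/bin/youtube-dl'     # hypothetical location
if GlobalVars.PossibleSaveOptions[0].startswith('MP3'):
    audio_format = GlobalVars.AudioFormats[0]           # 'mp3'
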
UUDigitalHumanitieslab/timealign
annotations/views.py
1
25800
import os
from collections import defaultdict
from tempfile import NamedTemporaryFile

from lxml import etree

from django.contrib import messages
from django.contrib.admin.utils import construct_change_message
from django.contrib.auth.mixins import LoginRequiredMixin, PermissionRequiredMixin
from django.contrib.messages.views import SuccessMessageMixin
from django.db.models import Count, Prefetch, QuerySet
from django.http import HttpResponse, JsonResponse, QueryDict
from django.shortcuts import get_object_or_404
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils.http import urlquote
from django.views import generic

from django_filters.views import FilterView
from reversion.models import Version
from reversion.revisions import add_to_revision, set_comment
from reversion.views import RevisionMixin

from core.mixins import ImportMixin, CheckOwnerOrStaff, FluidMixin, SuperuserRequiredMixin
from core.utils import find_in_enum, XLSX

from .exports import export_annotations
from .filters import AnnotationFilter
from .forms import AnnotationForm, LabelImportForm, AddFragmentsForm, FragmentForm
from .mixins import PrepareDownloadMixin, SelectSegmentMixin, ImportFragmentsMixin
from .models import Corpus, SubCorpus, Document, Language, Fragment, Alignment, Annotation, \
    TenseCategory, Tense, Source, Sentence, Word, LabelKey
from .utils import get_next_alignment, get_available_corpora, get_xml_sentences, bind_annotations_to_xml, \
    natural_sort_key


##############
# Static views
##############

class IntroductionView(generic.TemplateView):
    """
    Loads a static introduction view.
    """
    template_name = 'annotations/introduction.html'


class InstructionsView(generic.TemplateView):
    """
    Loads the various steps of the instructions.
    """

    def get_template_names(self):
        return 'annotations/instructions{}.html'.format(self.kwargs['n'])

    def get_context_data(self, **kwargs):
        context = super(InstructionsView, self).get_context_data(**kwargs)
        context['is_no_target_title'] = Annotation._meta.get_field('is_no_target').verbose_name.format(
            'present perfect')
        context['is_translation_title'] = Annotation._meta.get_field('is_translation').verbose_name
        return context


class StatusView(PermissionRequiredMixin, generic.TemplateView):
    """
    Loads a static home view, with an overview of the annotation progress.
    """
    template_name = 'annotations/home.html'
    permission_required = 'annotations.change_annotation'

    def get_context_data(self, **kwargs):
        """Creates a list of tuples with information on the annotation progress."""
        context = super(StatusView, self).get_context_data(**kwargs)

        corpus_pk = self.kwargs.get('pk', None)
        if corpus_pk:
            corpora = [get_object_or_404(Corpus, pk=corpus_pk)]
        else:
            corpora = get_available_corpora(self.request.user)

        # Retrieve the totals per language pair
        languages = {language.pk: language for language in Language.objects.all()}
        alignments = Alignment.objects.filter(original_fragment__document__corpus__in=corpora)
        totals = alignments \
            .values('original_fragment__language', 'translated_fragment__language') \
            .order_by('original_fragment__language', 'translated_fragment__language') \
            .annotate(count=Count('pk'))
        completed = {(t.get('original_fragment__language'), t.get('translated_fragment__language')): t.get('count')
                     for t in totals.exclude(annotation=None)}

        # Convert the QuerySets into a list of tuples
        language_totals = []
        for total in totals:
            l1 = languages.get(total['original_fragment__language'])
            l2 = languages.get(total['translated_fragment__language'])

            complete = completed.get((l1.pk, l2.pk), 0)
            available = total['count']

            language_totals.append((l1, l2, complete, available))

        context['languages'] = language_totals
        context['corpus_pk'] = corpus_pk
        context['current_corpora'] = corpora

        return context


#################
# CRUD Annotation
#################

class AnnotationMixin(SelectSegmentMixin, SuccessMessageMixin, PermissionRequiredMixin):
    model = Annotation
    form_class = AnnotationForm
    permission_required = 'annotations.change_annotation'

    def __init__(self):
        """Creates an attribute to cache the Alignment."""
        super(AnnotationMixin, self).__init__()
        self.alignment = None

    def get_form_kwargs(self):
        """Sets the User and the Alignment as a form kwarg."""
        kwargs = super(AnnotationMixin, self).get_form_kwargs()
        kwargs['user'] = self.request.user
        kwargs['alignment'] = self.get_alignment()
        kwargs['select_segment'] = self.request.session.get('select_segment', False)
        return kwargs

    def get_context_data(self, **kwargs):
        """Sets the Alignment on the context."""
        context = super(AnnotationMixin, self).get_context_data(**kwargs)
        context['alignment'] = self.get_alignment()
        return context

    def get_alignment(self):
        raise NotImplementedError

    def get_alignments(self):
        """Retrieve related fields on Alignment to prevent extra queries."""
        return Alignment.objects \
            .select_related('original_fragment__document__corpus',
                            'translated_fragment__document__corpus') \
            .prefetch_related('original_fragment__sentence_set__word_set',
                              'translated_fragment__sentence_set__word_set')


class RevisionWithCommentMixin(RevisionMixin):
    revision_manage_manually = True

    def form_valid(self, form):
        result = super().form_valid(form)
        if form.changed_data:
            add_to_revision(self.object)
            set_comment(self.format_change_comment(form.changed_data, form.cleaned_data))
        return result

    def format_change_for_field(self, field, value):
        if isinstance(value, QuerySet):
            value = ', '.join(map(str, value))
        return '{} to "{}"'.format(field, value)

    def format_change_comment(self, changes, values):
        parts = []
        for change in changes:
            parts.append(self.format_change_for_field(change, values[change]))
        return 'Changed {}'.format(', '.join(parts))

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        context['revisions'] = Version.objects.get_for_object(self.object)
        return context


class RevisionCreateMixin(RevisionMixin):
    def form_valid(self, form):
        set_comment('Created annotation')
        return super().form_valid(form)


class AnnotationUpdateMixin(AnnotationMixin, CheckOwnerOrStaff, RevisionWithCommentMixin):
    def get_context_data(self, **kwargs):
        """Sets the annotated Words on the context."""
        context = super(AnnotationUpdateMixin, self).get_context_data(**kwargs)
        context['annotated_words'] = self.object.words.all()
        return context

    def get_success_url(self):
        """Returns to the overview per language."""
        alignment = self.get_alignment()
        l1 = alignment.original_fragment.language.iso
        l2 = alignment.translated_fragment.language.iso
        return reverse('annotations:list', args=(l1, l2,))

    def get_alignment(self):
        """Retrieves the Alignment from the object."""
        if not self.alignment:
            self.alignment = self.get_alignments().get(pk=self.object.alignment.pk)
        return self.alignment


class AnnotationCreate(AnnotationMixin, RevisionCreateMixin, generic.CreateView):
    success_message = 'Annotation created successfully'

    def get_success_url(self):
        """Go to the choose-view to select a new Alignment."""
        alignment = self.object.alignment
        return reverse('annotations:choose', args=(alignment.original_fragment.document.corpus.pk,
                                                   alignment.original_fragment.language.iso,
                                                   alignment.translated_fragment.language.iso))

    def form_valid(self, form):
        """Sets the User and Alignment on the created instance."""
        form.instance.annotated_by = self.request.user
        form.instance.alignment = self.get_alignment()
        return super(AnnotationCreate, self).form_valid(form)

    def get_alignment(self):
        """Retrieves the Alignment by the pk in the kwargs."""
        if not self.alignment:
            self.alignment = get_object_or_404(self.get_alignments(), pk=self.kwargs['pk'])
        return self.alignment


class AnnotationUpdate(AnnotationUpdateMixin, generic.UpdateView):
    success_message = 'Annotation edited successfully'

    def form_valid(self, form):
        """Sets the last modified by on the instance."""
        form.instance.last_modified_by = self.request.user
        return super(AnnotationUpdate, self).form_valid(form)


class AnnotationDelete(AnnotationUpdateMixin, generic.DeleteView):
    success_message = 'Annotation deleted successfully'


class AnnotationChoose(PermissionRequiredMixin, generic.RedirectView):
    permanent = False
    pattern_name = 'annotations:create'
    permission_required = 'annotations.change_annotation'

    def get_redirect_url(self, *args, **kwargs):
        """Redirects to the next open Alignment."""
        l1 = Language.objects.get(iso=self.kwargs['l1'])
        l2 = Language.objects.get(iso=self.kwargs['l2'])
        corpus = Corpus.objects.get(pk=int(self.kwargs['corpus'])) if 'corpus' in self.kwargs else None
        next_alignment = get_next_alignment(self.request.user, l1, l2, corpus)

        # If no next Alignment has been found, redirect to the status overview
        if not next_alignment:
            messages.success(self.request, 'All work is done for this language pair!')
            return reverse('annotations:status')

        corpus_pk = next_alignment.original_fragment.document.corpus.pk
        return super().get_redirect_url(corpus_pk, next_alignment.pk)


############
# CRUD Fragment
############

class FragmentDetailMixin(LoginRequiredMixin):
    model = Fragment

    def get_object(self, queryset=None):
        qs = Fragment.objects \
            .select_related('document__corpus', 'language', 'tense') \
            .prefetch_related('original', 'sentence_set__word_set')
        fragment = super().get_object(qs)
        return fragment


class FragmentDetail(FragmentDetailMixin, generic.DetailView):
    def get_context_data(self, **kwargs):
        context = super(FragmentDetail, self).get_context_data(**kwargs)

        fragment = self.object
        limit = 5  # TODO: magic number
        doc_sentences = get_xml_sentences(fragment, limit)

        context['sentences'] = doc_sentences or fragment.sentence_set.all()
        context['limit'] = limit

        return context


class FragmentDetailPlain(FragmentDetailMixin, generic.DetailView):
    template_name = 'annotations/fragment_detail_plain.html'


class FragmentRevisionWithCommentMixin(RevisionWithCommentMixin):
    def format_change_for_field(self, field, value):
        if field == 'formal_structure':
            return 'formal structure to ' + find_in_enum(value, Fragment.FORMAL_STRUCTURES)
        if field == 'sentence_function':
            return 'sentence function to ' + find_in_enum(value, Fragment.SENTENCE_FUNCTIONS)
        return super().format_change_for_field(field, value)


class FragmentEdit(SelectSegmentMixin, LoginRequiredMixin, FragmentRevisionWithCommentMixin, generic.UpdateView):
    model = Fragment
    form_class = FragmentForm

    def get_context_data(self, **kwargs):
        """Sets the annotated Words on the context."""
        context = super(FragmentEdit, self).get_context_data(**kwargs)
        context['annotated_words'] = self.object.targets()
        return context

    def get_success_url(self):
        return reverse('annotations:show', args=(self.object.pk,))

    def form_valid(self, form):
        """Updates the target words."""
        for word in Word.objects.filter(sentence__fragment=self.object):
            word.is_target = word in form.cleaned_data['words']
            word.save()
        return super(FragmentEdit, self).form_valid(form)


############
# CRUD Corpus
############

class CorpusList(LoginRequiredMixin, generic.ListView):
    model = Corpus
    context_object_name = 'corpora'
    ordering = 'title'


class CorpusDetail(LoginRequiredMixin, generic.DetailView):
    model = Corpus

    def get_context_data(self, **kwargs):
        context = super(CorpusDetail, self).get_context_data(**kwargs)

        # Retrieve all Documents and order them by title
        corpus = self.object
        documents = {d.pk: d.title for d in corpus.documents.all()}
        documents_sorted = sorted(list(documents.items()), key=lambda x: natural_sort_key(x[1]))
        document_pks = [d[0] for d in documents_sorted]

        # Create a list of Languages
        languages = defaultdict(list)
        for language in corpus.languages.all():
            languages[language.title] = [None] * len(document_pks)

        # Retrieve the number of Annotations per document
        by_document = Annotation.objects. \
            filter(alignment__translated_fragment__document__corpus=corpus). \
            values('alignment__translated_fragment__language__title',
                   'alignment__translated_fragment__document__pk'). \
            annotate(Count('pk'))

        # Wrap the number of Annotations into the list of Languages
        for d in by_document:
            language = d.get('alignment__translated_fragment__language__title')
            document_pk = d.get('alignment__translated_fragment__document__pk')
            # Additional sanity check:
            # happens if the language is not defined as a Corpus language, but nevertheless Annotations exist.
            if languages.get(language):
                index = document_pks.index(document_pk)
                languages[language][index] = d.get('pk__count')

        # And finally, append the list of Document and Languages to the context
        context['documents'] = documents_sorted
        context['languages'] = dict(languages)

        return context


############
# CRUD Document
############

class DocumentDetail(LoginRequiredMixin, generic.DetailView):
    model = Document


############
# CRUD Source
############

class SourceDetail(LoginRequiredMixin, generic.DetailView):
    model = Source

    def get_object(self, queryset=None):
        qs = Source.objects.select_related('document__corpus', 'language')
        source = super(SourceDetail, self).get_object(qs)
        return source

    def get_context_data(self, **kwargs):
        context = super(SourceDetail, self).get_context_data(**kwargs)

        source = self.object
        tree, failed_lookups = bind_annotations_to_xml(source)
        additional_sources = Source.objects \
            .filter(document=source.document) \
            .exclude(pk=source.pk) \
            .select_related('language')

        transform = etree.XSLT(etree.fromstring(render_to_string('annotations/xml_transform.xslt').encode('utf-8')))

        context['sentences'] = [transform(p) for p in tree.iter('p', 'head')]
        context['failed_lookups'] = failed_lookups
        context['additional_sources'] = additional_sources
        context['rows'] = [(x,) for x in context['sentences']]

        additional_source = self.request.GET.get('additional_source')
        if additional_source:
            source = get_object_or_404(Source, pk=additional_source)
            add_tree, add_failed_lookups = bind_annotations_to_xml(source)
            context['additional_source'] = source
            context['additional_sentences'] = [transform(p) for p in add_tree.iter('p', 'head')]
            context['failed_lookups'] = context['failed_lookups'].extend(add_failed_lookups)
            context['rows'] = zip(context['sentences'], context['additional_sentences'])

        return context


############
# List views
############

class AnnotationList(PermissionRequiredMixin, FluidMixin, FilterView):
    context_object_name = 'annotations'
    filterset_class = AnnotationFilter
    paginate_by = 15
    permission_required = 'annotations.change_annotation'

    def get_queryset(self):
        """
        Retrieves all Annotations for the given source (l1) and target (l2) language.
        :return: A QuerySet of Annotations.
        """
        target_words = Sentence.objects. \
            prefetch_related(Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))
        return Annotation.objects \
            .filter(alignment__original_fragment__language__iso=self.kwargs['l1']) \
            .filter(alignment__translated_fragment__language__iso=self.kwargs['l2']) \
            .filter(alignment__original_fragment__document__corpus__in=get_available_corpora(self.request.user)) \
            .select_related('annotated_by',
                            'tense',
                            'alignment__original_fragment',
                            'alignment__original_fragment__document',
                            'alignment__original_fragment__tense',
                            'alignment__translated_fragment') \
            .prefetch_related('alignment__original_fragment__sentence_set__word_set',
                              Prefetch('alignment__original_fragment__sentence_set',
                                       queryset=target_words,
                                       to_attr='targets_prefetched'),
                              'alignment__translated_fragment__sentence_set__word_set',
                              'alignment__original_fragment__labels',
                              'labels',
                              'words') \
            .order_by('-annotated_at')

    def get_filterset(self, filterset_class):
        kwargs = self.get_filterset_kwargs(filterset_class)
        request = kwargs['request']
        l1, l2 = request.resolver_match.kwargs['l1'], request.resolver_match.kwargs['l2']
        session_key = 'annotation_filter_{}_{}'.format(l1, l2)
        if kwargs['data']:
            request.session[session_key] = kwargs['data'].urlencode()
        elif session_key in request.session:
            kwargs['data'] = QueryDict(request.session[session_key])
        return filterset_class(l1, l2, **kwargs)


class FragmentList(PermissionRequiredMixin, generic.ListView):
    """
    TODO: consider refactoring, too many queries.
    """
    context_object_name = 'fragments'
    template_name = 'annotations/fragment_list.html'
    paginate_by = 25
    permission_required = 'annotations.change_annotation'

    def get_queryset(self):
        """
        Retrieves all Fragments for the given language that have an Annotation
        that contains a target expression.
        :return: A list of Fragments.
        """
        results = []
        fragments = Fragment.objects.filter(language__iso=self.kwargs['language']) \
            .filter(document__corpus__in=get_available_corpora(self.request.user))
        for fragment in fragments:
            if Annotation.objects.filter(alignment__original_fragment=fragment, is_no_target=False).exists():
                results.append(fragment)
                if len(results) == 50:  # TODO: Capping this for now with a magic number.
                    break
        return results

    def get_context_data(self, **kwargs):
        """
        Sets the current language and other_languages on the context.
        :param kwargs: Contains the current language.
        :return: The context variables.
        """
        context = super(FragmentList, self).get_context_data(**kwargs)
        language = self.kwargs['language']
        corpus = context['fragments'][0].document.corpus
        context['language'] = Language.objects.filter(iso=language)
        context['other_languages'] = corpus.languages.exclude(iso=language)

        context['show_tenses'] = self.kwargs.get('showtenses', False)

        return context


class TenseCategoryList(PermissionRequiredMixin, FluidMixin, generic.ListView):
    model = TenseCategory
    context_object_name = 'tensecategories'
    template_name = 'annotations/tenses.html'
    permission_required = 'annotations.change_annotation'

    def get_context_data(self, **kwargs):
        """
        Sets the tenses and languages on the context.
        :return: The context variables.
        """
        context = super(TenseCategoryList, self).get_context_data(**kwargs)

        tense_cache = {(t.category.title, t.language.iso): t.title
                       for t in Tense.objects.select_related('category', 'language')}
        tense_categories = TenseCategory.objects.all()

        tenses = defaultdict(list)
        languages = []

        for language in Language.objects.order_by('iso'):
            if not Tense.objects.filter(language=language):
                continue
            languages.append(language)

            for tc in tense_categories:
                tense = tense_cache.get((tc.title, language.iso), '')
                tenses[tc].append(tense)

        context['tenses'] = sorted(list(tenses.items()), key=lambda item: item[0].pk)
        context['languages'] = languages

        return context


class LabelList(PermissionRequiredMixin, FluidMixin, generic.ListView):
    model = LabelKey
    context_object_name = 'labelkeys'
    template_name = 'annotations/labels.html'
    permission_required = 'annotations.change_annotation'

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        corpus = self.kwargs.get('corpus')
        if corpus:
            corpus = Corpus.objects.get(pk=corpus)
        else:
            corpus = get_available_corpora(self.request.user)[0]

        self.object_list = self.object_list.filter(corpora=corpus)
        context['label_keys'] = self.object_list
        labels = [key.labels.all() for key in self.object_list]
        # transpose the 2d array stored in labels so that we could have each label key
        # show in a column on the html table
        transposed = []
        max_len = max([len(x) for x in labels]) if labels else 0
        for i in range(max_len):
            transposed.append([])
            for group in labels:
                if len(group) > i:
                    transposed[-1].append(group[i])
                else:
                    # add empty table cells
                    transposed[-1].append('')
        context['labels'] = transposed
        context['corpus'] = corpus
        context['corpora'] = get_available_corpora(self.request.user)
        return context


##############
# Export views
##############

class PrepareDownload(PrepareDownloadMixin, generic.TemplateView):
    template_name = 'annotations/download.html'


class ExportPOSPrepare(PermissionRequiredMixin, generic.View):
    permission_required = 'annotations.change_annotation'

    def get(self, request, *args, **kwargs):
        language = self.request.GET['language']
        corpus_id = self.request.GET['corpus']
        subcorpus_id = self.request.GET['subcorpus']
        document_id = self.request.GET['document']
        include_non_targets = 'include_non_targets' in self.request.GET
        add_lemmata = 'add_lemmata' in self.request.GET

        pos_file = NamedTemporaryFile(delete=False)
        self.request.session['pos_file'] = pos_file.name

        corpus = Corpus.objects.get(pk=int(corpus_id))
        subcorpus = SubCorpus.objects.get(pk=int(subcorpus_id)) if subcorpus_id != 'all' else None
        document = Document.objects.get(pk=int(document_id)) if document_id != 'all' else None
        document_title = document.title if document_id != 'all' else 'all'

        filename = '{}-{}-{}.xlsx'.format(urlquote(corpus.title), urlquote(document_title), language)
        self.request.session['pos_filename'] = filename

        export_annotations(pos_file.name, XLSX, corpus, language,
                           subcorpus=subcorpus, document=document,
                           include_non_targets=include_non_targets, add_lemmata=add_lemmata)

        return JsonResponse(dict(done=True))


class ExportPOSDownload(PermissionRequiredMixin, generic.View):
    permission_required = 'annotations.change_annotation'

    def get(self, request, *args, **kwargs):
        pos_file = self.request.session['pos_file']
        pos_filename = self.request.session['pos_filename']

        with open(pos_file, 'rb') as f:
            contents = f.read()
        os.unlink(pos_file)

        response = HttpResponse(contents, content_type='application/xlsx')
        response['Content-Disposition'] = 'attachment; filename={}'.format(pos_filename)
        return response


##############
# Import views
##############

class ImportLabelsView(SuperuserRequiredMixin, ImportMixin):
    """
    Allows superusers to import labels to Annotations and Fragments.
    """
    form_class = LabelImportForm
    template_name = 'annotations/label_form.html'
    success_message = 'Successfully imported the labels!'

    def get_success_url(self):
        return reverse('annotations:import-labels')


class AddFragmentsView(SuperuserRequiredMixin, ImportFragmentsMixin):
    """
    Allows superusers to import Fragments.
    """
    form_class = AddFragmentsForm
    template_name = 'annotations/add_fragments_form.html'
    success_message = 'Successfully added the fragments!'

    def get_success_url(self):
        return reverse('annotations:add-fragments')
mit
3,640,639,079,216,742,400
38.389313
116
0.647868
false
4.208809
false
false
false
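
AnnotationList in the record above keeps its query count flat by attaching a pre-filtered Sentence queryset to each fragment via Prefetch(..., to_attr=...). A reduced sketch of that idiom, reusing the record's model names (a sketch, not the app's full production query):

from django.db.models import Prefetch

target_words = Sentence.objects.prefetch_related(
    Prefetch('word_set', queryset=Word.objects.filter(is_target=True)))

annotations = Annotation.objects.prefetch_related(
    Prefetch('alignment__original_fragment__sentence_set',
             queryset=target_words, to_attr='targets_prefetched'))
# each fragment now carries .targets_prefetched: sentences whose word_set is
# already narrowed to target words, without extra per-row queries
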
shubhdev/edx-platform
lms/djangoapps/certificates/tests/test_views.py
1
30514
"""Tests for certificates views. """ import json import ddt from uuid import uuid4 from nose.plugins.attrib import attr from mock import patch from django.conf import settings from django.core.cache import cache from django.core.urlresolvers import reverse from django.test import TestCase from django.test.client import Client from django.test.utils import override_settings from opaque_keys.edx.locator import CourseLocator from openedx.core.lib.tests.assertions.events import assert_event_matches from student.tests.factories import UserFactory, CourseEnrollmentFactory from track.tests import EventTrackingTestCase from xmodule.modulestore.tests.factories import CourseFactory from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase from certificates.api import get_certificate_url from certificates.models import ( ExampleCertificateSet, ExampleCertificate, GeneratedCertificate, BadgeAssertion, CertificateStatuses, CertificateHtmlViewConfiguration, CertificateSocialNetworks, ) from certificates.tests.factories import ( CertificateHtmlViewConfigurationFactory, LinkedInAddToProfileConfigurationFactory, BadgeAssertionFactory, ) from lms import urls FEATURES_WITH_CERTS_ENABLED = settings.FEATURES.copy() FEATURES_WITH_CERTS_ENABLED['CERTIFICATES_HTML_VIEW'] = True FEATURES_WITH_CERTS_DISABLED = settings.FEATURES.copy() FEATURES_WITH_CERTS_DISABLED['CERTIFICATES_HTML_VIEW'] = False @attr('shard_1') @ddt.ddt class UpdateExampleCertificateViewTest(TestCase): """Tests for the XQueue callback that updates example certificates. """ COURSE_KEY = CourseLocator(org='test', course='test', run='test') DESCRIPTION = 'test' TEMPLATE = 'test.pdf' DOWNLOAD_URL = 'http://www.example.com' ERROR_REASON = 'Kaboom!' def setUp(self): super(UpdateExampleCertificateViewTest, self).setUp() self.cert_set = ExampleCertificateSet.objects.create(course_key=self.COURSE_KEY) self.cert = ExampleCertificate.objects.create( example_cert_set=self.cert_set, description=self.DESCRIPTION, template=self.TEMPLATE, ) self.url = reverse('certificates.views.update_example_certificate') # Since rate limit counts are cached, we need to clear # this before each test. 
cache.clear() def test_update_example_certificate_success(self): response = self._post_to_view(self.cert, download_url=self.DOWNLOAD_URL) self._assert_response(response) self.cert = ExampleCertificate.objects.get() self.assertEqual(self.cert.status, ExampleCertificate.STATUS_SUCCESS) self.assertEqual(self.cert.download_url, self.DOWNLOAD_URL) def test_update_example_certificate_invalid_key(self): payload = { 'xqueue_header': json.dumps({ 'lms_key': 'invalid' }), 'xqueue_body': json.dumps({ 'username': self.cert.uuid, 'url': self.DOWNLOAD_URL }) } response = self.client.post(self.url, data=payload) self.assertEqual(response.status_code, 404) def test_update_example_certificate_error(self): response = self._post_to_view(self.cert, error_reason=self.ERROR_REASON) self._assert_response(response) self.cert = ExampleCertificate.objects.get() self.assertEqual(self.cert.status, ExampleCertificate.STATUS_ERROR) self.assertEqual(self.cert.error_reason, self.ERROR_REASON) @ddt.data('xqueue_header', 'xqueue_body') def test_update_example_certificate_invalid_params(self, missing_param): payload = { 'xqueue_header': json.dumps({ 'lms_key': self.cert.access_key }), 'xqueue_body': json.dumps({ 'username': self.cert.uuid, 'url': self.DOWNLOAD_URL }) } del payload[missing_param] response = self.client.post(self.url, data=payload) self.assertEqual(response.status_code, 400) def test_update_example_certificate_missing_download_url(self): payload = { 'xqueue_header': json.dumps({ 'lms_key': self.cert.access_key }), 'xqueue_body': json.dumps({ 'username': self.cert.uuid }) } response = self.client.post(self.url, data=payload) self.assertEqual(response.status_code, 400) def test_update_example_cetificate_non_json_param(self): payload = { 'xqueue_header': '{/invalid', 'xqueue_body': '{/invalid' } response = self.client.post(self.url, data=payload) self.assertEqual(response.status_code, 400) def test_unsupported_http_method(self): response = self.client.get(self.url) self.assertEqual(response.status_code, 405) def test_bad_request_rate_limiting(self): payload = { 'xqueue_header': json.dumps({ 'lms_key': 'invalid' }), 'xqueue_body': json.dumps({ 'username': self.cert.uuid, 'url': self.DOWNLOAD_URL }) } # Exceed the rate limit for invalid requests # (simulate a DDOS with invalid keys) for _ in range(100): response = self.client.post(self.url, data=payload) if response.status_code == 403: break # The final status code should indicate that the rate # limit was exceeded. self.assertEqual(response.status_code, 403) def _post_to_view(self, cert, download_url=None, error_reason=None): """Simulate a callback from the XQueue to the example certificate end-point. """ header = {'lms_key': cert.access_key} body = {'username': cert.uuid} if download_url is not None: body['url'] = download_url if error_reason is not None: body['error'] = 'error' body['error_reason'] = self.ERROR_REASON payload = { 'xqueue_header': json.dumps(header), 'xqueue_body': json.dumps(body) } return self.client.post(self.url, data=payload) def _assert_response(self, response): """Check the response from the callback end-point. 
""" content = json.loads(response.content) self.assertEqual(response.status_code, 200) self.assertEqual(content['return_code'], 0) def fakemicrosite(name, default=None): """ This is a test mocking function to return a microsite configuration """ if name == 'microsite_config_key': return 'test_microsite' else: return default @attr('shard_1') class MicrositeCertificatesViewsTests(ModuleStoreTestCase): """ Tests for the microsite certificates web/html views """ def setUp(self): super(MicrositeCertificatesViewsTests, self).setUp() self.client = Client() self.course = CourseFactory.create( org='testorg', number='run1', display_name='refundable course' ) self.course_id = self.course.location.course_key self.user = UserFactory.create( email='joe_user@edx.org', username='joeuser', password='foo' ) self.user.profile.name = "Joe User" self.user.profile.save() self.client.login(username=self.user.username, password='foo') self.cert = GeneratedCertificate.objects.create( user=self.user, course_id=self.course_id, verify_uuid=uuid4(), download_uuid=uuid4(), grade="0.95", key='the_key', distinction=True, status='generated', mode='honor', name=self.user.profile.name, ) def _certificate_html_view_configuration(self, configuration_string, enabled=True): """ This will create a certificate html configuration """ config = CertificateHtmlViewConfiguration(enabled=enabled, configuration=configuration_string) config.save() return config def _add_course_certificates(self, count=1, signatory_count=0, is_active=True): """ Create certificate for the course. """ signatories = [ { 'name': 'Signatory_Name ' + str(i), 'title': 'Signatory_Title ' + str(i), 'organization': 'Signatory_Organization ' + str(i), 'signature_image_path': '/static/certificates/images/demo-sig{}.png'.format(i), 'id': i, } for i in xrange(signatory_count) ] certificates = [ { 'id': i, 'name': 'Name ' + str(i), 'description': 'Description ' + str(i), 'course_title': 'course_title_' + str(i), 'org_logo_path': '/t4x/orgX/testX/asset/org-logo-{}.png'.format(i), 'signatories': signatories, 'version': 1, 'is_active': is_active } for i in xrange(count) ] self.course.certificates = {'certificates': certificates} self.course.save() self.store.update_item(self.course, self.user.id) @patch("microsite_configuration.microsite.get_value", fakemicrosite) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_html_view_for_microsite(self): test_configuration_string = """{ "default": { "accomplishment_class_append": "accomplishment-certificate", "platform_name": "edX", "company_about_url": "http://www.edx.org/about-us", "company_privacy_url": "http://www.edx.org/edx-privacy-policy", "company_tos_url": "http://www.edx.org/edx-terms-service", "company_verified_certificate_url": "http://www.edx.org/verified-certificate", "document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css", "logo_src": "/static/certificates/images/logo-edx.svg", "logo_url": "http://www.edx.org" }, "test_microsite": { "accomplishment_class_append": "accomplishment-certificate", "platform_name": "platform_microsite", "company_about_url": "http://www.microsite.org/about-us", "company_privacy_url": "http://www.microsite.org/edx-privacy-policy", "company_tos_url": "http://www.microsite.org/microsite-terms-service", "company_verified_certificate_url": "http://www.microsite.org/verified-certificate", "document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css", "logo_src": "/static/certificates/images/logo-microsite.svg", "logo_url": 
"http://www.microsite.org", "company_about_description": "This is special microsite aware company_about_description content", "company_about_title": "Microsite title" }, "honor": { "certificate_type": "Honor Code", "document_body_class_append": "is-honorcode" } }""" config = self._certificate_html_view_configuration(configuration_string=test_configuration_string) self.assertEquals(config.configuration, test_configuration_string) test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) self._add_course_certificates(count=1, signatory_count=2) response = self.client.get(test_url) self.assertIn('platform_microsite', response.content) self.assertIn('http://www.microsite.org', response.content) self.assertIn('This is special microsite aware company_about_description content', response.content) self.assertIn('Microsite title', response.content) @patch("microsite_configuration.microsite.get_value", fakemicrosite) def test_html_view_microsite_configuration_missing(self): test_configuration_string = """{ "default": { "accomplishment_class_append": "accomplishment-certificate", "platform_name": "edX", "company_about_url": "http://www.edx.org/about-us", "company_privacy_url": "http://www.edx.org/edx-privacy-policy", "company_tos_url": "http://www.edx.org/edx-terms-service", "company_verified_certificate_url": "http://www.edx.org/verified-certificate", "document_stylesheet_url_application": "/static/certificates/sass/main-ltr.css", "logo_src": "/static/certificates/images/logo-edx.svg", "logo_url": "http://www.edx.org", "company_about_description": "This should not survive being overwritten by static content" }, "honor": { "certificate_type": "Honor Code", "document_body_class_append": "is-honorcode" } }""" config = self._certificate_html_view_configuration(configuration_string=test_configuration_string) self.assertEquals(config.configuration, test_configuration_string) test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) self._add_course_certificates(count=1, signatory_count=2) response = self.client.get(test_url) self.assertIn('edX', response.content) self.assertNotIn('platform_microsite', response.content) self.assertNotIn('http://www.microsite.org', response.content) self.assertNotIn('This should not survive being overwritten by static content', response.content) @attr('shard_1') class CertificatesViewsTests(ModuleStoreTestCase, EventTrackingTestCase): """ Tests for the certificates web/html views """ def setUp(self): super(CertificatesViewsTests, self).setUp() self.client = Client() self.course = CourseFactory.create( org='testorg', number='run1', display_name='refundable course' ) self.course_id = self.course.location.course_key self.user = UserFactory.create( email='joe_user@edx.org', username='joeuser', password='foo' ) self.user.profile.name = "Joe User" self.user.profile.save() self.client.login(username=self.user.username, password='foo') self.cert = GeneratedCertificate.objects.create( user=self.user, course_id=self.course_id, verify_uuid=uuid4(), download_uuid=uuid4(), grade="0.95", key='the_key', distinction=True, status='generated', mode='honor', name=self.user.profile.name, ) CourseEnrollmentFactory.create( user=self.user, course_id=self.course_id ) CertificateHtmlViewConfigurationFactory.create() LinkedInAddToProfileConfigurationFactory.create() def _add_course_certificates(self, count=1, signatory_count=0, is_active=True): """ 
Create certificate for the course. """ signatories = [ { 'name': 'Signatory_Name ' + str(i), 'title': 'Signatory_Title ' + str(i), 'organization': 'Signatory_Organization ' + str(i), 'signature_image_path': '/static/certificates/images/demo-sig{}.png'.format(i), 'id': i, } for i in xrange(0, signatory_count) ] certificates = [ { 'id': i, 'name': 'Name ' + str(i), 'description': 'Description ' + str(i), 'course_title': 'course_title_' + str(i), 'org_logo_path': '/t4x/orgX/testX/asset/org-logo-{}.png'.format(i), 'signatories': signatories, 'version': 1, 'is_active': is_active } for i in xrange(0, count) ] self.course.certificates = {'certificates': certificates} self.course.save() self.store.update_item(self.course, self.user.id) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_valid_certificate(self): test_url = get_certificate_url( user_id=self.user.id, course_id=unicode(self.course.id) # pylint: disable=no-member ) self._add_course_certificates(count=1, signatory_count=2) response = self.client.get(test_url) self.assertIn(str(self.cert.verify_uuid), response.content) # Hit any "verified" mode-specific branches self.cert.mode = 'verified' self.cert.save() response = self.client.get(test_url) self.assertIn(str(self.cert.verify_uuid), response.content) # Hit any 'xseries' mode-specific branches self.cert.mode = 'xseries' self.cert.save() response = self.client.get(test_url) self.assertIn(str(self.cert.verify_uuid), response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_with_valid_signatories(self): test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) self._add_course_certificates(count=1, signatory_count=2) response = self.client.get(test_url) self.assertIn('course_title_0', response.content) self.assertIn('/t4x/orgX/testX/asset/org-logo-0.png', response.content) self.assertIn('Signatory_Name 0', response.content) self.assertIn('Signatory_Title 0', response.content) self.assertIn('Signatory_Organization 0', response.content) self.assertIn('/static/certificates/images/demo-sig0.png', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_course_display_name_not_override_with_course_title(self): # if certificate in descriptor has not course_title then course name should not be overridden with this title. 
test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) test_certificates = [ { 'id': 0, 'name': 'Name 0', 'description': 'Description 0', 'signatories': [], 'version': 1, 'is_active':True } ] self.course.certificates = {'certificates': test_certificates} self.course.save() self.store.update_item(self.course, self.user.id) response = self.client.get(test_url) self.assertNotIn('test_course_title_0', response.content) self.assertIn('refundable course', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_certificate_view_without_org_logo(self): test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) test_certificates = [ { 'id': 0, 'name': 'Certificate Name 0', 'signatories': [], 'version': 1, 'is_active': True } ] self.course.certificates = {'certificates': test_certificates} self.course.save() self.store.update_item(self.course, self.user.id) response = self.client.get(test_url) # make sure response html has only one organization logo container for edX self.assertContains(response, "<li class=\"wrapper-organization\">", 1) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_without_signatories(self): test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) self._add_course_certificates(count=1, signatory_count=0) response = self.client.get(test_url) self.assertNotIn('Signatory_Name 0', response.content) self.assertNotIn('Signatory_Title 0', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED) def test_render_html_view_invalid_feature_flag(self): test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) response = self.client.get(test_url) self.assertIn('invalid', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_invalid_course_id(self): test_url = get_certificate_url( user_id=self.user.id, course_id='az/23423/4vs' ) response = self.client.get(test_url) self.assertIn('invalid', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_invalid_course(self): test_url = get_certificate_url( user_id=self.user.id, course_id='missing/course/key' ) response = self.client.get(test_url) self.assertIn('invalid', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_invalid_user(self): test_url = get_certificate_url( user_id=111, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) response = self.client.get(test_url) self.assertIn('invalid', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_invalid_user_certificate(self): self.cert.delete() self.assertEqual(len(GeneratedCertificate.objects.all()), 0) test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) response = self.client.get(test_url) self.assertIn('invalid', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_with_preview_mode(self): """ test certificate web view should render properly along with its signatories information when accessing it in preview mode. Either the certificate is marked active or not. 
""" self.cert.delete() self.assertEqual(len(GeneratedCertificate.objects.all()), 0) self._add_course_certificates(count=1, signatory_count=2) test_url = get_certificate_url( user_id=self.user.id, course_id=self.course.id.to_deprecated_string() # pylint: disable=no-member ) response = self.client.get(test_url + '?preview=honor') self.assertNotIn(self.course.display_name, response.content) self.assertIn('course_title_0', response.content) self.assertIn('Signatory_Title 0', response.content) # mark certificate inactive but accessing in preview mode. self._add_course_certificates(count=1, signatory_count=2, is_active=False) response = self.client.get(test_url + '?preview=honor') self.assertNotIn(self.course.display_name, response.content) self.assertIn('course_title_0', response.content) self.assertIn('Signatory_Title 0', response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_render_html_view_invalid_certificate_configuration(self): test_url = get_certificate_url( user_id=self.user.id, course_id=unicode(self.course.id) ) response = self.client.get(test_url) self.assertIn("Invalid Certificate", response.content) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_certificate_evidence_event_emitted(self): self.client.logout() self._add_course_certificates(count=1, signatory_count=2) self.recreate_tracker() test_url = get_certificate_url( user_id=self.user.id, course_id=unicode(self.course.id) ) response = self.client.get(test_url) self.assertEqual(response.status_code, 200) actual_event = self.get_event() self.assertEqual(actual_event['name'], 'edx.certificate.evidence_visited') assert_event_matches( { 'user_id': self.user.id, 'certificate_id': unicode(self.cert.verify_uuid), 'enrollment_mode': self.cert.mode, 'certificate_url': test_url, 'course_id': unicode(self.course.id), 'social_network': CertificateSocialNetworks.linkedin }, actual_event['data'] ) @override_settings(FEATURES=FEATURES_WITH_CERTS_ENABLED) def test_evidence_event_sent(self): test_url = get_certificate_url(user_id=self.user.id, course_id=self.course_id) + '?evidence_visit=1' self.recreate_tracker() assertion = BadgeAssertion( user=self.user, course_id=self.course_id, mode='honor', data={ 'image': 'http://www.example.com/image.png', 'json': {'id': 'http://www.example.com/assertion.json'}, 'issuer': 'http://www.example.com/issuer.json', } ) assertion.save() response = self.client.get(test_url) self.assertEqual(response.status_code, 200) assert_event_matches( { 'name': 'edx.badge.assertion.evidence_visited', 'data': { 'course_id': 'testorg/run1/refundable_course', # pylint: disable=no-member 'assertion_id': assertion.id, 'assertion_json_url': 'http://www.example.com/assertion.json', 'assertion_image_url': 'http://www.example.com/image.png', 'user_id': self.user.id, 'issuer': 'http://www.example.com/issuer.json', 'enrollment_mode': 'honor', }, }, self.get_event() ) @override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED) def test_request_certificate_without_passing(self): self.cert.status = CertificateStatuses.unavailable self.cert.save() request_certificate_url = reverse('certificates.views.request_certificate') response = self.client.post(request_certificate_url, {'course_id': unicode(self.course.id)}) self.assertEqual(response.status_code, 200) response_json = json.loads(response.content) self.assertEqual(CertificateStatuses.notpassing, response_json['add_status']) @override_settings(FEATURES=FEATURES_WITH_CERTS_DISABLED) @override_settings(CERT_QUEUE='test-queue') def 
test_request_certificate_after_passing(self): self.cert.status = CertificateStatuses.unavailable self.cert.save() request_certificate_url = reverse('certificates.views.request_certificate') with patch('capa.xqueue_interface.XQueueInterface.send_to_queue') as mock_queue: mock_queue.return_value = (0, "Successfully queued") with patch('courseware.grades.grade') as mock_grade: mock_grade.return_value = {'grade': 'Pass', 'percent': 0.75} response = self.client.post(request_certificate_url, {'course_id': unicode(self.course.id)}) self.assertEqual(response.status_code, 200) response_json = json.loads(response.content) self.assertEqual(CertificateStatuses.generating, response_json['add_status']) class TrackShareRedirectTest(ModuleStoreTestCase, EventTrackingTestCase): """ Verifies the badge image share event is sent out. """ def setUp(self): super(TrackShareRedirectTest, self).setUp() self.client = Client() self.course = CourseFactory.create( org='testorg', number='run1', display_name='trackable course' ) self.assertion = BadgeAssertionFactory( user=self.user, course_id=self.course.id, data={ 'image': 'http://www.example.com/image.png', 'json': {'id': 'http://www.example.com/assertion.json'}, 'issuer': 'http://www.example.com/issuer.json', }, ) # Enabling the feature flag isn't enough to change the URLs-- they're already loaded by this point. self.old_patterns = urls.urlpatterns urls.urlpatterns += (urls.BADGE_SHARE_TRACKER_URL,) def tearDown(self): super(TrackShareRedirectTest, self).tearDown() urls.urlpatterns = self.old_patterns def test_social_event_sent(self): test_url = '/certificates/badge_share_tracker/{}/social_network/{}/'.format( unicode(self.course.id), self.user.username, ) self.recreate_tracker() response = self.client.get(test_url) self.assertEqual(response.status_code, 302) self.assertEqual(response['Location'], 'http://www.example.com/image.png') assert_event_matches( { 'name': 'edx.badge.assertion.shared', 'data': { 'course_id': 'testorg/run1/trackable_course', 'social_network': 'social_network', # pylint: disable=no-member 'assertion_id': self.assertion.id, 'assertion_json_url': 'http://www.example.com/assertion.json', 'assertion_image_url': 'http://www.example.com/image.png', 'user_id': self.user.id, 'issuer': 'http://www.example.com/issuer.json', 'enrollment_mode': 'honor', }, }, self.get_event() )
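

# Illustrative sketch, not part of the original test suite: the XQueue
# callback exercised above always receives two JSON-encoded form fields.
# A minimal payload builder mirroring `_post_to_view` might look like this;
# `build_xqueue_payload` is a hypothetical helper, and `access_key` /
# `cert_uuid` stand in for values stored on an ExampleCertificate.
def build_xqueue_payload(access_key, cert_uuid, download_url=None, error_reason=None):
    """Build the two-field form payload expected by the XQueue callback."""
    body = {'username': cert_uuid}
    if download_url is not None:
        body['url'] = download_url
    if error_reason is not None:
        # Failures are reported with an `error` flag plus a reason string.
        body['error'] = 'error'
        body['error_reason'] = error_reason
    return {
        'xqueue_header': json.dumps({'lms_key': access_key}),
        'xqueue_body': json.dumps(body),
    }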
agpl-3.0
988,242,617,299,130,100
40.459239
118
0.606967
false
3.981472
true
false
false
openstack/heat
heat/tests/openstack/neutron/test_sfc/test_port_chain.py
1
6568
#
#    All Rights Reserved.
#
#    Licensed under the Apache License, Version 2.0 (the "License"); you may
#    not use this file except in compliance with the License. You may obtain
#    a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
#    Unless required by applicable law or agreed to in writing, software
#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
#    License for the specific language governing permissions and limitations
#    under the License.

from unittest import mock

from heat.engine.clients.os import neutron
from heat.engine.resources.openstack.neutron.sfc import port_chain
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils

port_chain_template = {
    'heat_template_version': '2015-04-30',
    'resources': {
        'test_resource': {
            'type': 'OS::Neutron::PortChain',
            'properties': {
                'name': 'test_port_chain',
                'description': 'port_chain_desc',
                'port_pair_groups': ['port_pair_group_1'],
                'flow_classifiers': ['flow_classifier1'],
                'chain_parameters': {"correlation": 'mpls'}
            }
        }
    }
}


class PortChainTest(common.HeatTestCase):

    def setUp(self):
        super(PortChainTest, self).setUp()

        self.patchobject(neutron.NeutronClientPlugin, 'has_extension',
                         return_value=True)
        self.ctx = utils.dummy_context()

        self.stack = stack.Stack(
            self.ctx, 'test_stack',
            template.Template(port_chain_template)
        )
        self.test_resource = self.stack['test_resource']

        self.test_client_plugin = mock.MagicMock()
        self.test_resource.client_plugin = mock.MagicMock(
            return_value=self.test_client_plugin)

        self.test_client = mock.MagicMock()
        self.test_resource.client = mock.MagicMock(
            return_value=self.test_client)

        self.test_client_plugin.get_notification = mock.MagicMock(
            return_value='sample_notification')

        self.patchobject(self.test_client_plugin, 'resolve_ext_resource'
                         ).return_value = ('port_pair_group_1')

        self.patchobject(self.test_client_plugin, 'resolve_ext_resource'
                         ).return_value = ('flow_classifier1')

    def test_resource_mapping(self):
        mapping = port_chain.resource_mapping()
        self.assertEqual(port_chain.PortChain,
                         mapping['OS::Neutron::PortChain'])

    def _get_mock_resource(self):
        value = mock.MagicMock()
        value.id = '477e8273-60a7-4c41-b683-fdb0bc7cd152'
        return value

    def test_resource_handle_create(self):
        mock_pc_create = self.test_client_plugin.create_ext_resource
        mock_resource = self._get_mock_resource()
        mock_pc_create.return_value = mock_resource

        # validate the properties
        self.assertEqual(
            'test_port_chain',
            self.test_resource.properties.get(
                port_chain.PortChain.NAME))
        self.assertEqual(
            'port_chain_desc',
            self.test_resource.properties.get(
                port_chain.PortChain.DESCRIPTION))
        self.assertEqual(
            ['port_pair_group_1'],
            self.test_resource.properties.get(
                port_chain.PortChain.PORT_PAIR_GROUPS))
        self.assertEqual(
            ['flow_classifier1'],
            self.test_resource.properties.get(
                port_chain.PortChain.FLOW_CLASSIFIERS))
        self.assertEqual(
            {"correlation": 'mpls'},
            self.test_resource.properties.get(
                port_chain.PortChain.CHAIN_PARAMETERS))

        self.test_resource.data_set = mock.Mock()
        self.test_resource.handle_create()

        mock_pc_create.assert_called_once_with(
            'port_chain',
            {
                'name': 'test_port_chain',
                'description': 'port_chain_desc',
                'port_pair_groups': ['port_pair_group_1'],
                'flow_classifiers': ['flow_classifier1'],
                'chain_parameters': {"correlation": 'mpls'}}
        )

    def test_delete_portchain(self):
        mock_pc_delete = self.test_client_plugin.delete_ext_resource
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        mock_pc_delete.return_value = None

        self.assertIsNone(self.test_resource.handle_delete())
        mock_pc_delete.assert_called_once_with(
            'port_chain', self.test_resource.resource_id)

    def test_delete_portchain_resource_id_is_none(self):
        self.test_resource.resource_id = None
        self.assertIsNone(self.test_resource.handle_delete())
        self.assertEqual(0, self.test_client_plugin.
                         delete_ext_resource.call_count)

    def test_resource_handle_delete_not_found(self):
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'
        mock_pc_delete = self.test_client_plugin.delete_ext_resource
        mock_pc_delete.side_effect = self.test_client_plugin.NotFound
        self.assertIsNone(self.test_resource.handle_delete())

    def test_resource_show_resource(self):
        mock_pc_get = self.test_client_plugin.show_ext_resource
        mock_pc_get.return_value = None
        self.assertIsNone(self.test_resource._show_resource(),
                          'Failed to show resource')

    def test_resource_handle_update(self):
        mock_ppg_patch = self.test_client_plugin.update_ext_resource
        self.test_resource.resource_id = '477e8273-60a7-4c41-b683-fdb0bc7cd151'

        prop_diff = {
            'name': 'name-updated',
            'description': 'description-updated',
            'port_pair_groups': ['port_pair_group_2'],
            'flow_classifiers': ['flow_classifier2'],
        }
        self.test_resource.handle_update(json_snippet=None,
                                         tmpl_diff=None,
                                         prop_diff=prop_diff)

        mock_ppg_patch.assert_called_once_with(
            'port_chain',
            {
                'name': 'name-updated',
                'description': 'description-updated',
                'port_pair_groups': ['port_pair_group_2'],
                'flow_classifiers': ['flow_classifier2'],
            }, self.test_resource.resource_id)
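

# Illustrative sketch, not part of the original test module: outside of these
# mocked tests, the lifecycle the resource implements maps onto the neutron
# extension client API roughly as follows. `client_plugin` stands in for a
# real NeutronClientPlugin instance, and the dict-style return value indexed
# below is an assumption for illustration only.
#
#     created = client_plugin.create_ext_resource('port_chain', {
#         'name': 'test_port_chain',
#         'port_pair_groups': ['port_pair_group_1'],
#         'flow_classifiers': ['flow_classifier1'],
#         'chain_parameters': {'correlation': 'mpls'},
#     })
#     client_plugin.update_ext_resource('port_chain', {'name': 'renamed'},
#                                       created['id'])
#     client_plugin.delete_ext_resource('port_chain', created['id'])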
apache-2.0
103,200,701,343,941,060
37.409357
79
0.600792
false
3.923536
true
false
false
matrumz/RPi_Custom_Files
Printing/hplip-3.15.2/ui4/plugindiagnose.py
1
3164
# -*- coding: utf-8 -*-
#
# (c) Copyright 2001-2011 Hewlett-Packard Development Company, L.P.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Amarnath Chitumalla
#

# Local
from base.g import *
from base import device, utils, pkit
from prnt import cups
from base.codes import *
from .ui_utils import *
from installer import pluginhandler
from base.sixext import to_unicode

# Qt
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import signal

# Ui
from .plugindiagnose_base import Ui_Dialog


class PluginDiagnose(QDialog, Ui_Dialog):
    def __init__(self, parent, install_mode=PLUGIN_NONE, plugin_reason=PLUGIN_REASON_NONE, upgrade=False):
        QDialog.__init__(self, parent)
        self.install_mode = install_mode
        self.plugin_reason = plugin_reason
        self.plugin_path = None
        self.result = False
        self.pluginObj = pluginhandler.PluginHandle()
        self.setupUi(self, upgrade)

        self.user_settings = UserSettings()
        self.user_settings.load()
        self.user_settings.debug()
        self.initUi()

    def initUi(self):
        # connect signals/slots
        self.connect(self.CancelButton, SIGNAL("clicked()"), self.CancelButton_clicked)
        self.connect(self.NextButton, SIGNAL("clicked()"), self.NextButton_clicked)
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        # Application icon
        self.setWindowIcon(QIcon(load_pixmap('hp_logo', '128x128')))

    def PathLineEdit_textChanged(self, t):
        self.plugin_path = to_unicode(t)
        self.setPathIndicators()

    #
    # Misc
    #

    def displayPage(self, page):
        self.updateStepText(page)
        self.StackedWidget.setCurrentIndex(page)

    def CancelButton_clicked(self):
        self.close()

    def NextButton_clicked(self):
        self.NextButton.setEnabled(False)
        self.CancelButton.setEnabled(False)
        try:
            plugin = PLUGIN_REQUIRED
            plugin_reason = PLUGIN_REASON_NONE
            ok, sudo_ok = pkit.run_plugin_command(plugin == PLUGIN_REQUIRED, plugin_reason)
            if not ok or self.pluginObj.getStatus() != pluginhandler.PLUGIN_INSTALLED:
                FailureUI(self, self.__tr("Failed to install Plug-in.\nEither you have chosen to skip the Plug-in installation or entered incorrect Password."))
        finally:
            endWaitCursor()

        self.result = True
        self.close()

    def __tr(self, s, c=None):
        return qApp.translate("PluginDialog", s, c)
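

# Illustrative sketch, not part of the original module: the dialog is normally
# launched from a parent window, but a minimal standalone invocation under the
# same PyQt4 API would look roughly like this (PLUGIN_REQUIRED comes from
# base.codes via the star import above).
#
#     import sys
#     app = QApplication(sys.argv)
#     dialog = PluginDiagnose(None, install_mode=PLUGIN_REQUIRED)
#     dialog.show()
#     sys.exit(app.exec_())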
gpl-2.0
7,540,046,640,401,814,000
28.570093
161
0.677623
false
3.853837
false
false
false
gradiuscypher/internet_illithid
mirror_shield/endpoints/filestore.py
1
2550
import traceback
import requests
import time
import imghdr
from os.path import exists, isfile, join, isdir
from os import makedirs, listdir
from flask import Blueprint, request, send_from_directory, render_template

filestore = Blueprint('callback', __name__)


@filestore.route('/clone', methods=["POST"])
def clone():
    try:
        # Grab the JSON content in post
        content = request.get_json()
        url = content['url']
        url_filename = url.split("/")[-1]
        sender = content['sender']
        source = content['source']
        timestamp = int(time.time())
        filename = "files/{}/{}/{}-{}".format(source, sender, timestamp, url_filename)

        # Check if the user's folder exists
        if not exists("files/{}/{}".format(source, sender)):
            makedirs("files/{}/{}".format(source, sender))

        # Download the file and save to the user's directory
        # (the file is only written when the upstream server returns HTTP 200)
        r = requests.get(url, stream=True)
        if r.status_code == 200:
            with open(filename, 'wb') as f:
                for chunk in r:
                    f.write(chunk)

        return filename, 200
    except Exception:
        print(traceback.format_exc())
        return "Fail", 500


@filestore.route('/files/<path:path>', methods=["GET"])
def files(path):
    return send_from_directory('files', path)


@filestore.route('/src/<path:path>', methods=["GET"])
def src(path):
    print("PATH IS {}".format(path))
    return send_from_directory('src', path)


@filestore.route('/', methods=["GET"])
def services():
    services = []
    filepath = "files/"
    for f in listdir(filepath):
        if isdir(join(filepath, f)):
            services.append(f)
    return render_template('servicelist.html', services=services)


@filestore.route('/<service>/userlist', methods=["GET"])
def userlist(service):
    users = []
    filepath = "files/{}".format(service)
    for f in listdir(filepath):
        if isdir(join(filepath, f)):
            users.append(f)
    return render_template('userlist.html', users=users, service=service)


@filestore.route('/<service>/gallery/<user>', methods=["GET"])
def gallery(user, service):
    filepath = "files/{}/{}".format(service, user)
    images = []
    other = []
    for f in listdir(filepath):
        if isfile(join(filepath, f)):
            if imghdr.what(join(filepath, f)) is not None:
                images.append(f)
            else:
                other.append(f)
    return render_template('gallery.html', title="Gallery", images=images, filepath=filepath, otherfiles=other)
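

# Illustrative sketch, not part of the original blueprint: a client mirrors a
# file by POSTing JSON to /clone and gets back the relative path the copy was
# stored under. The host and port below are hypothetical.
#
#     resp = requests.post('http://localhost:5000/clone', json={
#         'url': 'http://example.com/cat.png',
#         'sender': 'some_user',
#         'source': 'discord',
#     })
#     print(resp.text)  # e.g. files/discord/some_user/1500000000-cat.png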
mit
-8,529,620,719,959,950,000
27.977273
111
0.606667
false
3.923077
false
false
false
chetan51/nupic.research
sensorimotor/tests/unit/one_d_universe_test.py
1
2335
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------

import unittest2 as unittest

from sensorimotor.one_d_universe import OneDUniverse


class OneDUniverseTest(unittest.TestCase):

  def testEncodeSensorValue(self):
    universe = OneDUniverse(debugSensor=True,
                            nSensor=105, wSensor=5,
                            nMotor=105, wMotor=5)
    self.assertEqual(universe.encodeSensorValue(0), set(xrange(0, 5)))
    self.assertEqual(universe.encodeSensorValue(19), set(xrange(95, 100)))
    self.assertEqual(universe.encodeSensorValue(20), set(xrange(100, 105)))

    universe = OneDUniverse(debugSensor=False,
                            nSensor=105, wSensor=5,
                            nMotor=105, wMotor=5)
    self.assertNotEqual(universe.encodeSensorValue(0), set(xrange(0, 5)))

  def testEncodeMotorValue(self):
    universe = OneDUniverse(debugMotor=True,
                            nSensor=105, wSensor=5,
                            nMotor=48*21, wMotor=48)
    self.assertEqual(universe.encodeMotorValue(-10), set(xrange(0, 48)))
    self.assertEqual(universe.encodeMotorValue(0), set(xrange(480, 528)))
    self.assertEqual(universe.encodeMotorValue(10), set(xrange(960, 1008)))

    universe = OneDUniverse(debugMotor=False,
                            nSensor=105, wSensor=5,
                            nMotor=48*21, wMotor=48)
    self.assertNotEqual(universe.encodeMotorValue(-10), set(xrange(0, 48)))


if __name__ == "__main__":
  unittest.main()
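

# Illustrative sketch, not part of the original test module: in debug mode the
# encoders above activate one contiguous block of bits. Sensor value v maps to
# the w bits starting at v * w; motor value m is first shifted by
# (numPositions - 1) / 2 into a non-negative index. A minimal
# re-implementation of what the assertions check:
#
#     def debugEncode(index, w):
#         return set(xrange(index * w, (index + 1) * w))
#
#     debugEncode(0, 5) == set(xrange(0, 5))            # sensor value 0
#     debugEncode(-10 + 10, 48) == set(xrange(0, 48))   # motor -10, 21 positions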
gpl-3.0
1,721,112,332,134,884,000
39.258621
76
0.668094
false
3.898164
true
false
false
ucfopen/canvasapi
canvasapi/course.py
1
94642
import warnings

from canvasapi.assignment import Assignment, AssignmentGroup
from canvasapi.blueprint import BlueprintSubscription
from canvasapi.canvas_object import CanvasObject
from canvasapi.collaboration import Collaboration
from canvasapi.course_epub_export import CourseEpubExport
from canvasapi.custom_gradebook_columns import CustomGradebookColumn
from canvasapi.discussion_topic import DiscussionTopic
from canvasapi.exceptions import RequiredFieldMissing
from canvasapi.feature import Feature, FeatureFlag
from canvasapi.folder import Folder
from canvasapi.gradebook_history import (
    Day,
    Grader,
    SubmissionHistory,
    SubmissionVersion,
)
from canvasapi.grading_period import GradingPeriod
from canvasapi.grading_standard import GradingStandard
from canvasapi.license import License
from canvasapi.outcome_import import OutcomeImport
from canvasapi.page import Page
from canvasapi.paginated_list import PaginatedList
from canvasapi.progress import Progress
from canvasapi.quiz import QuizExtension
from canvasapi.rubric import Rubric, RubricAssociation
from canvasapi.submission import GroupedSubmission, Submission
from canvasapi.tab import Tab
from canvasapi.todo import Todo
from canvasapi.upload import FileOrPathLike, Uploader
from canvasapi.usage_rights import UsageRights
from canvasapi.util import (
    combine_kwargs,
    file_or_path,
    is_multivalued,
    normalize_bool,
    obj_or_id,
    obj_or_str,
)


class Course(CanvasObject):
    def __str__(self):
        return "{} {} ({})".format(self.course_code, self.name, self.id)

    def add_grading_standards(self, title, grading_scheme_entry, **kwargs):
        """
        Create a new grading standard for the course.

        :calls: `POST /api/v1/courses/:course_id/grading_standards \
        <https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.create>`_

        :param title: The title for the Grading Standard
        :type title: str
        :param grading_scheme_entry: A list of dictionaries containing keys for "name" and "value"
        :type grading_scheme_entry: list of dict
        :rtype: :class:`canvasapi.grading_standard.GradingStandard`
        """
        if not isinstance(grading_scheme_entry, list) or len(grading_scheme_entry) <= 0:
            raise ValueError("Param `grading_scheme_entry` must be a non-empty list.")

        for entry in grading_scheme_entry:
            if not isinstance(entry, dict):
                raise ValueError("grading_scheme_entry must consist of dictionaries.")
            if "name" not in entry or "value" not in entry:
                raise ValueError(
                    "Dictionaries with keys 'name' and 'value' are required."
                )
        kwargs["grading_scheme_entry"] = grading_scheme_entry

        response = self._requester.request(
            "POST",
            "courses/%s/grading_standards" % (self.id),
            title=title,
            _kwargs=combine_kwargs(**kwargs),
        )
        return GradingStandard(self._requester, response.json())

    def column_data_bulk_update(self, column_data, **kwargs):
        """
        Set the content of custom columns.

        :calls: `PUT /api/v1/courses/:course_id/custom_gradebook_column_data \
        <https://canvas.instructure.com/doc/api/custom_gradebook_columns.html#method.custom_gradebook_column_data_api.bulk_update>`_

        :param column_data: Content to put into the column
        :type column_data: list
        :rtype: :class:`canvasapi.progress.Progress`
        """
        kwargs["column_data"] = column_data
        response = self._requester.request(
            "PUT",
            "courses/{}/custom_gradebook_column_data".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return Progress(self._requester, response.json())

    def conclude(self, **kwargs):
        """
        Mark this course as concluded.
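
        Example (illustrative sketch, not from the upstream docs; assumes an
        authenticated ``canvas`` client and a hypothetical course ID)::

            course = canvas.get_course(123)
            course.conclude()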

        :calls: `DELETE /api/v1/courses/:id \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.destroy>`_

        :returns: True if the course was concluded, False otherwise.
        :rtype: bool
        """
        kwargs["event"] = "conclude"

        response = self._requester.request(
            "DELETE",
            "courses/{}".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json().get("conclude")

    def create_assignment(self, assignment, **kwargs):
        """
        Create a new assignment for this course.

        Note: The assignment is created in the active state.

        :calls: `POST /api/v1/courses/:course_id/assignments \
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.create>`_

        :param assignment: The attributes of the assignment
        :type assignment: dict
        :rtype: :class:`canvasapi.assignment.Assignment`
        """
        from canvasapi.assignment import Assignment

        if isinstance(assignment, dict) and "name" in assignment:
            kwargs["assignment"] = assignment
        else:
            raise RequiredFieldMissing("Dictionary with key 'name' is required.")

        response = self._requester.request(
            "POST",
            "courses/{}/assignments".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return Assignment(self._requester, response.json())

    def create_assignment_group(self, **kwargs):
        """
        Create a new assignment group for this course.

        :calls: `POST /api/v1/courses/:course_id/assignment_groups \
        <https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.create>`_

        :rtype: :class:`canvasapi.assignment.AssignmentGroup`
        """
        from canvasapi.assignment import AssignmentGroup

        response = self._requester.request(
            "POST",
            "courses/{}/assignment_groups".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()
        response_json.update({"course_id": self.id})

        return AssignmentGroup(self._requester, response_json)

    def create_assignment_overrides(self, assignment_overrides, **kwargs):
        """
        Create the specified overrides for each assignment.

        :calls: `POST /api/v1/courses/:course_id/assignments/overrides \
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.batch_create>`_

        :param assignment_overrides: Attributes for the new assignment overrides.
        :type assignment_overrides: list

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.assignment.AssignmentOverride`
        """
        from canvasapi.assignment import AssignmentOverride

        kwargs["assignment_overrides"] = assignment_overrides

        return PaginatedList(
            AssignmentOverride,
            self._requester,
            "POST",
            "courses/{}/assignments/overrides".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def create_content_migration(self, migration_type, **kwargs):
        """
        Create a content migration.
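
        Example (illustrative sketch, not from the upstream docs; the migrator
        type string is an assumed, commonly supported value)::

            migration = course.create_content_migration('common_cartridge_importer')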

        :calls: `POST /api/v1/courses/:course_id/content_migrations \
        <https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.create>`_

        :param migration_type: The migrator type to use in this migration
        :type migration_type: str or :class:`canvasapi.content_migration.Migrator`

        :rtype: :class:`canvasapi.content_migration.ContentMigration`
        """
        from canvasapi.content_migration import ContentMigration, Migrator

        if isinstance(migration_type, Migrator):
            kwargs["migration_type"] = migration_type.type
        elif isinstance(migration_type, str):
            kwargs["migration_type"] = migration_type
        else:
            raise TypeError("Parameter migration_type must be of type Migrator or str")

        response = self._requester.request(
            "POST",
            "courses/{}/content_migrations".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        response_json = response.json()
        response_json.update({"course_id": self.id})

        return ContentMigration(self._requester, response_json)

    def create_course_section(self, **kwargs):
        """
        Create a new section for this course.

        :calls: `POST /api/v1/courses/:course_id/sections \
        <https://canvas.instructure.com/doc/api/sections.html#method.sections.create>`_

        :rtype: :class:`canvasapi.course.Section`
        """
        from canvasapi.section import Section

        response = self._requester.request(
            "POST",
            "courses/{}/sections".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return Section(self._requester, response.json())

    def create_custom_column(self, column, **kwargs):
        """
        Create a custom gradebook column.

        :calls: `POST /api/v1/courses/:course_id/custom_gradebook_columns \
        <https://canvas.instructure.com/doc/api/custom_gradebook_columns.html#method.custom_gradebook_columns_api.create>`_

        :param column: A dictionary representing the Custom Gradebook Column to create
        :type column: dict

        :rtype: :class:`canvasapi.custom_gradebook_columns.CustomGradebookColumn`
        """
        if isinstance(column, dict) and "title" in column:
            kwargs["column"] = column
        else:
            raise RequiredFieldMissing("Dictionary with key 'title' is required.")

        response = self._requester.request(
            "POST",
            "courses/{}/custom_gradebook_columns".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        column_json = response.json()
        column_json.update({"course_id": self.id})

        return CustomGradebookColumn(self._requester, column_json)

    def create_discussion_topic(self, **kwargs):
        """
        Creates a new discussion topic for the course or group.

        :calls: `POST /api/v1/courses/:course_id/discussion_topics \
        <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.create>`_

        :rtype: :class:`canvasapi.discussion_topic.DiscussionTopic`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/discussion_topics".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        response_json = response.json()
        response_json.update({"course_id": self.id})

        return DiscussionTopic(self._requester, response_json)

    def create_epub_export(self, **kwargs):
        """
        Create an ePub export for a course.

        :calls: `POST /api/v1/courses/:course_id/epub_exports/:id\
        <https://canvas.instructure.com/doc/api/e_pub_exports.html#method.epub_exports.create>`_

        :rtype: :class:`canvasapi.course_epub_export.CourseEpubExport`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/epub_exports/".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return CourseEpubExport(self._requester, response.json())

    def create_external_feed(self, url, **kwargs):
        """
        Create a new external feed for the course.
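
        Example (illustrative sketch, not from the upstream docs; the feed URL
        is hypothetical)::

            feed = course.create_external_feed('http://example.com/rss.xml')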

        :calls: `POST /api/v1/courses/:course_id/external_feeds \
        <https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.create>`_

        :param url: The url of the external rss or atom feed
        :type url: str
        :rtype: :class:`canvasapi.external_feed.ExternalFeed`
        """
        from canvasapi.external_feed import ExternalFeed

        response = self._requester.request(
            "POST",
            "courses/{}/external_feeds".format(self.id),
            url=url,
            _kwargs=combine_kwargs(**kwargs),
        )
        return ExternalFeed(self._requester, response.json())

    def create_external_tool(self, **kwargs):
        """
        Create an external tool in the current course.

        :calls: `POST /api/v1/courses/:course_id/external_tools \
        <https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.create>`_

        :param name: The name of the tool
        :type name: str

        :rtype: :class:`canvasapi.external_tool.ExternalTool`
        """
        from canvasapi.external_tool import ExternalTool

        required_params = ("name", "privacy_level", "consumer_key", "shared_secret")
        if "client_id" not in kwargs and not all(x in kwargs for x in required_params):
            raise RequiredFieldMissing(
                "Must pass either `client_id` parameter or "
                "`name`, `privacy_level`, `consumer_key`, and `shared_secret` parameters."
            )

        response = self._requester.request(
            "POST",
            "courses/{}/external_tools".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()
        response_json.update({"course_id": self.id})

        return ExternalTool(self._requester, response_json)

    def create_folder(self, name, **kwargs):
        """
        Creates a folder in this course.

        :calls: `POST /api/v1/courses/:course_id/folders \
        <https://canvas.instructure.com/doc/api/files.html#method.folders.create>`_

        :param name: The name of the folder.
        :type name: str
        :rtype: :class:`canvasapi.folder.Folder`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/folders".format(self.id),
            name=name,
            _kwargs=combine_kwargs(**kwargs),
        )
        return Folder(self._requester, response.json())

    def create_group_category(self, name, **kwargs):
        """
        Create a group category.

        :calls: `POST /api/v1/courses/:course_id/group_categories \
        <https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.create>`_

        :param name: Name of the category.
        :type name: str
        :rtype: :class:`canvasapi.group.GroupCategory`
        """
        from canvasapi.group import GroupCategory

        response = self._requester.request(
            "POST",
            "courses/{}/group_categories".format(self.id),
            name=name,
            _kwargs=combine_kwargs(**kwargs),
        )
        return GroupCategory(self._requester, response.json())

    def create_late_policy(self, **kwargs):
        """
        Create a late policy. If the course already has a late policy, a
        bad_request is returned since there can only be one late policy
        per course.

        :calls: `POST /api/v1/courses/:id/late_policy \
        <https://canvas.instructure.com/doc/api/late_policy.html#method.late_policy.create>`_

        :rtype: :class:`canvasapi.course.LatePolicy`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/late_policy".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        late_policy_json = response.json()

        return LatePolicy(self._requester, late_policy_json["late_policy"])

    def create_module(self, module, **kwargs):
        """
        Create a new module.

        :calls: `POST /api/v1/courses/:course_id/modules \
        <https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.create>`_

        :param module: The attributes for the module.
        :type module: dict
        :returns: The created module.
        :rtype: :class:`canvasapi.module.Module`
        """
        from canvasapi.module import Module

        if isinstance(module, dict) and "name" in module:
            kwargs["module"] = module
        else:
            raise RequiredFieldMissing("Dictionary with key 'name' is required.")

        response = self._requester.request(
            "POST",
            "courses/{}/modules".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        module_json = response.json()
        module_json.update({"course_id": self.id})

        return Module(self._requester, module_json)

    def create_page(self, wiki_page, **kwargs):
        """
        Create a new wiki page.

        :calls: `POST /api/v1/courses/:course_id/pages \
        <https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.create>`_

        :param wiki_page: The title for the page.
        :type wiki_page: dict
        :returns: The created page.
        :rtype: :class:`canvasapi.page.Page`
        """
        if isinstance(wiki_page, dict) and "title" in wiki_page:
            kwargs["wiki_page"] = wiki_page
        else:
            raise RequiredFieldMissing("Dictionary with key 'title' is required.")

        response = self._requester.request(
            "POST", "courses/{}/pages".format(self.id), _kwargs=combine_kwargs(**kwargs)
        )

        page_json = response.json()
        page_json.update({"course_id": self.id})

        return Page(self._requester, page_json)

    def create_quiz(self, quiz, **kwargs):
        """
        Create a new quiz in this course.

        :calls: `POST /api/v1/courses/:course_id/quizzes \
        <https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.create>`_

        :param quiz: The attributes for the quiz.
        :type quiz: dict
        :rtype: :class:`canvasapi.quiz.Quiz`
        """
        from canvasapi.quiz import Quiz

        if isinstance(quiz, dict) and "title" in quiz:
            kwargs["quiz"] = quiz
        else:
            raise RequiredFieldMissing("Dictionary with key 'title' is required.")

        response = self._requester.request(
            "POST",
            "courses/{}/quizzes".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        quiz_json = response.json()
        quiz_json.update({"course_id": self.id})

        return Quiz(self._requester, quiz_json)

    def create_rubric(self, **kwargs):
        """
        Create a new rubric.

        :calls: `POST /api/v1/courses/:course_id/rubrics \
        <https://canvas.instructure.com/doc/api/rubrics.html#method.rubrics.create>`_

        :returns: Returns a dictionary with rubric and rubric association.
        :rtype: `dict`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/rubrics".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        dictionary = response.json()

        rubric_dict = {}

        if "rubric" in dictionary:
            r_dict = dictionary["rubric"]
            rubric = Rubric(self._requester, r_dict)
            rubric_dict = {"rubric": rubric}

        if "rubric_association" in dictionary:
            ra_dict = dictionary["rubric_association"]
            rubric_association = RubricAssociation(self._requester, ra_dict)
            rubric_dict.update({"rubric_association": rubric_association})

        return rubric_dict

    def create_rubric_association(self, **kwargs):
        """
        Create a new RubricAssociation.

        :calls: `POST /api/v1/courses/:course_id/rubric_associations \
        <https://canvas.instructure.com/doc/api/rubrics.html#method.rubric_associations.create>`_

        :returns: Returns a RubricAssociation.
        :rtype: :class:`canvasapi.rubric.RubricAssociation`
        """
        from canvasapi.rubric import RubricAssociation

        response = self._requester.request(
            "POST",
            "courses/{}/rubric_associations".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        quiz_json = response.json()
        quiz_json.update({"course_id": self.id})

        return RubricAssociation(self._requester, quiz_json)

    def delete(self, **kwargs):
        """
        Permanently delete this course.
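
        Example (illustrative sketch, not from the upstream docs)::

            if course.delete():
                print('course deleted')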

        :calls: `DELETE /api/v1/courses/:id \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.destroy>`_

        :returns: True if the course was deleted, False otherwise.
        :rtype: bool
        """
        kwargs["event"] = "delete"

        response = self._requester.request(
            "DELETE",
            "courses/{}".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json().get("delete")

    def delete_external_feed(self, feed, **kwargs):
        """
        Deletes the external feed.

        :calls: `DELETE /api/v1/courses/:course_id/external_feeds/:external_feed_id \
        <https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.destroy>`_

        :param feed: The object or ID of the feed to be deleted.
        :type feed: :class:`canvasapi.external_feed.ExternalFeed` or int

        :rtype: :class:`canvasapi.external_feed.ExternalFeed`
        """
        from canvasapi.external_feed import ExternalFeed

        feed_id = obj_or_id(feed, "feed", (ExternalFeed,))

        response = self._requester.request(
            "DELETE",
            "courses/{}/external_feeds/{}".format(self.id, feed_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return ExternalFeed(self._requester, response.json())

    def edit_front_page(self, **kwargs):
        """
        Update the title or contents of the front page.

        :calls: `PUT /api/v1/courses/:course_id/front_page \
        <https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.update_front_page>`_

        :rtype: :class:`canvasapi.course.Course`
        """
        response = self._requester.request(
            "PUT",
            "courses/{}/front_page".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        page_json = response.json()
        page_json.update({"course_id": self.id})

        return Page(self._requester, page_json)

    def edit_late_policy(self, **kwargs):
        """
        Patch a late policy. No body is returned upon success.

        :calls: `PATCH /api/v1/courses/:id/late_policy \
        <https://canvas.instructure.com/doc/api/late_policy.html#method.late_policy.update>`_

        :returns: True if Late Policy was updated successfully. False otherwise.
        :rtype: bool
        """
        response = self._requester.request(
            "PATCH",
            "courses/{}/late_policy".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return response.status_code == 204

    def enroll_user(self, user, enrollment_type=None, **kwargs):
        """
        Create a new user enrollment for a course or a section.

        :calls: `POST /api/v1/courses/:course_id/enrollments \
        <https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.create>`_

        :param user: The object or ID of the user to enroll in this course.
        :type user: :class:`canvasapi.user.User` or int
        :param enrollment_type: The type of enrollment.
        :type enrollment_type: str, optional
        :rtype: :class:`canvasapi.enrollment.Enrollment`
        """
        from canvasapi.enrollment import Enrollment
        from canvasapi.user import User

        kwargs["enrollment[user_id]"] = obj_or_id(user, "user", (User,))

        if enrollment_type:
            warnings.warn(
                (
                    "The `enrollment_type` argument is deprecated and will be "
                    "removed in a future version.\n"
                    "Use `enrollment[type]` as a keyword argument instead. "
                    "e.g. `enroll_user(enrollment={'type': 'StudentEnrollment'})`"
                ),
                DeprecationWarning,
            )
            kwargs["enrollment[type]"] = enrollment_type

        response = self._requester.request(
            "POST",
            "courses/{}/enrollments".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return Enrollment(self._requester, response.json())

    def export_content(self, export_type, **kwargs):
        """
        Begin a content export job for a course.

        :calls: `POST /api/v1/courses/:course_id/content_exports\
        <https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.create>`_

        :param export_type: The type of content to export.
        :type export_type: str

        :rtype: :class:`canvasapi.content_export.ContentExport`
        """
        from canvasapi.content_export import ContentExport

        kwargs["export_type"] = export_type

        response = self._requester.request(
            "POST",
            "courses/{}/content_exports".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return ContentExport(self._requester, response.json())

    def get_all_outcome_links_in_context(self, **kwargs):
        """
        Get all outcome links for context - BETA

        :calls: `GET /api/v1/courses/:course_id/outcome_group_links \
        <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.link_index>`_

        :returns: Paginated List of OutcomesLinks in the context.
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.outcome.OutcomeLink`
        """
        from canvasapi.outcome import OutcomeLink

        return PaginatedList(
            OutcomeLink,
            self._requester,
            "GET",
            "courses/{}/outcome_group_links".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_assignment(self, assignment, **kwargs):
        """
        Return the assignment with the given ID.

        :calls: `GET /api/v1/courses/:course_id/assignments/:id \
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.show>`_

        :param assignment: The object or ID of the assignment to retrieve.
        :type assignment: :class:`canvasapi.assignment.Assignment` or int

        :rtype: :class:`canvasapi.assignment.Assignment`
        """
        from canvasapi.assignment import Assignment

        assignment_id = obj_or_id(assignment, "assignment", (Assignment,))

        response = self._requester.request(
            "GET",
            "courses/{}/assignments/{}".format(self.id, assignment_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return Assignment(self._requester, response.json())

    def get_assignment_group(self, assignment_group, **kwargs):
        """
        Retrieve specified assignment group for the specified course.

        :calls: `GET /api/v1/courses/:course_id/assignment_groups/:assignment_group_id \
        <https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups_api.show>`_

        :param assignment_group: object or ID of assignment group.
        :type assignment_group: :class:`canvasapi.assignment.AssignmentGroup` or int

        :rtype: :class:`canvasapi.assignment.AssignmentGroup`
        """
        from canvasapi.assignment import AssignmentGroup

        assignment_group_id = obj_or_id(
            assignment_group, "assignment_group", (AssignmentGroup,)
        )

        response = self._requester.request(
            "GET",
            "courses/{}/assignment_groups/{}".format(self.id, assignment_group_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()
        response_json.update({"course_id": self.id})

        return AssignmentGroup(self._requester, response_json)

    def get_assignment_groups(self, **kwargs):
        """
        List assignment groups for the specified course.

        :calls: `GET /api/v1/courses/:course_id/assignment_groups \
        <https://canvas.instructure.com/doc/api/assignment_groups.html#method.assignment_groups.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.assignment.AssignmentGroup`
        """
        from canvasapi.assignment import AssignmentGroup

        return PaginatedList(
            AssignmentGroup,
            self._requester,
            "GET",
            "courses/{}/assignment_groups".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_assignment_overrides(self, assignment_overrides, **kwargs):
        """
        List the specified overrides in this course, providing
        they target sections/groups/students visible to the current user.
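
        Example (illustrative sketch, not from the upstream docs; the override
        and assignment IDs are hypothetical)::

            overrides = course.get_assignment_overrides(
                [{'id': 1, 'assignment_id': 10}, {'id': 2, 'assignment_id': 11}]
            )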

        :calls: `GET /api/v1/courses/:course_id/assignments/overrides \
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.batch_retrieve>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.assignment.AssignmentOverride`
        """
        from canvasapi.assignment import AssignmentOverride

        kwargs["assignment_overrides"] = assignment_overrides

        return PaginatedList(
            AssignmentOverride,
            self._requester,
            "GET",
            "courses/{}/assignments/overrides".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_assignments(self, **kwargs):
        """
        List all of the assignments in this course.

        :calls: `GET /api/v1/courses/:course_id/assignments \
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.assignment.Assignment`
        """
        from canvasapi.assignment import Assignment

        return PaginatedList(
            Assignment,
            self._requester,
            "GET",
            "courses/{}/assignments".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_assignments_for_group(self, assignment_group, **kwargs):
        """
        Returns a paginated list of assignments for the given assignment group

        :calls: `GET /api/v1/courses/:course_id/assignment_groups/:assignment_group_id/assignments\
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignments_api.index>`_

        :param assignment_group: The object or id of the assignment group
        :type assignment_group: :class:`canvasapi.assignment.AssignmentGroup` or int

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.assignment.Assignment`
        """
        assignment_group_id = obj_or_id(
            assignment_group, "assignment_group", (AssignmentGroup,)
        )

        return PaginatedList(
            Assignment,
            self._requester,
            "GET",
            "courses/{}/assignment_groups/{}/assignments".format(
                self.id, assignment_group_id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_blueprint(self, template="default", **kwargs):
        """
        Return the blueprint of a given ID.

        :calls: `GET /api/v1/courses/:course_id/blueprint_templates/:template_id \
        <https://canvas.instructure.com/doc/api/blueprint_courses.html#method.master_courses/master_templates.show>`_

        :param template: The object or ID of the blueprint template to get.
        :type template: int or :class:`canvasapi.blueprint.BlueprintTemplate`

        :rtype: :class:`canvasapi.blueprint.BlueprintTemplate`
        """
        from canvasapi.blueprint import BlueprintTemplate

        if template == "default":
            template_id = template
        else:
            template_id = obj_or_id(template, "template", (BlueprintTemplate,))

        response = self._requester.request(
            "GET",
            "courses/{}/blueprint_templates/{}".format(self.id, template_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return BlueprintTemplate(self._requester, response.json())

    def get_collaborations(self, **kwargs):
        """
        Return a list of collaborations for a given course ID.

        :calls: `GET /api/v1/courses/:course_id/collaborations \
        <https://canvas.instructure.com/doc/api/collaborations.html#method.collaborations.api_index>`_

        :rtype: :class:`canvasapi.collaboration.Collaboration`
        """
        return PaginatedList(
            Collaboration,
            self._requester,
            "GET",
            "courses/{}/collaborations".format(self.id),
            _root="collaborations",
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_content_export(self, content_export, **kwargs):
        """
        Return information about a single content export.
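
        Example (illustrative sketch, not from the upstream docs; the export ID
        is hypothetical)::

            export = course.get_content_export(11)
            print(export.export_type)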
        :calls: `GET /api/v1/courses/:course_id/content_exports/:id\
        <https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.show>`_

        :param content_export: The object or ID of the content export to show.
        :type content_export: int or :class:`canvasapi.content_export.ContentExport`

        :rtype: :class:`canvasapi.content_export.ContentExport`
        """
        from canvasapi.content_export import ContentExport

        export_id = obj_or_id(content_export, "content_export", (ContentExport,))

        response = self._requester.request(
            "GET",
            "courses/{}/content_exports/{}".format(self.id, export_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return ContentExport(self._requester, response.json())

    def get_content_exports(self, **kwargs):
        """
        Return a paginated list of the past and pending content export jobs
        for a course.

        :calls: `GET /api/v1/courses/:course_id/content_exports\
        <https://canvas.instructure.com/doc/api/content_exports.html#method.content_exports_api.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.content_export.ContentExport`
        """
        from canvasapi.content_export import ContentExport

        return PaginatedList(
            ContentExport,
            self._requester,
            "GET",
            "courses/{}/content_exports".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_content_migration(self, content_migration, **kwargs):
        """
        Retrieve a content migration by its ID

        :calls: `GET /api/v1/courses/:course_id/content_migrations/:id \
        <https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.show>`_

        :param content_migration: The object or ID of the content migration to retrieve.
        :type content_migration: int, str or
            :class:`canvasapi.content_migration.ContentMigration`

        :rtype: :class:`canvasapi.content_migration.ContentMigration`
        """
        from canvasapi.content_migration import ContentMigration

        migration_id = obj_or_id(
            content_migration, "content_migration", (ContentMigration,)
        )

        response = self._requester.request(
            "GET",
            "courses/{}/content_migrations/{}".format(self.id, migration_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        response_json = response.json()
        response_json.update({"course_id": self.id})

        return ContentMigration(self._requester, response_json)

    def get_content_migrations(self, **kwargs):
        """
        List content migrations that the current account can view or manage.
:calls: `GET /api/v1/courses/:course_id/content_migrations/ \ <https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.content_migration.ContentMigration` """ from canvasapi.content_migration import ContentMigration return PaginatedList( ContentMigration, self._requester, "GET", "courses/{}/content_migrations".format(self.id), {"course_id": self.id}, _kwargs=combine_kwargs(**kwargs), ) def get_course_level_assignment_data(self, **kwargs): """ Return a list of assignments for the course sorted by due date :calls: `GET /api/v1/courses/:course_id/analytics/assignments \ <https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_assignments>`_ :rtype: dict """ response = self._requester.request( "GET", "courses/{}/analytics/assignments".format(self.id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_course_level_participation_data(self, **kwargs): """ Return page view hits and participation numbers grouped by day through the course's history :calls: `GET /api/v1/courses/:course_id/analytics/activity \ <https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_participation>`_ :rtype: dict """ response = self._requester.request( "GET", "courses/{}/analytics/activity".format(self.id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_course_level_student_summary_data(self, **kwargs): """ Return a summary of per-user access information for all students in a course :calls: `GET /api/v1/courses/:course_id/analytics/student_summaries \ <https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.course_student_summaries>`_ :rtype: dict """ response = self._requester.request( "GET", "courses/{}/analytics/student_summaries".format(self.id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_custom_columns(self, **kwargs): """ List of all the custom gradebook columns for a course. :calls: `GET /api/v1/courses/:course_id/custom_gradebook_columns \ <https://canvas.instructure.com/doc/api/custom_gradebook_columns.html#method.custom_gradebook_columns_api.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.custom_gradebook_columns.CustomGradebookColumn` """ return PaginatedList( CustomGradebookColumn, self._requester, "GET", "courses/{}/custom_gradebook_columns".format(self.id), {"course_id": self.id}, _kwargs=combine_kwargs(**kwargs), ) def get_discussion_topic(self, topic, **kwargs): """ Return data on an individual discussion topic. :calls: `GET /api/v1/courses/:course_id/discussion_topics/:topic_id \ <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.show>`_ :param topic: The object or ID of the discussion topic. :type topic: :class:`canvasapi.discussion_topic.DiscussionTopic` or int :rtype: :class:`canvasapi.discussion_topic.DiscussionTopic` """ topic_id = obj_or_id(topic, "topic", (DiscussionTopic,)) response = self._requester.request( "GET", "courses/{}/discussion_topics/{}".format(self.id, topic_id), _kwargs=combine_kwargs(**kwargs), ) response_json = response.json() response_json.update({"course_id": self.id}) return DiscussionTopic(self._requester, response_json) def get_discussion_topics(self, **kwargs): """ Returns the paginated list of discussion topics for this course or group. 
        :calls: `GET /api/v1/courses/:course_id/discussion_topics \
        <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.discussion_topic.DiscussionTopic`
        """
        return PaginatedList(
            DiscussionTopic,
            self._requester,
            "GET",
            "courses/{}/discussion_topics".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_enabled_features(self, **kwargs):
        """
        Lists all enabled features in a course.

        :calls: `GET /api/v1/courses/:course_id/features/enabled \
        <https://canvas.instructure.com/doc/api/feature_flags.html#method.feature_flags.enabled_features>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.feature.Feature`
        """
        return PaginatedList(
            Feature,
            self._requester,
            "GET",
            "courses/{}/features/enabled".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_enrollments(self, **kwargs):
        """
        List all of the enrollments in this course.

        :calls: `GET /api/v1/courses/:course_id/enrollments \
        <https://canvas.instructure.com/doc/api/enrollments.html#method.enrollments_api.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.enrollment.Enrollment`
        """
        from canvasapi.enrollment import Enrollment

        return PaginatedList(
            Enrollment,
            self._requester,
            "GET",
            "courses/{}/enrollments".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_epub_export(self, epub, **kwargs):
        """
        Get information about a single epub export.

        :calls: `GET /api/v1/courses/:course_id/epub_exports/:id\
        <https://canvas.instructure.com/doc/api/e_pub_exports.html#method.epub_exports.show>`_

        :param epub: Object or ID of ePub Export
        :type epub: int or :class:`canvasapi.course_epub_export.CourseEpubExport`

        :rtype: :class:`canvasapi.course_epub_export.CourseEpubExport`
        """
        epub_id = obj_or_id(epub, "epub", (CourseEpubExport,))

        response = self._requester.request(
            "GET",
            "courses/{}/epub_exports/{}".format(self.id, epub_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return CourseEpubExport(self._requester, response.json())

    def get_external_feeds(self, **kwargs):
        """
        Returns the list of External Feeds for this course.

        :calls: `GET /api/v1/courses/:course_id/external_feeds \
        <https://canvas.instructure.com/doc/api/announcement_external_feeds.html#method.external_feeds.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.external_feed.ExternalFeed`
        """
        from canvasapi.external_feed import ExternalFeed

        return PaginatedList(
            ExternalFeed,
            self._requester,
            "GET",
            "courses/{}/external_feeds".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_external_tool(self, tool, **kwargs):
        """
        :calls: `GET /api/v1/courses/:course_id/external_tools/:external_tool_id \
        <https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.show>`_

        :param tool: The object or ID of the tool to retrieve.
        :type tool: :class:`canvasapi.external_tool.ExternalTool` or int

        :rtype: :class:`canvasapi.external_tool.ExternalTool`
        """
        from canvasapi.external_tool import ExternalTool

        tool_id = obj_or_id(tool, "tool", (ExternalTool,))

        response = self._requester.request(
            "GET",
            "courses/{}/external_tools/{}".format(self.id, tool_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        tool_json = response.json()
        tool_json.update({"course_id": self.id})

        return ExternalTool(self._requester, tool_json)

    def get_external_tools(self, **kwargs):
        """
        :calls: `GET /api/v1/courses/:course_id/external_tools \
        <https://canvas.instructure.com/doc/api/external_tools.html#method.external_tools.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.external_tool.ExternalTool`
        """
        from canvasapi.external_tool import ExternalTool

        return PaginatedList(
            ExternalTool,
            self._requester,
            "GET",
            "courses/{}/external_tools".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_feature_flag(self, feature, **kwargs):
        """
        Return the feature flag that applies to the given course.

        :calls: `GET /api/v1/courses/:course_id/features/flags/:feature \
        <https://canvas.instructure.com/doc/api/feature_flags.html#method.feature_flags.show>`_

        :param feature: The feature object or name of the feature to retrieve.
        :type feature: :class:`canvasapi.feature.Feature` or str

        :rtype: :class:`canvasapi.feature.FeatureFlag`
        """
        feature_name = obj_or_str(feature, "name", (Feature,))

        response = self._requester.request(
            "GET",
            "courses/{}/features/flags/{}".format(self.id, feature_name),
            _kwargs=combine_kwargs(**kwargs),
        )
        return FeatureFlag(self._requester, response.json())

    def get_features(self, **kwargs):
        """
        Lists all features of a course.

        :calls: `GET /api/v1/courses/:course_id/features \
        <https://canvas.instructure.com/doc/api/feature_flags.html#method.feature_flags.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.feature.Feature`
        """
        return PaginatedList(
            Feature,
            self._requester,
            "GET",
            "courses/{}/features".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_file(self, file, **kwargs):
        """
        Return the standard attachment json object for a file.

        :calls: `GET /api/v1/courses/:course_id/files/:id \
        <https://canvas.instructure.com/doc/api/files.html#method.files.api_show>`_

        :param file: The object or ID of the file to retrieve.
        :type file: :class:`canvasapi.file.File` or int

        :rtype: :class:`canvasapi.file.File`
        """
        from canvasapi.file import File

        file_id = obj_or_id(file, "file", (File,))

        response = self._requester.request(
            "GET",
            "courses/{}/files/{}".format(self.id, file_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return File(self._requester, response.json())

    def get_file_quota(self, **kwargs):
        """
        Returns the total and used storage quota for the course.

        :calls: `GET /api/v1/courses/:course_id/files/quota \
        <https://canvas.instructure.com/doc/api/files.html#method.files.api_quota>`_

        :rtype: dict
        """
        response = self._requester.request(
            "GET",
            "courses/{}/files/quota".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json()

    def get_files(self, **kwargs):
        """
        Returns the paginated list of files for the course.
        :calls: `GET /api/v1/courses/:course_id/files \
        <https://canvas.instructure.com/doc/api/files.html#method.files.api_index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.file.File`
        """
        from canvasapi.file import File

        return PaginatedList(
            File,
            self._requester,
            "GET",
            "courses/{}/files".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_folder(self, folder, **kwargs):
        """
        Returns the details for a course folder

        :calls: `GET /api/v1/courses/:course_id/folders/:id \
        <https://canvas.instructure.com/doc/api/files.html#method.folders.show>`_

        :param folder: The object or ID of the folder to retrieve.
        :type folder: :class:`canvasapi.folder.Folder` or int

        :rtype: :class:`canvasapi.folder.Folder`
        """
        folder_id = obj_or_id(folder, "folder", (Folder,))

        response = self._requester.request(
            "GET",
            "courses/{}/folders/{}".format(self.id, folder_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return Folder(self._requester, response.json())

    def get_folders(self, **kwargs):
        """
        Returns the paginated list of all folders for the given course.
        This will be returned as a flat list containing all subfolders as well.

        :calls: `GET /api/v1/courses/:course_id/folders \
        <https://canvas.instructure.com/doc/api/files.html#method.folders.list_all_folders>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.folder.Folder`
        """
        return PaginatedList(
            Folder,
            self._requester,
            "GET",
            "courses/{}/folders".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_full_discussion_topic(self, topic, **kwargs):
        """
        Return a cached structure of the discussion topic.

        :calls: `GET /api/v1/courses/:course_id/discussion_topics/:topic_id/view \
        <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics_api.view>`_

        :param topic: The object or ID of the discussion topic.
        :type topic: :class:`canvasapi.discussion_topic.DiscussionTopic` or int

        :rtype: dict
        """
        topic_id = obj_or_id(topic, "topic", (DiscussionTopic,))

        response = self._requester.request(
            "GET",
            "courses/{}/discussion_topics/{}/view".format(self.id, topic_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json()

    def get_gradebook_history_dates(self, **kwargs):
        """
        Returns a map of dates to grader/assignment groups

        :calls: `GET /api/v1/courses/:course_id/gradebook_history/days\
        <https://canvas.instructure.com/doc/api/gradebook_history.html#method.gradebook_history_api.days>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.gradebook_history.Day`
        """
        return PaginatedList(
            Day,
            self._requester,
            "GET",
            "courses/{}/gradebook_history/days".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_gradebook_history_details(self, date, **kwargs):
        """
        Returns the graders who worked on this day, along with the assignments
        they worked on. More details can be obtained by selecting a grader and
        assignment and calling the 'submissions' api endpoint for a given date.

        :calls: `GET /api/v1/courses/:course_id/gradebook_history/:date\
        <https://canvas.instructure.com/doc/api/gradebook_history.html#method.\
        gradebook_history_api.day_details>`_

        :param date: The date for which you would like to see detailed information.
        :type date: int

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.gradebook_history.Grader`
        """
        return PaginatedList(
            Grader,
            self._requester,
            "GET",
            "courses/{}/gradebook_history/{}".format(self.id, date),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_grading_period(self, grading_period, **kwargs):
        """
        Return a single grading period for the associated course and id.

        :calls: `GET /api/v1/courses/:course_id/grading_periods/:id\
        <https://canvas.instructure.com/doc/api/grading_periods.html#method.grading_periods.index>`_

        :param grading_period: The ID of the grading period to retrieve.
        :type grading_period: int

        :rtype: :class:`canvasapi.grading_period.GradingPeriod`
        """
        response = self._requester.request(
            "GET",
            "courses/{}/grading_periods/{}".format(self.id, grading_period),
            _kwargs=combine_kwargs(**kwargs),
        )

        response_grading_period = response.json()["grading_periods"][0]
        response_grading_period.update({"course_id": self.id})

        return GradingPeriod(self._requester, response_grading_period)

    def get_grading_periods(self, **kwargs):
        """
        Return a list of grading periods for the associated course.

        :calls: `GET /api/v1/courses/:course_id/grading_periods\
        <https://canvas.instructure.com/doc/api/grading_periods.html#method.grading_periods.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.grading_period.GradingPeriod`
        """
        return PaginatedList(
            GradingPeriod,
            self._requester,
            "GET",
            "courses/{}/grading_periods".format(self.id),
            {"course_id": self.id},
            _root="grading_periods",
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_grading_standards(self, **kwargs):
        """
        Get a PaginatedList of the grading standards available for the course

        :calls: `GET /api/v1/courses/:course_id/grading_standards \
        <https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.context_index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.grading_standards.GradingStandard`
        """
        return PaginatedList(
            GradingStandard,
            self._requester,
            "GET",
            "courses/%s/grading_standards" % (self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_group_categories(self, **kwargs):
        """
        List group categories for a context.

        :calls: `GET /api/v1/courses/:course_id/group_categories \
        <https://canvas.instructure.com/doc/api/group_categories.html#method.group_categories.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.group.GroupCategory`
        """
        from canvasapi.group import GroupCategory

        return PaginatedList(
            GroupCategory,
            self._requester,
            "GET",
            "courses/{}/group_categories".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_groups(self, **kwargs):
        """
        Return list of active groups for the specified course.

        :calls: `GET /api/v1/courses/:course_id/groups \
        <https://canvas.instructure.com/doc/api/groups.html#method.groups.context_index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.group.Group`
        """
        from canvasapi.group import Group

        return PaginatedList(
            Group,
            self._requester,
            "GET",
            "courses/{}/groups".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_late_policy(self, **kwargs):
        """
        Returns the late policy for a course.
        :calls: `GET /api/v1/courses/:id/late_policy \
        <https://canvas.instructure.com/doc/api/late_policy.html#method.late_policy.show>`_

        :rtype: :class:`canvasapi.course.LatePolicy`
        """
        response = self._requester.request(
            "GET",
            "courses/{}/late_policy".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        late_policy_json = response.json()

        return LatePolicy(self._requester, late_policy_json["late_policy"])

    def get_licenses(self, **kwargs):
        """
        Returns a paginated list of the licenses that can be applied to the
        files under the course scope

        :calls: `GET /api/v1/course/:course_id/content_licenses \
        <https://canvas.instructure.com/doc/api/files.html#method.usage_rights.licenses>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.license.License`
        """
        return PaginatedList(
            License,
            self._requester,
            "GET",
            "courses/{}/content_licenses".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_migration_systems(self, **kwargs):
        """
        Return a list of migration systems.

        :calls: `GET /api/v1/courses/:course_id/content_migrations/migrators \
        <https://canvas.instructure.com/doc/api/content_migrations.html#method.content_migrations.available_migrators>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.content_migration.Migrator`
        """
        from canvasapi.content_migration import Migrator

        return PaginatedList(
            Migrator,
            self._requester,
            "GET",
            "courses/{}/content_migrations/migrators".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_module(self, module, **kwargs):
        """
        Retrieve a single module by ID.

        :calls: `GET /api/v1/courses/:course_id/modules/:id \
        <https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.show>`_

        :param module: The object or ID of the module to retrieve.
        :type module: :class:`canvasapi.module.Module` or int

        :rtype: :class:`canvasapi.module.Module`
        """
        from canvasapi.module import Module

        module_id = obj_or_id(module, "module", (Module,))

        response = self._requester.request(
            "GET",
            "courses/{}/modules/{}".format(self.id, module_id),
            _kwargs=combine_kwargs(**kwargs),
        )
        module_json = response.json()
        module_json.update({"course_id": self.id})

        return Module(self._requester, module_json)

    def get_modules(self, **kwargs):
        """
        Return a list of modules in this course.

        :calls: `GET /api/v1/courses/:course_id/modules \
        <https://canvas.instructure.com/doc/api/modules.html#method.context_modules_api.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.module.Module`
        """
        from canvasapi.module import Module

        return PaginatedList(
            Module,
            self._requester,
            "GET",
            "courses/{}/modules".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_multiple_submissions(self, **kwargs):
        """
        List submissions for multiple assignments.
        Get all existing submissions for a given set of students and assignments.

        :calls: `GET /api/v1/courses/:course_id/students/submissions \
        <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.for_students>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.submission.Submission`
        """
        is_grouped = kwargs.get("grouped", False)

        if normalize_bool(is_grouped, "grouped"):
            cls = GroupedSubmission
        else:
            cls = Submission

        return PaginatedList(
            cls,
            self._requester,
            "GET",
            "courses/{}/students/submissions".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_outcome_group(self, group, **kwargs):
        """
        Returns the details of the Outcome Group with the given id.
        :calls: `GET /api/v1/courses/:course_id/outcome_groups/:id \
        <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.show>`_

        :param group: The outcome group object or ID to return.
        :type group: :class:`canvasapi.outcome.OutcomeGroup` or int

        :returns: An outcome group object.
        :rtype: :class:`canvasapi.outcome.OutcomeGroup`
        """
        from canvasapi.outcome import OutcomeGroup

        outcome_group_id = obj_or_id(group, "group", (OutcomeGroup,))

        response = self._requester.request(
            "GET",
            "courses/{}/outcome_groups/{}".format(self.id, outcome_group_id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return OutcomeGroup(self._requester, response.json())

    def get_outcome_groups_in_context(self, **kwargs):
        """
        Get all outcome groups for context - BETA

        :calls: `GET /api/v1/courses/:course_id/outcome_groups \
        <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.index>`_

        :returns: Paginated list of OutcomeGroups in the context.
        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.outcome.OutcomeGroup`
        """
        from canvasapi.outcome import OutcomeGroup

        return PaginatedList(
            OutcomeGroup,
            self._requester,
            "GET",
            "courses/{}/outcome_groups".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_outcome_import_status(self, outcome_import, **kwargs):
        """
        Get the status of an already created Outcome import.
        Pass 'latest' for the outcome import id for the latest import.

        :calls: `GET /api/v1/courses/:course_id/outcome_imports/:id \
        <https://canvas.instructure.com/doc/api/outcome_imports.html#method.outcome_imports_api.show>`_

        :param outcome_import: The outcome import object or ID to get the status of.
        :type outcome_import: :class:`canvasapi.outcome_import.OutcomeImport`,
            int, or string: "latest"

        :rtype: :class:`canvasapi.outcome_import.OutcomeImport`
        """
        if outcome_import == "latest":
            outcome_import_id = "latest"
        else:
            outcome_import_id = obj_or_id(
                outcome_import, "outcome_import", (OutcomeImport,)
            )

        response = self._requester.request(
            "GET",
            "courses/{}/outcome_imports/{}".format(self.id, outcome_import_id),
            _kwargs=combine_kwargs(**kwargs),
        )

        response_json = response.json()
        response_json.update({"course_id": self.id})

        return OutcomeImport(self._requester, response_json)

    def get_outcome_result_rollups(self, **kwargs):
        """
        Get all outcome result rollups for context - BETA

        :calls: `GET /api/v1/courses/:course_id/outcome_rollups \
        <https://canvas.instructure.com/doc/api/outcome_results.html#method.outcome_results.rollups>`_

        :returns: List of outcome result rollups in the context.
        :rtype: dict
        """
        response = self._requester.request(
            "GET",
            "courses/{}/outcome_rollups".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json()

    def get_outcome_results(self, **kwargs):
        """
        Get all outcome results for context - BETA

        :calls: `GET /api/v1/courses/:course_id/outcome_results \
        <https://canvas.instructure.com/doc/api/outcome_results.html#method.outcome_results.index>`_

        :returns: List of potential related outcome result dicts.
        :rtype: dict
        """
        response = self._requester.request(
            "GET",
            "courses/{}/outcome_results".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json()

    def get_page(self, url, **kwargs):
        """
        Retrieve the contents of a wiki page.

        :calls: `GET /api/v1/courses/:course_id/pages/:url \
        <https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.show>`_

        :param url: The url for the page.
        :type url: str

        :returns: The specified page.
:rtype: :class:`canvasapi.page.Page` """ response = self._requester.request( "GET", "courses/{}/pages/{}".format(self.id, url), _kwargs=combine_kwargs(**kwargs), ) page_json = response.json() page_json.update({"course_id": self.id}) return Page(self._requester, page_json) def get_pages(self, **kwargs): """ List the wiki pages associated with a course. :calls: `GET /api/v1/courses/:course_id/pages \ <https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.page.Page` """ return PaginatedList( Page, self._requester, "GET", "courses/{}/pages".format(self.id), {"course_id": self.id}, _kwargs=combine_kwargs(**kwargs), ) def get_quiz(self, quiz, **kwargs): """ Return the quiz with the given id. :calls: `GET /api/v1/courses/:course_id/quizzes/:id \ <https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.show>`_ :param quiz: The object or ID of the quiz to retrieve. :type quiz: :class:`canvasapi.quiz.Quiz` or int :rtype: :class:`canvasapi.quiz.Quiz` """ from canvasapi.quiz import Quiz quiz_id = obj_or_id(quiz, "quiz", (Quiz,)) response = self._requester.request( "GET", "courses/{}/quizzes/{}".format(self.id, quiz_id), _kwargs=combine_kwargs(**kwargs), ) quiz_json = response.json() quiz_json.update({"course_id": self.id}) return Quiz(self._requester, quiz_json) def get_quiz_overrides(self, **kwargs): """ Retrieve the actual due-at, unlock-at, and available-at dates for quizzes based on the assignment overrides active for the current API user. :calls: `GET /api/v1/courses/:course_id/quizzes/assignment_overrides \ <https://canvas.instructure.com/doc/api/quiz_assignment_overrides.html#method.quizzes/quiz_assignment_overrides.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.quiz.QuizAssignmentOverrideSet` """ from canvasapi.quiz import QuizAssignmentOverrideSet return PaginatedList( QuizAssignmentOverrideSet, self._requester, "GET", "courses/{}/quizzes/assignment_overrides".format(self.id), _root="quiz_assignment_overrides", _kwargs=combine_kwargs(**kwargs), ) def get_quizzes(self, **kwargs): """ Return a list of quizzes belonging to this course. :calls: `GET /api/v1/courses/:course_id/quizzes \ <https://canvas.instructure.com/doc/api/quizzes.html#method.quizzes/quizzes_api.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.quiz.Quiz` """ from canvasapi.quiz import Quiz return PaginatedList( Quiz, self._requester, "GET", "courses/{}/quizzes".format(self.id), {"course_id": self.id}, _kwargs=combine_kwargs(**kwargs), ) def get_recent_students(self, **kwargs): """ Return a list of students in the course ordered by how recently they have logged in. :calls: `GET /api/v1/courses/:course_id/recent_students \ <https://canvas.instructure.com/doc/api/courses.html#method.courses.recent_students>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User` """ from canvasapi.user import User return PaginatedList( User, self._requester, "GET", "courses/{}/recent_students".format(self.id), _kwargs=combine_kwargs(**kwargs), ) def get_root_outcome_group(self, **kwargs): """ Redirect to root outcome group for context :calls: `GET /api/v1/courses/:course_id/root_outcome_group \ <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.redirect>`_ :returns: The OutcomeGroup of the context. 
:rtype: :class:`canvasapi.outcome.OutcomeGroup` """ from canvasapi.outcome import OutcomeGroup response = self._requester.request( "GET", "courses/{}/root_outcome_group".format(self.id), _kwargs=combine_kwargs(**kwargs), ) return OutcomeGroup(self._requester, response.json()) def get_rubric(self, rubric_id, **kwargs): """ Get a single rubric, based on rubric id. :calls: `GET /api/v1/courses/:course_id/rubrics/:id \ <https://canvas.instructure.com/doc/api/rubrics.html#method.rubrics_api.show>`_ :param rubric_id: The ID of the rubric. :type rubric_id: int :rtype: :class:`canvasapi.rubric.Rubric` """ response = self._requester.request( "GET", "courses/%s/rubrics/%s" % (self.id, rubric_id), _kwargs=combine_kwargs(**kwargs), ) return Rubric(self._requester, response.json()) def get_rubrics(self, **kwargs): """ Get the paginated list of active rubrics for the current course. :calls: `GET /api/v1/courses/:course_id/rubrics \ <https://canvas.instructure.com/doc/api/rubrics.html#method.rubrics_api.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.rubric.Rubric` """ return PaginatedList( Rubric, self._requester, "GET", "courses/%s/rubrics" % (self.id), _kwargs=combine_kwargs(**kwargs), ) def get_section(self, section, **kwargs): """ Retrieve a section. :calls: `GET /api/v1/courses/:course_id/sections/:id \ <https://canvas.instructure.com/doc/api/sections.html#method.sections.show>`_ :param section: The object or ID of the section to retrieve. :type section: :class:`canvasapi.section.Section` or int :rtype: :class:`canvasapi.section.Section` """ from canvasapi.section import Section section_id = obj_or_id(section, "section", (Section,)) response = self._requester.request( "GET", "courses/{}/sections/{}".format(self.id, section_id), _kwargs=combine_kwargs(**kwargs), ) return Section(self._requester, response.json()) def get_sections(self, **kwargs): """ List all sections in a course. :calls: `GET /api/v1/courses/:course_id/sections \ <https://canvas.instructure.com/doc/api/sections.html#method.sections.index>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.section.Section` """ from canvasapi.section import Section return PaginatedList( Section, self._requester, "GET", "courses/{}/sections".format(self.id), _kwargs=combine_kwargs(**kwargs), ) def get_settings(self, **kwargs): """ Returns this course's settings. :calls: `GET /api/v1/courses/:course_id/settings \ <https://canvas.instructure.com/doc/api/courses.html#method.courses.settings>`_ :rtype: dict """ response = self._requester.request( "GET", "courses/{}/settings".format(self.id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_single_grading_standard(self, grading_standard_id, **kwargs): """ Get a single grading standard from the course. :calls: `GET /api/v1/courses/:course_id/grading_standards/:grading_standard_id \ <https://canvas.instructure.com/doc/api/grading_standards.html#method.grading_standards_api.context_show>`_ :param grading_standard_id: The grading standard id :type grading_standard_id: int :rtype: :class:`canvasapi.grading_standards.GradingStandard` """ response = self._requester.request( "GET", "courses/%s/grading_standards/%d" % (self.id, grading_standard_id), _kwargs=combine_kwargs(**kwargs), ) return GradingStandard(self._requester, response.json()) def get_submission_history(self, date, grader_id, assignment_id, **kwargs): """ Gives a nested list of submission versions. 
        :calls: `GET /api/v1/courses/:course_id/gradebook_history/:date/graders\
        /:grader_id/assignments/:assignment_id/submissions\
        <https://canvas.instructure.com/doc/api/gradebook_history.html#method.\
        gradebook_history_api.submissions>`_

        :param date: The date for which you would like to see submissions
        :type date: str

        :param grader_id: The ID of the grader for which you want to see submissions.
        :type grader_id: int

        :param assignment_id: The ID of the assignment for which you want to see submissions
        :type assignment_id: int

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.gradebook_history.SubmissionHistory`
        """
        return PaginatedList(
            SubmissionHistory,
            self._requester,
            "GET",
            "courses/{}/gradebook_history/{}/graders/{}/assignments/{}/submissions".format(
                self.id, date, grader_id, assignment_id
            ),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_tabs(self, **kwargs):
        """
        List available tabs for a course.
        Returns a list of navigation tabs available in the current context.

        :calls: `GET /api/v1/courses/:course_id/tabs \
        <https://canvas.instructure.com/doc/api/tabs.html#method.tabs.index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.tab.Tab`
        """
        return PaginatedList(
            Tab,
            self._requester,
            "GET",
            "courses/{}/tabs".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_todo_items(self, **kwargs):
        """
        Returns the current user's course-specific todo items.

        :calls: `GET /api/v1/courses/:course_id/todo \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.todo_items>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.todo.Todo`
        """
        return PaginatedList(
            Todo,
            self._requester,
            "GET",
            "courses/{}/todo".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_uncollated_submissions(self, **kwargs):
        """
        Gives a paginated, uncollated list of submission versions for all matching
        submissions in the context. These SubmissionVersion objects will not include
        the new_grade or previous_grade keys, only the grade; same for graded_at
        and grader.

        :calls: `GET /api/v1/courses/:course_id/gradebook_history/feed\
        <https://canvas.instructure.com/doc/api/gradebook_history.html#method\
        .gradebook_history_api.feed>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.gradebook_history.SubmissionVersion`
        """
        return PaginatedList(
            SubmissionVersion,
            self._requester,
            "GET",
            "courses/{}/gradebook_history/feed".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

    def get_user(self, user, user_id_type=None, **kwargs):
        """
        Retrieve a user by their ID. `user_id_type` denotes which endpoint to try
        as there are several different ids that can pull the same user record
        from Canvas.

        :calls: `GET /api/v1/courses/:course_id/users/:id \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.user>`_

        :param user: The object or ID of the user to retrieve.
        :type user: :class:`canvasapi.user.User` or int

        :param user_id_type: The type of the ID to search for.
:type user_id_type: str :rtype: :class:`canvasapi.user.User` """ from canvasapi.user import User if user_id_type: uri = "courses/{}/users/{}:{}".format(self.id, user_id_type, user) else: user_id = obj_or_id(user, "user", (User,)) uri = "courses/{}/users/{}".format(self.id, user_id) response = self._requester.request("GET", uri, _kwargs=combine_kwargs(**kwargs)) return User(self._requester, response.json()) def get_user_in_a_course_level_assignment_data(self, user, **kwargs): """ Return a list of assignments for the course sorted by due date :calls: `GET /api/v1/courses/:course_id/analytics/users/:student_id/assignments \ <https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.student_in_course_assignments>`_ :param user: The object or ID of the related user :type user: :class:`canvasapi.user.User` or int :rtype: dict """ from canvasapi.user import User user_id = obj_or_id(user, "user", (User,)) response = self._requester.request( "GET", "courses/{}/analytics/users/{}/assignments".format(self.id, user_id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_user_in_a_course_level_messaging_data(self, user, **kwargs): """ Return messaging hits grouped by day through the entire history of the course :calls: `GET /api/v1/courses/:course_id/analytics/users/:student_id/communication \ <https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.student_in_course_messaging>`_ :param user: The object or ID of the related user :type user: :class:`canvasapi.user.User` or int :rtype: dict """ from canvasapi.user import User user_id = obj_or_id(user, "user", (User,)) response = self._requester.request( "GET", "courses/{}/analytics/users/{}/communication".format(self.id, user_id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_user_in_a_course_level_participation_data(self, user, **kwargs): """ Return page view hits grouped by hour and participation details through course's history :calls: `GET /api/v1/courses/:course_id/analytics/users/:student_id/activity \ <https://canvas.instructure.com/doc/api/analytics.html#method.analytics_api.student_in_course_participation>`_ :param user: The object or ID of the related user :type user: :class:`canvasapi.user.User` or int :rtype: dict """ from canvasapi.user import User user_id = obj_or_id(user, "user", (User,)) response = self._requester.request( "GET", "courses/{}/analytics/users/{}/activity".format(self.id, user_id), _kwargs=combine_kwargs(**kwargs), ) return response.json() def get_users(self, **kwargs): """ List all users in a course. :calls: `GET /api/v1/courses/:course_id/search_users \ <https://canvas.instructure.com/doc/api/courses.html#method.courses.users>`_ :rtype: :class:`canvasapi.paginated_list.PaginatedList` of :class:`canvasapi.user.User` """ from canvasapi.user import User return PaginatedList( User, self._requester, "GET", "courses/{}/search_users".format(self.id), _kwargs=combine_kwargs(**kwargs), ) def import_outcome(self, attachment, **kwargs): """ Import outcome into canvas. :calls: `POST /api/v1/courses/:course_id/outcome_imports \ <https://canvas.instructure.com/doc/api/outcome_imports.html#method.outcome_imports_api.create>`_ :param attachment: A file handler or path of the file to import. 
        :type attachment: file or str

        :rtype: :class:`canvasapi.outcome_import.OutcomeImport`
        """
        attachment, is_path = file_or_path(attachment)

        try:
            response = self._requester.request(
                "POST",
                "courses/{}/outcome_imports".format(self.id),
                file={"attachment": attachment},
                _kwargs=combine_kwargs(**kwargs),
            )

            response_json = response.json()
            response_json.update({"course_id": self.id})

            return OutcomeImport(self._requester, response_json)
        finally:
            if is_path:
                attachment.close()

    def list_blueprint_subscriptions(self, **kwargs):
        """
        Return a list of blueprint subscriptions for the given course.

        :calls: `GET /api/v1/courses/:course_id/blueprint_subscriptions\
        <https://canvas.instructure.com/doc/api/blueprint_courses.html#method.\
        master_courses/master_templates.subscriptions_index>`_

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.blueprint.BlueprintSubscription`
        """
        return PaginatedList(
            BlueprintSubscription,
            self._requester,
            "GET",
            "courses/{}/blueprint_subscriptions".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def preview_html(self, html, **kwargs):
        """
        Preview HTML content processed for this course.

        :calls: `POST /api/v1/courses/:course_id/preview_html \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.preview_html>`_

        :param html: The HTML code to preview.
        :type html: str

        :rtype: str
        """
        kwargs["html"] = html

        response = self._requester.request(
            "POST",
            "courses/{}/preview_html".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return response.json().get("html", "")

    def remove_usage_rights(self, **kwargs):
        """
        Removes the usage rights for specified files that are under the current course scope

        :calls: `DELETE /api/v1/courses/:course_id/usage_rights \
        <https://canvas.instructure.com/doc/api/files.html#method.usage_rights.remove_usage_rights>`_

        :rtype: dict
        """
        response = self._requester.request(
            "DELETE",
            "courses/{}/usage_rights".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return response.json()

    def reorder_pinned_topics(self, order, **kwargs):
        """
        Puts the pinned discussion topics in the specified order.
        All pinned topics should be included.

        :calls: `POST /api/v1/courses/:course_id/discussion_topics/reorder \
        <https://canvas.instructure.com/doc/api/discussion_topics.html#method.discussion_topics.reorder>`_

        :param order: The ids of the pinned discussion topics in the desired order.
            e.g. [104, 102, 103], (104, 102, 103), or "104,102,103"
        :type order: string or iterable sequence of values

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.discussion_topic.DiscussionTopic`
        """
        # Convert iterable sequence to comma-separated string
        if is_multivalued(order):
            order = ",".join([str(topic_id) for topic_id in order])

        # Check if is a string with commas
        if not isinstance(order, str) or "," not in order:
            raise ValueError("Param `order` must be a list, tuple, or string.")

        kwargs["order"] = order

        response = self._requester.request(
            "POST",
            "courses/{}/discussion_topics/reorder".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return response.json().get("reorder")

    def reset(self, **kwargs):
        """
        Delete the current course and create a new equivalent course with
        no content, but all sections and users moved over.
        :calls: `POST /api/v1/courses/:course_id/reset_content \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.reset_content>`_

        :rtype: :class:`canvasapi.course.Course`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/reset_content".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return Course(self._requester, response.json())

    def resolve_path(self, full_path=None, **kwargs):
        """
        Returns the paginated list of all of the folders in the given
        path starting at the course root folder. Returns root folder if called
        with no arguments.

        :calls: `GET /api/v1/courses/:course_id/folders/by_path/*full_path \
        <https://canvas.instructure.com/doc/api/files.html#method.folders.resolve_path>`_

        :param full_path: Full path to resolve, relative to course root.
        :type full_path: string

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.folder.Folder`
        """
        if full_path:
            return PaginatedList(
                Folder,
                self._requester,
                "GET",
                "courses/{0}/folders/by_path/{1}".format(self.id, full_path),
                _kwargs=combine_kwargs(**kwargs),
            )
        else:
            return PaginatedList(
                Folder,
                self._requester,
                "GET",
                "courses/{0}/folders/by_path".format(self.id),
                _kwargs=combine_kwargs(**kwargs),
            )

    def set_quiz_extensions(self, quiz_extensions, **kwargs):
        """
        Set extensions for all student quiz submissions in a course.

        :calls: `POST /api/v1/courses/:course_id/quizzes/:quiz_id/extensions
            <https://canvas.instructure.com/doc/api/quiz_extensions.html#method.quizzes/quiz_extensions.create>`_

        :param quiz_extensions: List of dictionaries representing extensions.
        :type quiz_extensions: list

        :rtype: list of :class:`canvasapi.quiz.QuizExtension`

        Example Usage:

        >>> course.set_quiz_extensions([
        ...     {
        ...         'user_id': 1,
        ...         'extra_time': 60,
        ...         'extra_attempts': 1
        ...     },
        ...     {
        ...         'user_id': 2,
        ...         'extra_attempts': 3
        ...     },
        ...     {
        ...         'user_id': 3,
        ...         'extra_time': 20
        ...     }
        ... ])
        """
        if not isinstance(quiz_extensions, list) or not quiz_extensions:
            raise ValueError("Param `quiz_extensions` must be a non-empty list.")

        if any(not isinstance(extension, dict) for extension in quiz_extensions):
            raise ValueError("Param `quiz_extensions` must only contain dictionaries")

        if any("user_id" not in extension for extension in quiz_extensions):
            raise RequiredFieldMissing(
                "Dictionaries in `quiz_extensions` must contain key `user_id`"
            )

        kwargs["quiz_extensions"] = quiz_extensions

        response = self._requester.request(
            "POST",
            "courses/{}/quiz_extensions".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        extension_list = response.json()["quiz_extensions"]
        return [
            QuizExtension(self._requester, extension) for extension in extension_list
        ]

    def set_usage_rights(self, **kwargs):
        """
        Changes the usage rights for specified files that are under the current course scope

        :calls: `PUT /api/v1/courses/:course_id/usage_rights \
        <https://canvas.instructure.com/doc/api/files.html#method.usage_rights.set_usage_rights>`_

        :rtype: :class:`canvasapi.usage_rights.UsageRights`
        """
        response = self._requester.request(
            "PUT",
            "courses/{}/usage_rights".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )

        return UsageRights(self._requester, response.json())

    def show_front_page(self, **kwargs):
        """
        Retrieve the content of the front page.
        :calls: `GET /api/v1/courses/:course_id/front_page \
        <https://canvas.instructure.com/doc/api/pages.html#method.wiki_pages_api.show_front_page>`_

        :rtype: :class:`canvasapi.page.Page`
        """
        response = self._requester.request(
            "GET",
            "courses/{}/front_page".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        page_json = response.json()
        page_json.update({"course_id": self.id})

        return Page(self._requester, page_json)

    def submissions_bulk_update(self, **kwargs):
        """
        Update the grading and comments on multiple student's assignment
        submissions in an asynchronous job.

        :calls: `POST /api/v1/courses/:course_id/submissions/update_grades \
        <https://canvas.instructure.com/doc/api/submissions.html#method.submissions_api.bulk_update>`_

        :rtype: :class:`canvasapi.progress.Progress`
        """
        response = self._requester.request(
            "POST",
            "courses/{}/submissions/update_grades".format(self.id),
            _kwargs=combine_kwargs(**kwargs),
        )
        return Progress(self._requester, response.json())

    def update(self, **kwargs):
        """
        Update this course.

        :calls: `PUT /api/v1/courses/:id \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.update>`_

        :returns: The updated name of the course if the update succeeded,
            ``None`` otherwise.
        :rtype: str or None
        """
        response = self._requester.request(
            "PUT", "courses/{}".format(self.id), _kwargs=combine_kwargs(**kwargs)
        )

        if response.json().get("name"):
            super(Course, self).set_attributes(response.json())

        return response.json().get("name")

    def update_assignment_overrides(self, assignment_overrides, **kwargs):
        """
        Update a list of specified overrides for each assignment.

        Note: All current overridden values must be supplied if they are to be retained.

        :calls: `PUT /api/v1/courses/:course_id/assignments/overrides \
        <https://canvas.instructure.com/doc/api/assignments.html#method.assignment_overrides.batch_update>`_

        :param assignment_overrides: Attributes for the updated assignment overrides.
        :type assignment_overrides: list

        :rtype: :class:`canvasapi.paginated_list.PaginatedList` of
            :class:`canvasapi.assignment.AssignmentOverride`
        """
        from canvasapi.assignment import AssignmentOverride

        kwargs["assignment_overrides"] = assignment_overrides

        return PaginatedList(
            AssignmentOverride,
            self._requester,
            "PUT",
            "courses/{}/assignments/overrides".format(self.id),
            {"course_id": self.id},
            _kwargs=combine_kwargs(**kwargs),
        )

    def update_settings(self, **kwargs):
        """
        Update a course's settings.

        :calls: `PUT /api/v1/courses/:course_id/settings \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.update_settings>`_

        :rtype: dict
        """
        response = self._requester.request(
            "PUT", "courses/{}/settings".format(self.id), **kwargs
        )
        return response.json()

    def upload(self, file: FileOrPathLike, **kwargs):
        """
        Upload a file to this course.

        :calls: `POST /api/v1/courses/:course_id/files \
        <https://canvas.instructure.com/doc/api/courses.html#method.courses.create_file>`_

        :param file: The file or path of the file to upload.
        :type file: file or str

        :returns: True if the file uploaded successfully, False otherwise, \
            and the JSON response from the API.
        :rtype: tuple
        """
        return Uploader(
            self._requester, "courses/{}/files".format(self.id), file, **kwargs
        ).start()


class CourseNickname(CanvasObject):
    def __str__(self):
        return "{} ({})".format(self.nickname, self.course_id)

    def remove(self, **kwargs):
        """
        Remove the nickname for the given course.
        Subsequent course API calls will return the actual name for
        the course.
:calls: `DELETE /api/v1/users/self/course_nicknames/:course_id \ <https://canvas.instructure.com/doc/api/users.html#method.course_nicknames.delete>`_ :rtype: :class:`canvasapi.course.CourseNickname` """ response = self._requester.request( "DELETE", "users/self/course_nicknames/{}".format(self.course_id), _kwargs=combine_kwargs(**kwargs), ) return CourseNickname(self._requester, response.json()) class LatePolicy(CanvasObject): def __str__(self): return "Late Policy {}".format(self.id)
mit
1,574,006,498,719,888,100
34.499625
132
0.601858
false
4.06032
false
false
false
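A short usage sketch for the Course methods in the record above. This is an editor's illustration, not part of the canvasapi source: the base URL, API key, and IDs below are placeholder assumptions.

# Hypothetical driver script for the canvasapi Course API shown above.
from canvasapi import Canvas

canvas = Canvas("https://example.instructure.com", "REPLACE_WITH_API_KEY")
course = canvas.get_course(1234)

# PaginatedList is lazy: further pages are requested as iteration proceeds.
for assignment in course.get_assignments():
    print(assignment.id, assignment.name)

# Extra keyword arguments are turned into query parameters via combine_kwargs().
students = course.get_users(enrollment_type=["student"])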
remeeting/mrp-score
match_ref_hyp.py
1
7218
#!/usr/bin/env python

######################################################################
#
# File: match_ref_hyp.py
# Author: Adam Janin
# Feb 23, 2017
#
# Given a reference file and a hypothesis file, figure out which parts
# of the hypotheses match which part of the reference. Output the
# hypotheses file as stm, where the non-text lines are taken
# from the matching lines of the reference (except where there's
# no matching references, where the non-text will be "unmatched").
#
# Currently, only stm for reference and ctm for hypothesis files are
# supported. The algorithm currently mimics sclite; the midpoint of a
# ctm word is used to determine its time, and it is assigned to the
# first stm segment it's within.
#
# This code can be used either as a stand-alone program, or it can
# be imported. See function match_ref_hyp()
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. The
# License may be found in the file LICENSE at the top level of the
# repository / directory tree.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.

from __future__ import print_function

from six.moves import input
import six

import argparse
from bisect import bisect
from collections import defaultdict
import logging
from operator import methodcaller
import sys

import utils

VERSION = 0.1


class Global:
    '''Stores globals. There should be no instances of Global.'''

    # Command line arguments
    args = None

    # Two times that differ by less than this are considered identical.
    epstime = 0.005

# end class Global


def main(argv):
    parse_arguments(argv[1:])
    setup_logging()

    instm = utils.read_stm(Global.args.ref)      # List of STM_Line
    inctm = utils.read_ctm(Global.args.inhyp)    # List of CTM_Line

    outhyp = match_ref_hyp(instm, inctm)         # List of STM_Line

    for line in outhyp:
        Global.args.outhyp.write(str(line))
        Global.args.outhyp.write('\n')

# end main()


def match_ref_hyp(instm, inctm):
    """Given a list of references of type STM_Line and a list of hypotheses of type CTM_Line, return a list of type STM_Line where each word in the input ctm is matched (in time) with a corresponding entry in the input stm file. If a word doesn't match any line in the input stm, it is given a new entry with speaker "unmatched" (since the input ctm doesn't specify speaker)."""

    outhyps = []

    # Go through the ctms, storing each file/channel in a separately
    # sorted list.

    # Map of e.g. 'sw_4390 A' -> list of CTM_Line sorted by midtime
    sorted_ctms = defaultdict(list)

    # Map of e.g. 'sw_4390 A' -> list of booleans, False if the ctm hasn't been assigned
    # to an stm line, True otherwise.
    assigned = {}

    # The midtimes of sorted_ctms in the same order.
    sorted_ctm_midtimes = {}

    # Store the ctms by file/channel
    for ctm in inctm:
        sorted_ctms['%s %s'%(ctm.file, ctm.channel)].append(ctm)

    # Sort them by midtime.
    for key in sorted_ctms:
        ctmlist = sorted_ctms[key]
        ctmlist.sort(key=methodcaller('midtime'))
        sorted_ctm_midtimes[key] = [x.midtime() for x in ctmlist]
        assigned[key] = [False]*len(ctmlist)

    for ref in instm:
        outwords = []
        filechannel = '%s %s'%(ref.file, ref.channel)
        if filechannel not in sorted_ctm_midtimes:
            continue
        cur_ctm_midtimes = sorted_ctm_midtimes[filechannel]
        cur_ctms = sorted_ctms[filechannel]
        for ii in range(bisect(cur_ctm_midtimes, ref.starttime), len(cur_ctm_midtimes)):
            hyptime = cur_ctm_midtimes[ii]
            hyp = cur_ctms[ii]
            if hyptime < ref.starttime or hyptime > ref.endtime:
                break
            else:
                if assigned[filechannel][ii]:
                    # If it's only barely assigned to this segment, don't report the error.
                    if abs(hyptime - ref.starttime) > Global.epstime and abs(hyptime - ref.endtime) > Global.epstime:
                        logging.warning("Warning: Found hypothesis that matches more than one stm line. This indicates that the stm contains overlapping segments. The ctm word has been assigned to the first segment. The ctm entry was:\n%s\nThe second stm entry was:\n%s\n"%(hyp, ref))
                else:
                    assigned[filechannel][ii] = True
                    outwords.append(hyp.word)

        # Make a copy of the corresponding ref line, and replace the words.
        outhyp = utils.STM_Line(str(ref))
        outhyp.words = ' '.join(outwords)
        outhyps.append(outhyp)

    # Now find any ctms that were not assigned to an stm. The docstring and
    # the file header promise the speaker "unmatched" for these entries.
    for filechannel in sorted_ctms:
        for ii in range(len(sorted_ctms[filechannel])):
            if not assigned[filechannel][ii]:
                hyp = sorted_ctms[filechannel][ii]
                outhyp = utils.STM_Line()
                outhyp.file = hyp.file
                outhyp.channel = hyp.channel
                outhyp.speaker = 'unmatched'
                outhyp.starttime = hyp.starttime
                outhyp.endtime = hyp.starttime + hyp.duration
                outhyp.label = '<>'
                outhyp.words = hyp.word
                outhyps.append(outhyp)

    return outhyps
# end match_ref_hyp()


def parse_arguments(strs):
    parser = argparse.ArgumentParser(description='Given input references in stm format and input hypothesis in ctm format, generate a hypothesis file in stm format. Each hypothesis line is generated by picking any words from the same file/channel whose midpoint intersects the corresponding reference line. Any hypothesis words that do not match a reference line are added at the end of the hypothesis file. Version %s.'%(VERSION))
    parser.add_argument('-loglevel', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'], default='WARNING', help='Logging level (default %(default)s)')
    parser.add_argument('-version', '--version', action='version', version=str(VERSION))
    parser.add_argument('-ref', help='Input reference file in stm format', type=argparse.FileType('r'), required=True)
    parser.add_argument('-inhyp', help='Input hypothesis file in ctm format', type=argparse.FileType('r'), required=True)
    parser.add_argument('-outhyp', help='Output hypothesis file in stm format', type=argparse.FileType('w'), required=True)
    Global.args = parser.parse_args(strs)
# end parse_arguments()


def setup_logging():
    numeric_level = getattr(logging, Global.args.loglevel, None)
    if not isinstance(numeric_level, int):
        raise ValueError('Invalid log level: %s' % Global.args.loglevel)
    logging.basicConfig(level=numeric_level, format="%(module)s:%(levelname)s: %(message)s")
# end setup_logging()

if __name__ == "__main__":
    main(sys.argv)
apache-2.0
9,198,778,341,884,706,000
39.550562
431
0.655583
false
3.792958
false
false
false
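A minimal sketch of how match_ref_hyp.py above is driven, based only on its own argparse definition and docstrings; the file names here are placeholders.

# From the shell, matching a ctm hypothesis against an stm reference:
#     python match_ref_hyp.py -ref ref.stm -inhyp hyp.ctm -outhyp hyp.stm

# Programmatic use of the core function, assuming the sibling utils module
# provides read_stm/read_ctm exactly as the script itself uses them.
import utils
from match_ref_hyp import match_ref_hyp

with open('ref.stm') as ref, open('hyp.ctm') as hyp:
    out_lines = match_ref_hyp(utils.read_stm(ref), utils.read_ctm(hyp))
for line in out_lines:
    print(line)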
molgor/cyberklt
cyberklt/drivers/models.py
1
1851
# -*- coding: utf-8 -*-
import numpy as np
from django.db import models
import drivers.MPL3115A2 as mpl


class Temperature(models.Model):
    temp = models.FloatField(default=-999.9)  # Celsius
    altitude = models.FloatField(default=np.nan)  # meters
    timestamp = models.DateTimeField(auto_now=True)
    instrument = models.CharField(max_length=100, default="MPL3115A2")

    @classmethod
    def getTemp(cls):
        """
        Read a temperature measurement from the sensor (unsaved instance).
        """
        data = mpl.readData()
        temp = cls(temp=data['Temperature'])
        return temp

    @classmethod
    def create(cls):
        """
        Read temperature and altitude from the sensor and save the record.
        """
        data = mpl.readData()
        temp = cls(temp=data['Temperature'], altitude=data['Altitude'])
        temp.save()
        return temp

    def __str__(self):
        # %m is the month; %M would repeat the minute here.
        time_ = self.timestamp.strftime(" |%H:%M %d-%m-%Y|")
        cad = "<Temperature reading: %s , %s>" % (self.temp, time_)
        return cad


class Pressure(models.Model):
    pressure = models.FloatField(default=-999.9)  # kPa
    timestamp = models.DateTimeField(auto_now=True)
    instrument = models.CharField(max_length=100, default="MPL3115A2")

    @classmethod
    def getPressure(cls):
        """
        Read a pressure measurement from the sensor (unsaved instance).
        """
        data = mpl.readData(mode=2)
        reading = cls(pressure=data['Pressure'])
        return reading

    @classmethod
    def create(cls):
        """
        Read pressure from the sensor and save the record.
        """
        data = mpl.readData(mode=2)
        reading = cls(pressure=data['Pressure'])
        reading.save()
        return reading

    def __str__(self):
        # %m is the month; %M would repeat the minute here.
        time_ = self.timestamp.strftime(" |%H:%M %d-%m-%Y|")
        cad = "<Pressure reading: %s , %s>" % (self.pressure, time_)
        return cad
gpl-3.0
6,452,715,802,677,283,000
27.476923
70
0.594273
false
3.808642
false
false
false
nicolashainaux/mathmaker
toolbox/build_db.py
1
89443
#!/usr/bin/env python3
# -*- coding: utf-8 -*-

# Mathmaker creates automatically maths exercises sheets
# with their answers
# Copyright 2006-2017 Nicolas Hainaux <nh.techn@gmail.com>

# This file is part of Mathmaker.

# Mathmaker is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# any later version.

# Mathmaker is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.

# You should have received a copy of the GNU General Public License
# along with Mathmaker; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

"""
This script adds new entries to the database. It actually erases the
database and builds it entirely.

It will add all entries:

- from files mini_pb_addi_direct.yaml, mini_pb_divi_direct.yaml,
  mini_pb_subtr_direct.yaml and mini_pb_multi_direct.yaml
  from data/wordings/,
- from all w3l.po files from locale/*/LC_MESSAGES/
- from all w4l.po files from locale/*/LC_MESSAGES/
- from all w5l.po files from locale/*/LC_MESSAGES/
- from all *_names.po files from locale/*/LC_MESSAGES/
- all single ints from 2 to SINGLEINTS_MAX
- all single decimal numbers with one digit from 0.0 to 100.0
- all integers pairs from 2 to INTPAIRS_MAX
- all integers triples from 2 to INTPAIRS_MAX
- a list of "clever" couples of (integer, decimal) (for multiplications)
- a list of angles' ranges (around 0, 90, 180, 270)
- the list of variants identification numbers (from 0 to 23 and 100 to 155,
  so far) for order_of_operations questions
- all unit conversions, sorted in categories and levels,
- decimals from 0.001 to 9.999
- digits positions: one table for thousands to thousandths, another for
  tenths to thousandths.
- simple fractions: 1/2 to 1/10, 2/3 to 2/10 etc. until 9/10
- dvipsnames_selection for LaTeX package 'xcolor'
- polygons shapes
"""

import os
import sys
import json
import sqlite3
from math import gcd
from decimal import Decimal

from mathmakerlib.calculus import Number

from mathmaker import settings
from mathmaker.lib.tools import po_file_get_list_of, check_unique_letters_words
from mathmaker.lib.tools.frameworks import get_attributes
from mathmaker.lib.tools.distcode import distcode
from mathmaker.lib.tools.database import parse_sql_creation_query
from mathmaker.lib.constants.numeration import DIGITSPLACES
from mathmaker.lib.constants.numeration import DIGITSPLACES_DECIMAL

INTPAIRS_MAX = 1000
INTTRIPLES_MAX = 200
INTQUADRUPLES_MAX = 50
INTQUINTUPLES_MAX = 36
INTSEXTUPLES_MAX = 25
SINGLEINTS_MAX = 1000

NNPAIRS_MAX = 100
NNTRIPLES_MAX = 10
NNQUADRUPLES_MAX = 10
NNQUINTUPLES_MAX = 10
NNSEXTUPLES_MAX = 10


def _suits_for_deci1(i, j):
    return not(i % 10 == 0 and j % 10 == 0)


def _suits_for_deci2(i, j):
    return not(i % 10 == 0 or j % 10 == 0)


def __main__():
    settings.init()
    WORDINGS_DIR = settings.datadir + "wordings/"
    WORDINGS_FILES = [WORDINGS_DIR + n + ".yaml"
                      for n in ["mini_pb_addi_direct",
                                "mini_pb_divi_direct",
                                "mini_pb_subtr_direct",
                                "mini_pb_multi_direct"]]

    # Existent db is deleted. A brand new empty db is created.
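    # Illustration of the two helper predicates defined above (example
    # values, not from the original source): _suits_for_deci1 only rejects
    # pairs where BOTH numbers are multiples of 10, while _suits_for_deci2
    # rejects pairs where EITHER one is.
    #   _suits_for_deci1(4, 25)   -> True
    #   _suits_for_deci1(40, 250) -> False  (both end in 0)
    #   _suits_for_deci2(4, 250)  -> False  (250 is a multiple of 10)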
    if os.path.isfile(settings.path.db_dist):
        sys.stderr.write('Remove previous database...\n')
        os.remove(settings.path.db_dist)
    if os.path.isfile(settings.path.shapes_db_dist):
        sys.stderr.write('Remove previous shapes database...\n')
        os.remove(settings.path.shapes_db_dist)
    if os.path.isfile(settings.path.solids_db_dist):
        sys.stderr.write('Remove previous solids database...\n')
        os.remove(settings.path.solids_db_dist)
    if os.path.isfile(settings.path.anglessets_db_dist):
        sys.stderr.write('Remove previous anglessets database...\n')
        os.remove(settings.path.anglessets_db_dist)
    if os.path.isfile(settings.path.natural_nb_tuples_db_dist):
        sys.stderr.write('Remove previous inttuples database...\n')
        os.remove(settings.path.natural_nb_tuples_db_dist)

    sys.stderr.write('Create new databases...\n')
    open(settings.path.db_dist, 'a').close()
    open(settings.path.shapes_db_dist, 'a').close()
    open(settings.path.solids_db_dist, 'a').close()
    open(settings.path.anglessets_db_dist, 'a').close()
    open(settings.path.natural_nb_tuples_db_dist, 'a').close()

    sys.stderr.write('Connect to databases...\n')
    db = sqlite3.connect(settings.path.db_dist)
    shapes_db = sqlite3.connect(settings.path.shapes_db_dist)
    solids_db = sqlite3.connect(settings.path.solids_db_dist)
    anglessets_db = sqlite3.connect(settings.path.anglessets_db_dist)
    natural_nb_tuples_db = sqlite3.connect(
        settings.path.natural_nb_tuples_db_dist)
    natural_nb_tuples_db_creation_queries = []

    sys.stderr.write('Create tables...\n')
    # Creation of the tables
    db_creation_queries = ['''CREATE TABLE w{}l
        (id INTEGER PRIMARY KEY, language TEXT, word TEXT,
         drawDate INTEGER)'''.format(n) for n in settings.available_wNl]
    db_creation_queries += \
        ['''CREATE TABLE angle_decorations
            (id INTEGER PRIMARY KEY, variety TEXT, hatchmark TEXT,
             drawDate INTEGER)''',
         '''CREATE TABLE names
            (id INTEGER PRIMARY KEY, language TEXT, gender TEXT, name TEXT,
             drawDate INTEGER)''',
         '''CREATE TABLE mini_pb_wordings
            (id INTEGER PRIMARY KEY, wording_context TEXT, wording TEXT,
             nb1_min INTEGER, nb1_max INTEGER, nb2_min INTEGER,
             nb2_max INTEGER, back_to_unit TEXT, q_id TEXT,
             drawDate INTEGER)''',
         '''CREATE TABLE single_ints
            (id INTEGER PRIMARY KEY, nb1 INTEGER, drawDate INTEGER)''',
         # DECIMAL(4, 1) stands for up to 4 integer digits, up to 1 fractional
         # digit but these values may have no effect (purpose is only
         # documentation)
         '''CREATE TABLE single_deci1
            (id INTEGER PRIMARY KEY, nb1 DECIMAL(4, 1), drawDate INTEGER)''',
         '''CREATE TABLE angle_ranges
            (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
             drawDate INTEGER)''',
         '''CREATE TABLE units_conversions
            (id INTEGER PRIMARY KEY, unit1 TEXT, unit2 TEXT, direction TEXT,
             category TEXT, level INTEGER, dimension INTEGER,
             drawDate INTEGER)''',
         '''CREATE TABLE int_pairs
            (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
             lock_equal_products INTEGER, drawDate INTEGER, clever INTEGER,
             suits_for_deci1 INTEGER, suits_for_deci2 INTEGER)''',
         '''CREATE TABLE int_triples
            (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER,
             code TEXT, triangle INTEGER, isosceles INTEGER,
             equilateral INTEGER, pythagorean INTEGER, equal_sides INTEGER,
             drawDate INTEGER)''',
         '''CREATE TABLE simple_fractions
            (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER,
             reducible INTEGER, drawDate INTEGER)''',
         # As int_deci_clever_pairs may be 'unioned' with int_pairs, its ids
         # will be determined starting from the max id of int_pairs, in order
         # to have unique ids over the two tables.
         '''CREATE TABLE int_deci_clever_pairs
            (id INTEGER, nb1 FLOAT, nb2 FLOAT, drawDate INTEGER,
             clever INTEGER)''',
         '''CREATE TABLE order_of_operations_variants
            (id INTEGER PRIMARY KEY, nb1 INTEGER, drawDate INTEGER)''',
         # DECIMAL(2, 3) stands for up to 2 integer digits,
         # up to 3 fractional digits
         # but these values may have no effect (purpose is only documentation)
         # nz stands for "Non Zero digits (number)"
         # iz stands for "Isolated Zeros (number)"
         # fd stands for "Fractional Digits (number)"
         '''CREATE TABLE decimals
            (id INTEGER PRIMARY KEY, nb1 DECIMAL(2, 3), nz INTEGER,
             iz INTEGER, fd INTEGER, overlap_level INTEGER,
             pure_half INTEGER, pure_quarter INTEGER, drawDate INTEGER)''',
         '''CREATE TABLE digits_places
            (id INTEGER PRIMARY KEY, place DECIMAL(4, 3),
             drawDate INTEGER)''',
         '''CREATE TABLE fracdigits_places
            (id INTEGER PRIMARY KEY, place DECIMAL(4, 3),
             drawDate INTEGER)''',
         '''CREATE TABLE dvipsnames_selection
            (id INTEGER PRIMARY KEY, color_name TEXT, drawDate INTEGER)''',
         ]
    for qr in db_creation_queries:
        db.execute(qr)

    sys.stderr.write('Insert data from locale/*/LC_MESSAGES/*.pot files...\n')
    # Extract data from po(t) files and insert them into the db
    for lang in next(os.walk(settings.localedir))[1]:
        settings.language = lang
        for n in settings.available_wNl:
            if os.path.isfile(settings.localedir + lang
                              + "/LC_MESSAGES/w{}l.po".format(str(n))):
                words = po_file_get_list_of('words', lang, n)
                check_unique_letters_words(words, n)
                db_rows = list(zip([lang for _ in range(len(words))],
                                   words,
                                   [0 for _ in range(len(words))]))
                db.executemany(
                    "INSERT INTO w{}l(language, word, drawDate) "
                    "VALUES(?, ?, ?)".format(str(n)),
                    db_rows)
        for gender in ["masculine", "feminine"]:
            if os.path.isfile(settings.localedir + lang + "/LC_MESSAGES/"
                              + gender + "_names.po"):
                # __
                names = po_file_get_list_of('names', lang, gender)
                db_rows = list(zip([lang for _ in range(len(names))],
                                   [gender for _ in range(len(names))],
                                   names,
                                   [0 for _ in range(len(names))]))
                db.executemany("INSERT "
                               "INTO names(language, gender, name, drawDate) "
                               "VALUES(?, ?, ?, ?)",
                               db_rows)

    sys.stderr.write("Insert angles' decorations...\n")
    db_rows = [('single', 'singledash', 0),
               ('single', 'doubledash', 0),
               ('single', 'tripledash', 0),
               ('double', None, 0),
               ('double', 'singledash', 0),
               ('triple', None, 0),
               ('triple', 'singledash', 0),
               ]
    db.executemany("INSERT "
                   "INTO angle_decorations"
                   "(variety, hatchmark, drawDate) "
                   "VALUES(?, ?, ?)",
                   db_rows)

    sys.stderr.write(
        'Insert data from data/frameworks/wordings/*.yaml files...\n')
    # Extract data from yaml files and insert them into the db
    for f in WORDINGS_FILES:
        wordings = get_attributes(f, "wording")
        db_rows = list(zip([w['wording_context'] for w in wordings],
                           [w['wording'] for w in wordings],
                           [w['nb1_min'] for w in wordings],
                           [w['nb1_max'] for w in wordings],
                           [w['nb2_min'] for w in wordings],
                           [w['nb2_max'] for w in wordings],
                           [w['back_to_unit'] for w in wordings],
                           [w['q_id'] for w in wordings],
                           [0 for _ in range(len(wordings))]))
        db.executemany("INSERT "
                       "INTO mini_pb_wordings(wording_context, wording, "
                       "nb1_min, nb1_max, nb2_min, nb2_max, back_to_unit, "
                       "q_id, drawDate) "
                       "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)",
                       db_rows)

    creation_query = '''CREATE TABLE mini_pb_prop_wordings
        (id INTEGER PRIMARY KEY, wid INTEGER, wording_context TEXT,
         wording TEXT, coeff_min INTEGER, coeff_max INTEGER, nb1_min INTEGER,
         nb1_max INTEGER, nb2_min INTEGER, nb2_max INTEGER, nb3_min INTEGER,
         nb3_max INTEGER, solution_min INTEGER, solution_max INTEGER,
         nb1_xcoeff INTEGER, nb2_xcoeff INTEGER, nb3_xcoeff INTEGER,
nb1_may_be_deci INTEGER, nb2_may_be_deci INTEGER, nb3_may_be_deci INTEGER, solution_may_be_deci INTEGER, locked INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) PROP_WORDINGS_FILE = WORDINGS_DIR + 'mini_pb_proportionality' + '.yaml' wordings = get_attributes(PROP_WORDINGS_FILE, "wording") db_rows = list(zip([i + 1 for i in range(len(wordings))], [w.get('wording_context') for w in wordings], [w.get('wording') for w in wordings], [w.get('coeff_min', 0) for w in wordings], [w.get('coeff_max', 10000) for w in wordings], [w.get('nb1_min', 0) for w in wordings], [w.get('nb1_max', 1000) for w in wordings], [w.get('nb2_min', 0) for w in wordings], [w.get('nb2_max', 1000) for w in wordings], [w.get('nb3_min', 0) for w in wordings], [w.get('nb3_max', 10000) for w in wordings], [w.get('solution_min', 0) for w in wordings], [w.get('solution_max', 10000) for w in wordings], [w.get('nb1_xcoeff', 1) for w in wordings], [w.get('nb2_xcoeff', 1) for w in wordings], [w.get('nb3_xcoeff', 1) for w in wordings], [w.get('nb1_may_be_deci', 0) for w in wordings], [w.get('nb2_may_be_deci', 0) for w in wordings], [w.get('nb3_may_be_deci', 0) for w in wordings], [w.get('solution_may_be_deci', 0) for w in wordings], [0 for _ in range(len(wordings))], [0 for _ in range(len(wordings))])) db.executemany("INSERT " "INTO mini_pb_prop_wordings(wid, wording_context, wording, " "coeff_min, coeff_max, nb1_min, nb1_max, nb2_min, nb2_max, " "nb3_min, nb3_max, solution_min, solution_max, " "nb1_xcoeff, nb2_xcoeff, nb3_xcoeff, " "nb1_may_be_deci, nb2_may_be_deci, " "nb3_may_be_deci, solution_may_be_deci, " "locked, drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " "?, ?, ?, ?, ?)", db_rows) creation_query = '''CREATE TABLE mini_pb_time_wordings (id INTEGER PRIMARY KEY, wid INTEGER, wording_context TEXT, type TEXT, wording TEXT, mini_start_hour INTEGER, mini_start_minute INTEGER, maxi_start_hour INTEGER, maxi_start_minute INTEGER, mini_duration_hour INTEGER, mini_duration_minute INTEGER, maxi_duration_hour INTEGER, maxi_duration_minute INTEGER, mini_end_hour INTEGER, mini_end_minute INTEGER, maxi_end_hour INTEGER, maxi_end_minute INTEGER, locked, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) TIME_WORDINGS_FILE = WORDINGS_DIR + 'mini_pb_time' + '.yaml' wordings = get_attributes(TIME_WORDINGS_FILE, 'wording') db_rows = list(zip([i + 1 for i in range(len(wordings))], [w.get('wording_context') for w in wordings], [w.get('type') for w in wordings], [w.get('wording') for w in wordings], [w.get('mini_start_hour') for w in wordings], [w.get('mini_start_minute') for w in wordings], [w.get('maxi_start_hour') for w in wordings], [w.get('maxi_start_minute') for w in wordings], [w.get('mini_duration_hour') for w in wordings], [w.get('mini_duration_minute') for w in wordings], [w.get('maxi_duration_hour') for w in wordings], [w.get('maxi_duration_minute') for w in wordings], [w.get('mini_end_hour') for w in wordings], [w.get('mini_end_minute') for w in wordings], [w.get('maxi_end_hour') for w in wordings], [w.get('maxi_end_minute') for w in wordings], [0 for _ in range(len(wordings))], [0 for _ in range(len(wordings))])) db.executemany("INSERT " "INTO mini_pb_time_wordings(wid, wording_context, type, " "wording, " "mini_start_hour, mini_start_minute, maxi_start_hour, " "maxi_start_minute, mini_duration_hour," "mini_duration_minute, maxi_duration_hour, " "maxi_duration_minute, mini_end_hour, mini_end_minute, " "maxi_end_hour, 
maxi_end_minute, locked, drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, " "?, ?)", db_rows) creation_query = '''CREATE TABLE divisibility_statements (id INTEGER PRIMARY KEY, wid INTEGER, wording TEXT, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) wordings = get_attributes(WORDINGS_DIR + 'divisibility_statements.yaml', 'wording') db_rows = list(zip([i + 1 for i in range(len(wordings))], [w.get('wording') for w in wordings], [0 for _ in range(len(wordings))])) db.executemany("INSERT " "INTO divisibility_statements" "(wid, wording, drawDate) " "VALUES(?, ?, ?)", db_rows) creation_query = '''CREATE TABLE distcodes (id INTEGER PRIMARY KEY, nbof_nb INTEGER, distcode TEXT, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) db_rows = [(2, '2', 1, 1, 0), (2, '1_1', 0, 0, 0), (3, '3', 1, 1, 0), (3, '2_1', 0, 1, 0), (3, '1_1_1', 0, 0, 0), (4, '4', 1, 1, 0), (4, '3_1', 0, 1, 0), (4, '2_2', 0, 1, 0), (4, '2_1_1', 0, 1, 0), (4, '1_1_1_1', 0, 0, 0), (5, '5', 1, 1, 0), (5, '4_1', 0, 1, 0), (5, '3_2', 0, 1, 0), (5, '3_1_1', 0, 1, 0), (5, '2_2_1', 0, 1, 0), (5, '2_1_1_1', 0, 1, 0), (5, '1_1_1_1_1', 0, 0, 0), (6, '6', 1, 1, 0), (6, '5_1', 0, 1, 0), (6, '4_2', 0, 1, 0), (6, '4_1_1', 0, 1, 0), (6, '3_3', 0, 1, 0), (6, '3_2_1', 0, 1, 0), (6, '3_1_1_1', 0, 1, 0), (6, '2_2_2', 0, 1, 0), (6, '2_2_1_1', 0, 1, 0), (6, '2_1_1_1_1_1', 0, 1, 0), (6, '1_1_1_1_1_1', 0, 0, 0)] db.executemany("INSERT " "INTO distcodes" "(nbof_nb, distcode, equilateral, equal_sides, drawDate) " "VALUES(?, ?, ?, ? , ?)", db_rows) creation_query = '''CREATE TABLE directions (id INTEGER PRIMARY KEY, direction TEXT, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) db_rows = [('top-right', 0), ('top-left', 0), ('bottom-left', 0), ('bottom-right', 0)] db.executemany("INSERT " "INTO directions" "(direction, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE times (id INTEGER PRIMARY KEY, hour INTEGER, minute INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) db_rows = [(hour, minute, 0) for hour in range(24) for minute in range(60)] db.executemany("INSERT " "INTO times" "(hour, minute, drawDate) " "VALUES(?, ?, ?)", db_rows) sys.stderr.write('Insert mixed decimals and ints triples for ' 'proportionality...\n') integers = [_ for _ in range(2, 32)] integers.append(50) integers.append(60) integers.append(80) integers.append(100) db_rows = [(0.666667, n1, n2, float((Number('0.666667') * n1).rounded(Number('0.01'))), float((Number('0.666667') * n2).rounded(Number('0.01'))), 0, 0) for n1 in integers if n1 % 3 == 0 for n2 in integers if n2 != n1 and n2 % n1 != 0 and n2 > n1 / 2 and n2 % 3 == 0] db_rows += [(0.75, n1, n2, float(Number('0.75') * n1), float(Number('0.75') * n2), 0, 0) for n1 in integers if n1 % 4 == 0 for n2 in integers if n2 != n1 and n2 % n1 and n2 > n1 / 2 and n2 % 4 == 0] db_rows += [(1.125, n1, n2, float(Number('1.125') * n1), float(Number('1.125') * n2), 0, 0) for n1 in integers if n1 % 8 == 0 and n1 > 8 for n2 in integers if n2 != n1 and n2 % n1 and n2 > n1 / 2 and n2 % 8 != 0 and n2 % 4 == 0] db_rows += [(1.2, n1, n2, float(Number('1.2') * n1), float(Number('1.2') * n2), 0, 0) for n1 in integers if n1 % 5 == 0 for n2 in integers if n2 != n1 and n2 % n1 != 0 and n2 > n1 / 2 and n2 % 5 == 0] db_rows += [(1.25, n1, n2, float(Number('1.25') * n1), float(Number('1.25') * n2), 0, 0) 
for n1 in integers if n1 % 4 == 0 for n2 in integers if n2 != n1 and n2 > n1 / 2 and n2 % 4 != 0 and n2 % 2 == 0 and n2 % n1] db_rows += [(1.25, n1, n2, float(Number('1.25') * n1), float(Number('1.25') * n2), 0, 0) for n1 in integers if n1 % 4 == 0 for n2 in integers if n2 != n1 and n2 > n1 / 2 and n2 % 4 == 0 and n2 >= 41 and n2 % n1] db_rows += [(1.333333, n1, n2, float((Number('1.333333') * n1).rounded(Number('0.01'))), float((Number('1.333333') * n2).rounded(Number('0.01'))), 0, 0) for n1 in integers if n1 % 3 == 0 for n2 in integers if n2 != n1 and n2 % n1 != 0 and n2 > n1 / 2 and n2 % 3 == 0] db_rows += [(1.5, n1, n2, float(Number('1.5') * n1), float(Number('1.5') * n2), 0, 0) for n1 in integers if n1 < 7 or (8 <= n1 <= 24 and n1 % 2 == 0) or (n1 >= 30 and n1 % 10 == 0) for n2 in integers if n2 != n1 and n2 % n1 and n2 > n1 / 2] db_rows += [(c, 1.5, n2, float(c * Number('1.5')), float(c * n2), 0, 0) for c in [2, 3, 4, 5, 6, 8, 10, 12, 14, 16, 18, 20, 30, 40, 50, 60, 80, 100] for n2 in integers if n2 != c and n2 > c / 2] db_rows += [(2.5, n1, n2, float(Number('2.5') * n1), float(Number('2.5') * n2), 0, 0) for n1 in integers if n1 <= 10 or (n1 > 10 and n1 % 10 == 0) if not ((n1 >= 12 and n1 % 10 != 0) or n1 in [7, 9]) for n2 in integers if n2 != n1 and n2 % n1 and n2 > n1 / 2 and n2 % 2 != 0] db_rows += [(c, 2.5, n2, float(c * Number('2.5')), float(c * n2), 0, 0) for c in [2, 3, 4, 5, 6, 8, 10, 20, 30, 40, 50, 60, 80, 100] for n2 in integers if n2 != c and n2 > c / 2] creation_query = '''CREATE TABLE deci_int_triples_for_prop (id INTEGER PRIMARY KEY, coeff DECIMAL(1, 6), nb1 DECIMAL(1, 6), nb2 DECIMAL(1, 6), nb3 DECIMAL(1, 6), solution DECIMAL(1, 6), locked INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) db.executemany("INSERT " "INTO deci_int_triples_for_prop(coeff, nb1, nb2, " "nb3, solution, locked, drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?)", db_rows) sys.stderr.write('Insert integers pairs...') # Tables of 1, 2, 3... INTPAIRS_MAX db_rows = [(i + 1, j + 1, 0, 0, 0, _suits_for_deci1(i + 1, j + 1), _suits_for_deci2(i + 1, j + 1)) for i in range(INTPAIRS_MAX) for j in range(INTPAIRS_MAX) if j >= i] for i in range(100): sys.stderr.write('\rInsert integers pairs... {} %'.format(i)) db.executemany("INSERT " "INTO int_pairs(nb1, nb2, lock_equal_products, " "drawDate, clever, suits_for_deci1, suits_for_deci2) " "VALUES(?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert integers pairs... 100 %\n') sys.stderr.write('Create integers triples...\n') # Tables of 1, 2, 3... INTTRIPLES_MAX db_rows = [(15, 2, 3, 'none', 0, 0, 0, 0, 0, 0), (15, 2, 5, 'none', 0, 0, 0, 0, 0, 0), (15, 2, 6, 'none', 0, 0, 0, 0, 0, 0), (15, 3, 4, 'none', 0, 0, 0, 0, 0, 0), (15, 3, 5, 'none', 0, 0, 0, 0, 0, 0), (15, 4, 5, 'none', 0, 0, 0, 0, 0, 0), (15, 4, 6, 'none', 0, 0, 0, 0, 0, 0), (15, 5, 6, 'none', 0, 0, 0, 0, 0, 0), (25, 2, 3, 'none', 0, 0, 0, 0, 0, 0), (25, 2, 5, 'none', 0, 0, 0, 0, 0, 0), (25, 2, 6, 'none', 0, 0, 0, 0, 0, 0), (25, 3, 4, 'none', 0, 0, 0, 0, 0, 0), (25, 3, 5, 'none', 0, 0, 0, 0, 0, 0), (25, 4, 5, 'none', 0, 0, 0, 0, 0, 0), (25, 4, 6, 'none', 0, 0, 0, 0, 0, 0), (25, 5, 6, 'none', 0, 0, 0, 0, 0, 0)] db_rows += [(i + 1, j + 1, k + 1, # nb1, nb2, nb3 distcode(i + 1, j + 1, k + 1), # code k + 1 < i + j + 2, # triangle? (i == j and j != k) or (i == k and i != j) or (j == k and i != j), # isosceles? (but not equilateral) i == j == k, # equilateral? 
(k + 1) ** 2 == (i + 1) ** 2 + (j + 1) ** 2, # pythagorean? (i == j or j == k or k == i), # at least 2 equal sides? 0 # drawDate ) for i in range(INTTRIPLES_MAX) for j in range(INTTRIPLES_MAX) for k in range(INTTRIPLES_MAX) if k >= j >= i and k - i <= 30] sys.stderr.write('Insert integers triples...') for i in range(100): sys.stderr.write('\rInsert integers triples... {} %'.format(i)) db.executemany("INSERT " "INTO int_triples(nb1, nb2, nb3, code, triangle, " "isosceles, equilateral, pythagorean, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert integers triples... 100 %\n') creation_query = '''CREATE TABLE int_quadruples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, nb4 INTEGER, code TEXT, quadrilateral INTEGER, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) sys.stderr.write('Create integers quadruples...\n') # Tables of 1, 2, 3... INTQUADRUPLES_MAX db_rows = [(i + 1, j + 1, k + 1, n + 1, # nb1, nb2, nb3, nb4 distcode(i + 1, j + 1, k + 1, n + 1), # code n + 1 < i + j + k + 3, # quadrilateral? i == j == k == n, # equilateral? (i == j or j == k or k == i or i == n or j == n or k == n), # at least 2 equal sides? 0 # drawDate ) for i in range(INTQUADRUPLES_MAX) for j in range(INTQUADRUPLES_MAX) for k in range(INTQUADRUPLES_MAX) for n in range(INTQUADRUPLES_MAX) if n >= k >= j >= i and n - i <= 18] sys.stderr.write('Insert integers quadruples...') for i in range(100): sys.stderr.write('\rInsert integers quadruples... {} %'.format(i)) db.executemany("INSERT " "INTO int_quadruples(nb1, nb2, nb3, nb4, code, " "quadrilateral, equilateral, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert integers quadruples... 100 %\n') creation_query = '''CREATE TABLE int_quintuples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, nb4 INTEGER, nb5 INTEGER, code TEXT, pentagon INTEGER, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) sys.stderr.write('Create integers quintuples...\n') # Tables of 1, 2, 3... INTQUINTUPLES_MAX db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, # nb1, nb2, nb3, nb4, nb5 distcode(i + 1, j + 1, k + 1, n + 1, p + 1), # code p + 1 < i + j + k + n + 4, # pentagon? i == j == k == n == p, # equilateral? (i == j or j == k or k == i or i == n or j == n or k == n or i == p or j == p or k == p or n == p), # at least 2 equal sides? 0 # drawDate ) for i in range(INTQUINTUPLES_MAX) for j in range(INTQUINTUPLES_MAX) for k in range(INTQUINTUPLES_MAX) for n in range(INTQUINTUPLES_MAX) for p in range(INTQUINTUPLES_MAX) if p >= n >= k >= j >= i and p - i <= 16] sys.stderr.write('Insert integers quintuples...') for i in range(100): sys.stderr.write('\rInsert integers quintuples... {} %'.format(i)) db.executemany("INSERT " "INTO int_quintuples(nb1, nb2, nb3, nb4, nb5, code, " "pentagon, equilateral, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert integers quintuples... 
100 %\n') creation_query = '''CREATE TABLE int_sextuples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, nb4 INTEGER, nb5 INTEGER, nb6 INTEGER, code TEXT, hexagon INTEGER, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) sys.stderr.write('Create integers sextuples...\n') # Tables of 1, 2, 3... INTSEXTUPLES_MAX db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1, # nb1, nb2, nb3, nb4, nb5, nb6 distcode(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1), # code q + 1 < i + j + k + n + p + 5, # hexagon? i == j == k == n == p == q, # equilateral? (i == j or j == k or k == i or i == n or j == n or k == n or i == p or j == p or k == p or n == p or i == q or j == q or k == q or n == q or p == q), # at least 2 equal sides? 0 # drawDate ) for i in range(INTSEXTUPLES_MAX) for j in range(INTSEXTUPLES_MAX) for k in range(INTSEXTUPLES_MAX) for n in range(INTSEXTUPLES_MAX) for p in range(INTSEXTUPLES_MAX) for q in range(INTSEXTUPLES_MAX) if q >= p >= n >= k >= j >= i and q - i <= 16] sys.stderr.write('Insert integers sextuples...') for i in range(100): sys.stderr.write('\rInsert integers sextuples... {} %'.format(i)) db.executemany("INSERT " "INTO int_sextuples(nb1, nb2, nb3, nb4, nb5, nb6, " "code, hexagon, equilateral, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert integers sextuples... 100 %\n') # sys.stderr.flush() sys.stderr.write('Create natural numbers pairs...\n') creation_query = '''CREATE TABLE pairs (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, code TEXT, lock_equal_products INTEGER, drawDate INTEGER, clever INTEGER, suits_for_deci1 INTEGER, suits_for_deci2 INTEGER)''' natural_nb_tuples_db_creation_queries.append(creation_query) natural_nb_tuples_db.execute(creation_query) # Tables of 1, 2, 3... NNPAIRS_MAX db_rows = [(i + 1, j + 1, distcode(i + 1, j + 1), 0, 0, 0, _suits_for_deci1(i + 1, j + 1), _suits_for_deci2(i + 1, j + 1)) for i in range(NNPAIRS_MAX) for j in range(NNPAIRS_MAX) if j >= i] for i in range(100): sys.stderr.write('\rInsert natural numbers pairs... {} %'.format(i)) natural_nb_tuples_db.executemany( "INSERT " "INTO pairs(nb1, nb2, code, lock_equal_products, " "drawDate, clever, suits_for_deci1, suits_for_deci2) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert natural numbers pairs... 
100 %\n') sys.stderr.write('Setup natural numbers pairs: clever (5)...\n') for couple in [(2, 5), (2, 50), (2, 500), (5, 20), (5, 200)]: natural_nb_tuples_db.execute( "UPDATE pairs SET clever = 5 WHERE nb1 = '" + str(couple[0]) + "' and nb2 = '" + str(couple[1]) + "';") sys.stderr.write('Setup natural numbers pairs: clever (4)...\n') for couple in [(4, 25), (4, 250)]: natural_nb_tuples_db.execute( "UPDATE pairs SET clever = 4 WHERE nb1 = '" + str(couple[0]) + "' and nb2 = '" + str(couple[1]) + "';") sys.stderr.write('Create natural number×decimal "clever" pairs...\n') creation_query = '''CREATE TABLE nn_deci_clever_pairs (id INTEGER, nb1 FLOAT, nb2 FLOAT, drawDate INTEGER, clever INTEGER)''' natural_nb_tuples_db_creation_queries.append(creation_query) natural_nb_tuples_db.execute(creation_query) sys.stderr.write('Insert natural number×decimal "clever" pairs...\n') # Insert natural number/decimal "clever" pairs into the db # The tenths series (only one yet) is identified by a 10 # the quarters series by a 4 # the halfs/fifths series by a 5 start_id = tuple(natural_nb_tuples_db.execute( "SELECT MAX(id) FROM pairs "))[0][0] + 1 db_rows = list(zip([i + start_id for i in range(5)], [0.2, 2, 4, 4, 0.1], [5, 0.5, 0.25, 2.5, 10], [0, 0, 0, 0, 0], [5, 5, 4, 4, 10])) natural_nb_tuples_db.executemany( "INSERT INTO nn_deci_clever_pairs(id, nb1, nb2, drawDate, clever) " "VALUES(?, ?, ?, ?, ?)", db_rows) sys.stderr.write('Create natural numbers triples...\n') creation_query = '''CREATE TABLE triples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, code TEXT, constructible INTEGER, isosceles INTEGER, equilateral INTEGER, pythagorean INTEGER, equal_sides INTEGER, drawDate INTEGER)''' natural_nb_tuples_db_creation_queries.append(creation_query) natural_nb_tuples_db.execute(creation_query) # Tables of 1, 2, 3... NNTRIPLES_MAX db_rows = [(15, 2, 3, 'none', 0, 0, 0, 0, 0, 0), (15, 2, 5, 'none', 0, 0, 0, 0, 0, 0), (15, 2, 6, 'none', 0, 0, 0, 0, 0, 0), (15, 3, 4, 'none', 0, 0, 0, 0, 0, 0), (15, 3, 5, 'none', 0, 0, 0, 0, 0, 0), (15, 4, 5, 'none', 0, 0, 0, 0, 0, 0), (15, 4, 6, 'none', 0, 0, 0, 0, 0, 0), (15, 5, 6, 'none', 0, 0, 0, 0, 0, 0), (25, 2, 3, 'none', 0, 0, 0, 0, 0, 0), (25, 2, 5, 'none', 0, 0, 0, 0, 0, 0), (25, 2, 6, 'none', 0, 0, 0, 0, 0, 0), (25, 3, 4, 'none', 0, 0, 0, 0, 0, 0), (25, 3, 5, 'none', 0, 0, 0, 0, 0, 0), (25, 4, 5, 'none', 0, 0, 0, 0, 0, 0), (25, 4, 6, 'none', 0, 0, 0, 0, 0, 0), (25, 5, 6, 'none', 0, 0, 0, 0, 0, 0)] db_rows += [(i + 1, j + 1, k + 1, # nb1, nb2, nb3 distcode(i + 1, j + 1, k + 1), # code k + 1 < i + j + 2, # constructible triangle? (i == j and j != k) or (i == k and i != j) or (j == k and i != j), # isosceles? (but not equilateral) i == j == k, # equilateral? (k + 1) ** 2 == (i + 1) ** 2 + (j + 1) ** 2, # pythagorean? (i == j or j == k or k == i), # at least 2 equal sides? 0 # drawDate ) for i in range(NNTRIPLES_MAX) for j in range(NNTRIPLES_MAX) for k in range(NNTRIPLES_MAX) if k >= j >= i] sys.stderr.write('Insert natural numbers triples...') for i in range(100): sys.stderr.write('\rInsert natural numbers triples... {} %' .format(i)) natural_nb_tuples_db\ .executemany("INSERT " "INTO triples(nb1, nb2, nb3, code, " "constructible, isosceles, equilateral, pythagorean," "equal_sides, drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert natural numbers triples... 
100 %\n') # sys.stderr.flush() sys.stderr.write('Create natural numbers quadruples...\n') creation_query = '''CREATE TABLE quadruples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, nb4 INTEGER, code TEXT, constructible INTEGER, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' natural_nb_tuples_db_creation_queries.append(creation_query) natural_nb_tuples_db.execute(creation_query) # Tables of 1, 2, 3... NNQUADRUPLES_MAX db_rows = [(i + 1, j + 1, k + 1, n + 1, # nb1, nb2, nb3, nb4 distcode(i + 1, j + 1, k + 1, n + 1), # code n + 1 < i + j + k + 3, # constructible quadrilateral? i == j == k == n, # equilateral? (i == j or j == k or k == i or i == n or j == n or k == n), # at least 2 equal sides? 0 # drawDate ) for i in range(NNQUADRUPLES_MAX) for j in range(NNQUADRUPLES_MAX) for k in range(NNQUADRUPLES_MAX) for n in range(NNQUADRUPLES_MAX) if n >= k >= j >= i] sys.stderr.write('Insert natural numbers quadruples...') for i in range(100): sys.stderr.write('\rInsert natural numbers quadruples... {} %' .format(i)) natural_nb_tuples_db\ .executemany("INSERT " "INTO quadruples(nb1, nb2, nb3, nb4, code, " "constructible, equilateral, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert natural numbers quadruples... 100 %\n') # sys.stderr.flush() sys.stderr.write('Create natural numbers quintuples...\n') creation_query = '''CREATE TABLE quintuples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, nb4 INTEGER, nb5 INTEGER, code TEXT, constructible INTEGER, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' natural_nb_tuples_db_creation_queries.append(creation_query) natural_nb_tuples_db.execute(creation_query) # Tables of 1, 2, 3... NNQUINTUPLES_MAX db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, # nb1, nb2, nb3, nb4, nb5 distcode(i + 1, j + 1, k + 1, n + 1, p + 1), # code p + 1 < i + j + k + n + 4, # constructible? i == j == k == n == p, # equilateral? (i == j or j == k or k == i or i == n or j == n or k == n or i == p or j == p or k == p or n == p), # at least 2 equal sides? 0 # drawDate ) for i in range(NNQUINTUPLES_MAX) for j in range(NNQUINTUPLES_MAX) for k in range(NNQUINTUPLES_MAX) for n in range(NNQUINTUPLES_MAX) for p in range(NNQUINTUPLES_MAX) if p >= n >= k >= j >= i] sys.stderr.write('Insert natural numbers quintuples...') for i in range(100): sys.stderr.write('\rInsert natural numbers quintuples... {} %' .format(i)) natural_nb_tuples_db\ .executemany("INSERT " "INTO quintuples(nb1, nb2, nb3, nb4, nb5, code, " "constructible, equilateral, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert natural numbers quintuples... 100 %\n') # sys.stderr.flush() sys.stderr.write('Create natural numbers sextuples...\n') creation_query = '''CREATE TABLE sextuples (id INTEGER PRIMARY KEY, nb1 INTEGER, nb2 INTEGER, nb3 INTEGER, nb4 INTEGER, nb5 INTEGER, nb6 INTEGER, code TEXT, constructible INTEGER, equilateral INTEGER, equal_sides INTEGER, drawDate INTEGER)''' natural_nb_tuples_db_creation_queries.append(creation_query) natural_nb_tuples_db.execute(creation_query) # Tables of 1, 2, 3... NNSEXTUPLES_MAX db_rows = [(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1, # nb1, nb2, nb3, nb4, nb5, nb6 distcode(i + 1, j + 1, k + 1, n + 1, p + 1, q + 1), # code q + 1 < i + j + k + n + p + 5, # constructible hexagon? i == j == k == n == p == q, # equilateral? 
(i == j or j == k or k == i or i == n or j == n or k == n or i == p or j == p or k == p or n == p or i == q or j == q or k == q or n == q or p == q), # at least 2 equal sides? 0 # drawDate ) for i in range(NNSEXTUPLES_MAX) for j in range(NNSEXTUPLES_MAX) for k in range(NNSEXTUPLES_MAX) for n in range(NNSEXTUPLES_MAX) for p in range(NNSEXTUPLES_MAX) for q in range(NNSEXTUPLES_MAX) if q >= p >= n >= k >= j >= i] sys.stderr.write('Insert natural numbers sextuples...') for i in range(100): sys.stderr.write('\rInsert natural numbers sextuples... {} %' .format(i)) natural_nb_tuples_db\ .executemany("INSERT " "INTO sextuples(nb1, nb2, nb3, nb4, nb5, nb6, code, " "constructible, equilateral, equal_sides, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows[i * len(db_rows) // 100: (i + 1) * len(db_rows) // 100]) sys.stderr.write('\rInsert natural numbers sextuples... 100 %\n') # sys.stderr.flush() sys.stderr.write('Setup integers pairs: clever (5)...\n') for couple in [(2, 5), (2, 50), (2, 500), (5, 20), (5, 200)]: db.execute("UPDATE int_pairs SET clever = 5" + " WHERE nb1 = '" + str(couple[0]) + "' and nb2 = '" + str(couple[1]) + "';") sys.stderr.write('Setup integers pairs: clever (4)...\n') for couple in [(4, 25), (4, 250)]: db.execute("UPDATE int_pairs SET clever = 4" + " WHERE nb1 = '" + str(couple[0]) + "' and nb2 = '" + str(couple[1]) + "';") sys.stderr.write('Insert integer×decimal "clever" pairs...\n') # Insert integer/decimal "clever" pairs into the db # The tenths series (only one yet) is identified by a 10 # the quarters series by a 4 # the halfs/fifths series by a 5 start_id = tuple(db.execute("SELECT MAX(id) FROM int_pairs "))[0][0] + 1 db_rows = list(zip([i + start_id for i in range(5)], [0.2, 2, 4, 4, 0.1], [5, 0.5, 0.25, 2.5, 10], [0, 0, 0, 0, 0], [5, 5, 4, 4, 10])) db.executemany("INSERT " "INTO int_deci_clever_pairs(id, nb1, nb2, drawDate, " "clever) " "VALUES(?, ?, ?, ?, ?)", db_rows) sys.stderr.write('Insert single integers...\n') # Single ints db_rows = [(i + 1, 0) for i in range(SINGLEINTS_MAX)] db.executemany("INSERT " "INTO single_ints(nb1, drawDate) " "VALUES(?, ?)", db_rows) sys.stderr.write('Insert simple fractions...\n') db_rows = [(i + 1, j + 1, 0 if gcd(i + 1, j + 1) == 1 else 1, 0) for i in range(10) for j in range(10) if j > i] db.executemany("INSERT " "INTO simple_fractions(nb1, nb2, reducible, drawDate) " "VALUES(?, ?, ?, ?)", db_rows) sys.stderr.write('Insert single decimals from 0.0 to 100.0...\n') # Single decimal numbers db_rows = [(i / 10, 0) for i in range(1001)] db.executemany("INSERT " "INTO single_deci1(nb1, drawDate) " "VALUES(?, ?)", db_rows) sys.stderr.write('Generate single decimals from 0.001 to 10.000...') # Single decimal numbers db_rows = [] for j in range(100): sys.stderr.write( '\rGenerate single decimals from 0.001 to 10.000... {} %' .format(j)) db_rows += [((100 * j + i + 1) / 1000, Number((Decimal(100 * j + i + 1)) / Decimal(1000)) .nonzero_digits_nb(), Number((Decimal(100 * j + i + 1)) / Decimal(1000)) .isolated_zeros(), Number((Decimal(100 * j + i + 1)) / Decimal(1000)) .fracdigits_nb(), Number((Decimal(100 * j + i + 1)) / Decimal(1000)) .overlap_level(), Number((Decimal(100 * j + i + 1)) / Decimal(1000)) .is_pure_half(), Number((Decimal(100 * j + i + 1)) / Decimal(1000)) .is_pure_quarter(), 0) for i in range(100)] sys.stderr.write('\rGenerate single decimals from 0.001 to 10.000...' 
' 100 %\n') sys.stderr.write('Insert single decimals from 0.001 to 10.000...\n') db.executemany("INSERT " "INTO decimals(nb1, nz, iz, fd, overlap_level, " "pure_half, pure_quarter, drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?)", db_rows) sys.stderr.write('Insert angle ranges...\n') # Angle ranges db_rows = [(i - 20, i + 20, 0) for i in [0, 90, 180, 270]] db.executemany("INSERT " "INTO angle_ranges(nb1, nb2, drawDate) " "VALUES(?, ?, ?)", db_rows) sys.stderr.write('Insert variants of order_of_operations...\n') # Variant numbers for order_of_operations questions. db_rows = [(i, 0) for i in range(24)] db.executemany("INSERT " "INTO order_of_operations_variants" "(nb1, drawDate) " "VALUES(?, ?)", db_rows) db_rows = [(i + 100, 0) for i in range(88)] db.executemany("INSERT " "INTO order_of_operations_variants" "(nb1, drawDate) " "VALUES(?, ?)", db_rows) sys.stderr.write('Insert unit conversions...\n') db_rows = [('km', 'hm', 'right', 'length', 1, 1, 0), # simple units, ('hm', 'dam', 'right', 'length', 1, 1, 0), # one column or ('dam', 'm', 'right', 'length', 1, 1, 0), # "classic" ('m', 'dm', 'right', 'length', 1, 1, 0), # conversions ('dm', 'cm', 'right', 'length', 1, 1, 0), ('cm', 'mm', 'right', 'length', 1, 1, 0), ('km', 'm', 'right', 'length', 1, 1, 0), ('m', 'cm', 'right', 'length', 1, 1, 0), ('m', 'mm', 'right', 'length', 1, 1, 0), ('hL', 'daL', 'right', 'capacity', 1, 1, 0), ('daL', 'L', 'right', 'capacity', 1, 1, 0), ('L', 'dL', 'right', 'capacity', 1, 1, 0), ('dL', 'cL', 'right', 'capacity', 1, 1, 0), ('cL', 'mL', 'right', 'capacity', 1, 1, 0), ('hL', 'L', 'right', 'capacity', 1, 1, 0), ('kg', 'hg', 'right', 'mass', 1, 1, 0), ('hg', 'dag', 'right', 'mass', 1, 1, 0), ('dag', 'g', 'right', 'mass', 1, 1, 0), ('g', 'dg', 'right', 'mass', 1, 1, 0), ('dg', 'cg', 'right', 'mass', 1, 1, 0), ('cg', 'mg', 'right', 'mass', 1, 1, 0), ('kg', 'g', 'right', 'mass', 1, 1, 0), ('hm', 'km', 'left', 'length', 1, 1, 0), ('dam', 'hm', 'left', 'length', 1, 1, 0), ('m', 'dam', 'left', 'length', 1, 1, 0), ('dm', 'm', 'left', 'length', 1, 1, 0), ('cm', 'dm', 'left', 'length', 1, 1, 0), ('mm', 'cm', 'left', 'length', 1, 1, 0), ('m', 'km', 'left', 'length', 1, 1, 0), ('cm', 'm', 'left', 'length', 1, 1, 0), ('daL', 'hL', 'left', 'capacity', 1, 1, 0), ('L', 'daL', 'left', 'capacity', 1, 1, 0), ('dL', 'L', 'left', 'capacity', 1, 1, 0), ('cL', 'dL', 'left', 'capacity', 1, 1, 0), ('mL', 'cL', 'left', 'capacity', 1, 1, 0), ('L', 'hL', 'left', 'capacity', 1, 1, 0), ('hg', 'kg', 'left', 'mass', 1, 1, 0), ('dag', 'hg', 'left', 'mass', 1, 1, 0), ('g', 'dag', 'left', 'mass', 1, 1, 0), ('dg', 'g', 'left', 'mass', 1, 1, 0), ('cg', 'dg', 'left', 'mass', 1, 1, 0), ('mg', 'cg', 'left', 'mass', 1, 1, 0), ('g', 'kg', 'left', 'mass', 1, 1, 0), ('km', 'dam', 'right', 'length', 2, 1, 0), # two columns ('hm', 'm', 'right', 'length', 2, 1, 0), ('dam', 'dm', 'right', 'length', 2, 1, 0), ('dm', 'mm', 'right', 'length', 2, 1, 0), ('daL', 'dL', 'right', 'capacity', 2, 1, 0), ('L', 'cL', 'right', 'capacity', 2, 1, 0), ('dL', 'mL', 'right', 'capacity', 2, 1, 0), ('kg', 'dag', 'right', 'mass', 2, 1, 0), ('hg', 'g', 'right', 'mass', 2, 1, 0), ('dag', 'dg', 'right', 'mass', 2, 1, 0), ('g', 'cg', 'right', 'mass', 2, 1, 0), ('dg', 'mg', 'right', 'mass', 2, 1, 0), ('dam', 'km', 'left', 'length', 2, 1, 0), ('m', 'hm', 'left', 'length', 2, 1, 0), ('dm', 'dam', 'left', 'length', 2, 1, 0), ('mm', 'dm', 'left', 'length', 2, 1, 0), ('dL', 'daL', 'left', 'capacity', 2, 1, 0), ('cL', 'L', 'left', 'capacity', 2, 1, 0), ('mL', 'dL', 'left', 'capacity', 2, 1, 
0), ('dag', 'kg', 'left', 'mass', 2, 1, 0), ('g', 'hg', 'left', 'mass', 2, 1, 0), ('dg', 'dag', 'left', 'mass', 2, 1, 0), ('cg', 'g', 'left', 'mass', 2, 1, 0), ('mg', 'dg', 'left', 'mass', 2, 1, 0), ('hm', 'dm', 'right', 'length', 3, 1, 0), # three columns ('dam', 'cm', 'right', 'length', 3, 1, 0), ('dm', 'hm', 'left', 'length', 3, 1, 0), ('cm', 'dam', 'left', 'length', 3, 1, 0), ('km', 'hm', 'right', 'area', 2, 2, 0), # area: 1 column [2] ('hm', 'dam', 'right', 'area', 2, 2, 0), ('dam', 'm', 'right', 'area', 2, 2, 0), ('m', 'dm', 'right', 'area', 2, 2, 0), ('dm', 'cm', 'right', 'area', 2, 2, 0), ('cm', 'mm', 'right', 'area', 2, 2, 0), ('hm', 'km', 'left', 'area', 2, 2, 0), ('dam', 'hm', 'left', 'area', 2, 2, 0), ('m', 'dam', 'left', 'area', 2, 2, 0), ('dm', 'm', 'left', 'area', 2, 2, 0), ('cm', 'dm', 'left', 'area', 2, 2, 0), ('mm', 'cm', 'left', 'area', 2, 2, 0), ('km', 'dam', 'right', 'area', 4, 2, 0), # area: 2 columns [4] ('hm', 'm', 'right', 'area', 4, 2, 0), ('dam', 'dm', 'right', 'area', 4, 2, 0), ('m', 'cm', 'right', 'area', 4, 2, 0), ('dm', 'mm', 'right', 'area', 4, 2, 0), ('dam', 'km', 'left', 'area', 4, 2, 0), ('m', 'hm', 'left', 'area', 4, 2, 0), ('dm', 'dam', 'left', 'area', 4, 2, 0), ('cm', 'm', 'left', 'area', 4, 2, 0), ('mm', 'dm', 'left', 'area', 4, 2, 0), ('km', 'hm', 'right', 'volume', 3, 3, 0), # vol: 1 column [3] ('hm', 'dam', 'right', 'volume', 3, 3, 0), ('dam', 'm', 'right', 'volume', 3, 3, 0), ('m', 'dm', 'right', 'volume', 3, 3, 0), ('dm', 'cm', 'right', 'volume', 3, 3, 0), ('cm', 'mm', 'right', 'volume', 3, 3, 0), ('hm', 'km', 'left', 'volume', 3, 3, 0), ('dam', 'hm', 'left', 'volume', 3, 3, 0), ('m', 'dam', 'left', 'volume', 3, 3, 0), ('dm', 'm', 'left', 'volume', 3, 3, 0), ('cm', 'dm', 'left', 'volume', 3, 3, 0), ('mm', 'cm', 'left', 'volume', 3, 3, 0), ('km', 'dam', 'right', 'volume', 6, 3, 0), # vol: 2 columns [6] ('hm', 'm', 'right', 'volume', 6, 3, 0), ('dam', 'dm', 'right', 'volume', 6, 3, 0), ('m', 'cm', 'right', 'volume', 6, 3, 0), ('dm', 'mm', 'right', 'volume', 6, 3, 0), ('dam', 'km', 'left', 'volume', 6, 3, 0), ('m', 'hm', 'left', 'volume', 6, 3, 0), ('dm', 'dam', 'left', 'volume', 6, 3, 0), ('cm', 'm', 'left', 'volume', 6, 3, 0), ('mm', 'dm', 'left', 'volume', 6, 3, 0), # vol -> capacity ('dm', 'L', 'none', 'volume2capacity', 3, 3, 0), ('cm', 'mL', 'none', 'volume2capacity', 3, 3, 0), ('m', 'L', 'right', 'volume2capacity', 4, 3, 0), ('dm', 'mL', 'right', 'volume2capacity', 4, 3, 0), ('m', 'hL', 'right', 'volume2capacity', 7, 3, 0), ('m', 'daL', 'right', 'volume2capacity', 7, 3, 0), ('m', 'dL', 'right', 'volume2capacity', 7, 3, 0), ('m', 'cL', 'right', 'volume2capacity', 7, 3, 0), ('m', 'mL', 'right', 'volume2capacity', 7, 3, 0), ('dm', 'hL', 'left', 'volume2capacity', 7, 3, 0), ('dm', 'daL', 'left', 'volume2capacity', 7, 3, 0), ('dm', 'dL', 'right', 'volume2capacity', 7, 3, 0), ('dm', 'cL', 'right', 'volume2capacity', 7, 3, 0), ('cm', 'hL', 'left', 'volume2capacity', 7, 3, 0), ('cm', 'daL', 'left', 'volume2capacity', 7, 3, 0), ('cm', 'L', 'left', 'volume2capacity', 4, 3, 0), ('cm', 'dL', 'left', 'volume2capacity', 7, 3, 0), ('cm', 'cL', 'left', 'volume2capacity', 7, 3, 0), ('mm', 'hL', 'left', 'volume2capacity', 8, 3, 0), ('mm', 'daL', 'left', 'volume2capacity', 8, 3, 0), ('mm', 'L', 'left', 'volume2capacity', 8, 3, 0), ('mm', 'dL', 'left', 'volume2capacity', 8, 3, 0), ('mm', 'cL', 'left', 'volume2capacity', 8, 3, 0), ('mm', 'mL', 'left', 'volume2capacity', 7, 3, 0), # capacity -> vol ('L', 'dm', 'none', 'capacity2volume', 3, 3, 0), ('mL', 'cm', 
'none', 'capacity2volume', 3, 3, 0), ('L', 'm', 'left', 'capacity2volume', 4, 3, 0), ('mL', 'dm', 'left', 'capacity2volume', 4, 3, 0), ('hL', 'm', 'left', 'capacity2volume', 7, 3, 0), ('daL', 'm', 'left', 'capacity2volume', 7, 3, 0), ('dL', 'm', 'left', 'capacity2volume', 7, 3, 0), ('cL', 'm', 'left', 'capacity2volume', 7, 3, 0), ('mL', 'm', 'left', 'capacity2volume', 7, 3, 0), ('hL', 'dm', 'right', 'capacity2volume', 7, 3, 0), ('daL', 'dm', 'right', 'capacity2volume', 7, 3, 0), ('dL', 'dm', 'left', 'capacity2volume', 7, 3, 0), ('cL', 'dm', 'left', 'capacity2volume', 7, 3, 0), ('hL', 'cm', 'right', 'capacity2volume', 7, 3, 0), ('daL', 'cm', 'right', 'capacity2volume', 7, 3, 0), ('L', 'cm', 'right', 'capacity2volume', 4, 3, 0), ('dL', 'cm', 'right', 'capacity2volume', 7, 3, 0), ('cL', 'cm', 'right', 'capacity2volume', 7, 3, 0), ('hL', 'mm', 'right', 'capacity2volume', 8, 3, 0), ('daL', 'mm', 'right', 'capacity2volume', 8, 3, 0), ('L', 'mm', 'right', 'capacity2volume', 8, 3, 0), ('dL', 'mm', 'right', 'capacity2volume', 8, 3, 0), ('cL', 'mm', 'right', 'capacity2volume', 8, 3, 0), ('mL', 'mm', 'right', 'capacity2volume', 7, 3, 0), ] db.executemany("INSERT " "INTO units_conversions" "(unit1, unit2, direction, category, level, dimension, " "drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?)", db_rows) sys.stderr.write('Insert digits places...\n') db_rows = [(str(elt), 0) for elt in DIGITSPLACES] db.executemany("INSERT " "INTO digits_places" "(place, drawDate) " "VALUES(?, ?)", db_rows) sys.stderr.write('Insert fractional digits places...\n') db_rows = [(str(elt), 0) for elt in DIGITSPLACES_DECIMAL] db.executemany("INSERT " "INTO fracdigits_places" "(place, drawDate) " "VALUES(?, ?)", db_rows) sys.stderr.write('Insert dvipsnames selection...\n') db_rows = [('Apricot', 0), ('BurntOrange', 0), ('Dandelion', 0), ('Goldenrod', 0), ('Lavender', 0), ('LimeGreen', 0), ('NavyBlue', 0), ('Red', 0), ('SkyBlue', 0), ('Periwinkle', 0)] db.executemany("INSERT " "INTO dvipsnames_selection(color_name, drawDate) " "VALUES(?, ?)", db_rows) sys.stderr.write('Insert line segments\' marks...\n') creation_query = '''CREATE TABLE ls_marks (id INTEGER PRIMARY KEY, mark TEXT, drawDate INTEGER)''' db_creation_queries.append(creation_query) db.execute(creation_query) db_rows = [('|', 0), ('||', 0), ('|||', 0), ('O', 0), (r'\triangle', 0), (r'\square', 0), (r'\lozenge', 0), (r'\bigstar', 0)] db.executemany("INSERT " "INTO ls_marks(mark, drawDate) " "VALUES(?, ?)", db_rows) anglessets_db_creation_queries = [] sys.stderr.write('Anglessets db: insert anglessets...\n') creation_query = '''CREATE TABLE anglessets (id INTEGER PRIMARY KEY, nbof_angles INTEGER, distcode TEXT, variant INTEGER, nbof_right_angles INTEGER, equal_angles TEXT, table2 INTEGER, table3 INTEGER, table4 INTEGER, table5 INTEGER, table6 INTEGER, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(2, '1_1', 0, 0, 'all_different', 0, 0, 0, 0, 0, 0), (2, '1_1r', 0, 1, 'all_different', 0, 0, 0, 0, 0, 0), (2, '1_1r', 1, 1, 'all_different', 0, 0, 0, 0, 0, 0), (2, '2', 0, 0, 'equilateral', 1, 0, 0, 0, 0, 0), (3, '1_1_1', 0, 0, 'all_different', 0, 0, 0, 0, 0, 0), (3, '1_1_1r', 0, 1, 'all_different', 0, 0, 0, 0, 0, 0), (3, '1_1_1r', 1, 1, 'all_different', 0, 0, 0, 0, 0, 0), (3, '1_1_1r', 2, 1, 'all_different', 0, 0, 0, 0, 0, 0), (3, '2_1', 0, 0, 'none', 1, 0, 0, 0, 0, 0), (3, '2_1', 1, 0, 'none', 1, 0, 0, 0, 0, 0), (3, '2_1', 2, 0, 'none', 1, 0, 0, 0, 0, 0), (3, '2_1r', 0, 1, 'none', 1, 0, 0, 0, 0, 0), (3, 
'2_1r', 1, 1, 'none', 1, 0, 0, 0, 0, 0), (3, '2_1r', 2, 1, 'none', 1, 0, 0, 0, 0, 0), (3, '3', 0, 0, 'equilateral', 0, 1, 0, 0, 0, 0)] anglessets_db.executemany( "INSERT INTO anglessets(" "nbof_angles, distcode, variant, nbof_right_angles, equal_angles, " "table2, table3, table4, table5, table6, drawDate) " "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", db_rows) sys.stderr.write('Anglessets db: insert anglessets subvariants...\n') creation_query = '''CREATE TABLE _1_1_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0), (2, 0), (3, 0)] anglessets_db.executemany( "INSERT INTO _1_1_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _1_1r_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0), (2, 0), (3, 0)] anglessets_db.executemany( "INSERT INTO _1_1r_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _2_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0), (2, 0), (3, 0)] anglessets_db.executemany( "INSERT INTO _2_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _1_1_1_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0), (2, 0), (3, 0)] anglessets_db.executemany( "INSERT INTO _1_1_1_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _1_1_1r_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0)] anglessets_db.executemany( "INSERT INTO _1_1_1r_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _2_1_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0)] anglessets_db.executemany( "INSERT INTO _2_1_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _2_1r_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0)] anglessets_db.executemany( "INSERT INTO _2_1r_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) creation_query = '''CREATE TABLE _3_subvariants (id INTEGER PRIMARY KEY, subvariant_nb, drawDate INTEGER)''' anglessets_db_creation_queries.append(creation_query) anglessets_db.execute(creation_query) db_rows = [(1, 0), (2, 0), (3, 0)] anglessets_db.executemany( "INSERT INTO _3_subvariants(subvariant_nb, drawDate) " "VALUES(?, ?)", db_rows) shapes_db_creation_queries = [] sys.stderr.write('Shapes db: insert polygons...\n') creation_query = '''CREATE TABLE polygons (id INTEGER PRIMARY KEY, sides_nb INTEGER, type TEXT, special TEXT, codename TEXT, sides_particularity TEXT, level INTEGER, variant INTEGER, table2 INTEGER, table3 INTEGER, table4 INTEGER, table5 INTEGER, table6 INTEGER, drawDate INTEGER)''' 
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [
    (3, 'triangle', 'scalene_triangle', 'triangle_1_1_1', 'all_different', 2, 0, 0, 0, 0, 0, 0, 0),
    (3, 'triangle', 'right_triangle', 'triangle_1_1_1', 'all_different', 2, 1, 0, 0, 0, 0, 0, 0),
    (3, 'triangle', 'isosceles_triangle', 'triangle_2_1', 'none', 2, 0, 1, 0, 0, 0, 0, 0),
    (3, 'triangle', 'equilateral_triangle', 'triangle_3', 'equilateral', 1, 0, 0, 1, 0, 0, 0, 0),
    (4, 'quadrilateral', '', 'quadrilateral_1_1_1_1', 'all_different', 3, 0, 0, 0, 0, 0, 0, 0),
    (4, 'quadrilateral', '', 'quadrilateral_2_1_1', 'none', 3, 0, 1, 0, 0, 0, 0, 0),
    (4, 'quadrilateral', '', 'quadrilateral_2_1_1', 'none', 3, 1, 1, 0, 0, 0, 0, 0),
    (4, 'quadrilateral', 'kite', 'quadrilateral_2_2', 'none', 3, 0, 1, 0, 0, 0, 0, 0),
    (4, 'quadrilateral', 'parallelelogram', 'quadrilateral_2_2', 'none', 3, 1, 1, 0, 0, 0, 0, 0),
    (4, 'quadrilateral', 'rectangle', 'quadrilateral_2_2', 'none', 2, 2, 1, 0, 0, 0, 0, 0),
    (4, 'quadrilateral', '', 'quadrilateral_3_1', 'none', 2, 0, 0, 1, 0, 0, 0, 0),
    (4, 'quadrilateral', 'rhombus', 'quadrilateral_4', 'equilateral', 1, 0, 0, 0, 1, 0, 0, 0),
    (4, 'quadrilateral', 'square', 'quadrilateral_4', 'equilateral', 1, 1, 0, 0, 1, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_1_1_1_1_1', 'all_different', 4, 0, 0, 0, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_2_1_1_1', 'none', 4, 0, 1, 0, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_2_1_1_1', 'none', 4, 1, 1, 0, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_2_2_1', 'none', 4, 0, 1, 0, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_2_2_1', 'none', 4, 1, 1, 0, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_2_2_1', 'none', 4, 2, 1, 0, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_3_1_1', 'none', 3, 0, 0, 1, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_3_1_1', 'none', 3, 1, 0, 1, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_3_2', 'none', 3, 0, 1, 1, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_3_2', 'none', 3, 1, 1, 1, 0, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_4_1', 'none', 2, 0, 0, 0, 1, 0, 0, 0),
    (5, 'pentagon', '', 'pentagon_5', 'equilateral', 1, 0, 0, 0, 0, 1, 0, 0),
    (6, 'hexagon', '', 'hexagon_1_1_1_1_1_1', 'all_different', 5, 0, 0, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_1_1_1_1', 'none', 5, 0, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_1_1_1_1', 'none', 5, 1, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_1_1_1_1', 'none', 5, 2, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 0, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 1, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 2, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 3, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 4, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 5, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 6, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_1_1', 'none', 5, 7, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_2', 'none', 3, 0, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_2', 'none', 3, 1, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_2', 'none', 3, 2, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_2_2_2', 'none', 3, 3, 1, 0, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_1_1_1', 'none', 4, 0, 0, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_1_1_1', 'none', 4, 1, 0, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_1_1_1', 'none', 4, 2, 0, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_2_1', 'none', 4, 0, 1, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_2_1', 'none', 4, 1, 1, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_2_1', 'none', 4, 2, 1, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_3', 'none', 3, 0, 0, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_3', 'none', 3, 1, 0, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_3_3', 'none', 3, 2, 0, 1, 0, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_4_1_1', 'none', 3, 0, 0, 0, 1, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_4_1_1', 'none', 3, 1, 0, 0, 1, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_4_1_1', 'none', 3, 2, 0, 0, 1, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_4_2', 'none', 3, 0, 1, 0, 1, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_4_2', 'none', 3, 1, 1, 0, 1, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_4_2', 'none', 3, 2, 1, 0, 1, 0, 0, 0),
    (6, 'hexagon', '', 'hexagon_5_1', 'none', 2, 0, 0, 0, 0, 1, 0, 0),
    (6, 'hexagon', '', 'hexagon_6', 'equilateral', 1, 0, 0, 0, 0, 0, 1, 0)]
shapes_db.executemany(
    "INSERT INTO polygons("
    "sides_nb, type, special, codename, sides_particularity, level, "
    "variant, table2, table3, table4, table5, table6, drawDate) "
    "VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
    db_rows)
sys.stderr.write('Shapes db: insert shapes variants: scalene triangles...')
creation_query = '''CREATE TABLE scalene_triangle_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
    "INSERT INTO scalene_triangle_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
                 'right triangles...')
creation_query = '''CREATE TABLE right_triangle_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]
shapes_db.executemany(
    "INSERT INTO right_triangle_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
                 'right triangles, isosceles triangles...')
creation_query = '''CREATE TABLE triangle_2_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0), (7, 0), (8, 0)]
shapes_db.executemany(
    "INSERT INTO triangle_2_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
                 'right triangles, isosceles triangles, equilateral '
                 'triangles...')
creation_query = '''CREATE TABLE triangle_3_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
    "INSERT INTO triangle_3_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
                 'right triangles, isosceles triangles, equilateral '
                 'triangles, quadrilaterals...')
creation_query = '''CREATE TABLE quadrilateral_1_1_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0), (5, 0), (6, 0)]
shapes_db.executemany(
    "INSERT INTO quadrilateral_1_1_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE quadrilateral_2_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
    "INSERT INTO quadrilateral_2_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE quadrilateral_2_2_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
    "INSERT INTO quadrilateral_2_2_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE quadrilateral_3_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), (3, 0), (4, 0)]
shapes_db.executemany(
    "INSERT INTO quadrilateral_3_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE quadrilateral_4_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0)]
shapes_db.executemany(
    "INSERT INTO quadrilateral_4_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
sys.stderr.write('\rShapes db: insert shapes variants: scalene triangles, '
                 'right triangles, isosceles triangles, equilateral '
                 'triangles, quadrilaterals, pentagons...\n')
creation_query = '''CREATE TABLE pentagon_1_1_1_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_1_1_1_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE pentagon_2_1_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_2_1_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE pentagon_2_2_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_2_2_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE pentagon_3_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_3_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE pentagon_3_2_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_3_2_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE pentagon_4_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_4_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE pentagon_5_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO pentagon_5_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_1_1_1_1_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO hexagon_1_1_1_1_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_2_1_1_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0)]
shapes_db.executemany(
    "INSERT INTO hexagon_2_1_1_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_2_2_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_2_2_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_2_2_2_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_2_2_2_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_3_1_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_3_1_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_3_2_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_3_2_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_3_3_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_3_3_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_4_1_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_4_1_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_4_2_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_4_2_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_5_1_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_5_1_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
creation_query = '''CREATE TABLE hexagon_6_shapes
                    (id INTEGER PRIMARY KEY, shape_nb, drawDate INTEGER)'''
shapes_db_creation_queries.append(creation_query)
shapes_db.execute(creation_query)
db_rows = [(1, 0), (2, 0), ]
shapes_db.executemany(
    "INSERT INTO hexagon_6_shapes(shape_nb, drawDate) "
    "VALUES(?, ?)", db_rows)
solids_db_creation_queries = []
sys.stderr.write('Solids db: insert solids...\n')
# type will be: cuboid, cube, prism etc.
creation_query = '''CREATE TABLE polyhedra
                    (id INTEGER PRIMARY KEY, faces_nb INTEGER, type TEXT,
                    variant INTEGER, drawDate INTEGER)'''
solids_db_creation_queries.append(creation_query)
solids_db.execute(creation_query)
db_rows = [(6, 'rightcuboid', 0, 0),
           (6, 'rightcuboid', 1, 0),
           (6, 'rightcuboid', 2, 0),
           (6, 'rightcuboid', 3, 0),
           (6, 'rightcuboid', 4, 0),
           (6, 'rightcuboid', 5, 0), ]
solids_db.executemany(
    "INSERT INTO polyhedra("
    "faces_nb, type, variant, drawDate) "
    "VALUES(?, ?, ?, ?)", db_rows)
sys.stderr.write('Commit changes to databases...\n')
db.commit()
shapes_db.commit()
solids_db.commit()
anglessets_db.commit()
natural_nb_tuples_db.commit()
sys.stderr.write('Close databases...\n')
db.close()
shapes_db.close()
solids_db.close()
anglessets_db.close()
natural_nb_tuples_db.close()
sys.stderr.write('Create databases\' indices...\n')
db_index = {}
for qr in db_creation_queries:
    key, value = parse_sql_creation_query(qr)
    db_index.update({key: value})
with open(settings.db_index_path, 'w') as f:
    json.dump(db_index, f, indent=4)
    f.write('\n')
shapes_db_index = {}
for qr in shapes_db_creation_queries:
    key, value = parse_sql_creation_query(qr)
    shapes_db_index.update({key: value})
with open(settings.shapes_db_index_path, 'w') as f:
    json.dump(shapes_db_index, f, indent=4)
    f.write('\n')
solids_db_index = {}
for qr in solids_db_creation_queries:
    key, value = parse_sql_creation_query(qr)
    solids_db_index.update({key: value})
with open(settings.solids_db_index_path, 'w') as f:
    json.dump(solids_db_index, f, indent=4)
    f.write('\n')
anglessets_db_index = {}
for qr in anglessets_db_creation_queries:
    key, value = parse_sql_creation_query(qr)
    anglessets_db_index.update({key: value})
with open(settings.anglessets_db_index_path, 'w') as f:
    json.dump(anglessets_db_index, f, indent=4)
    f.write('\n')
natural_nb_tuples_db_index = {}
for qr in natural_nb_tuples_db_creation_queries:
    key, value = parse_sql_creation_query(qr)
    natural_nb_tuples_db_index.update({key: value})
with open(settings.natural_nb_tuples_db_index_path, 'w') as f:
    json.dump(natural_nb_tuples_db_index, f, indent=4)
    f.write('\n')
sys.stderr.write('Done!\n')


if __name__ == '__main__':
    __main__()
gpl-3.0
5,954,798,789,077,043,000
47.293737
79
0.482178
false
3.287027
false
false
false
aberon10/training
training/ticketing_system/views.py
1
10444
# -*- coding: utf-8 -*-
import time

from django.db.models import Q
from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.http import HttpResponseNotFound
from django.contrib.auth.hashers import make_password
from django.contrib.auth.hashers import check_password
from django.views.generic.edit import FormView
from django.views.generic import TemplateView
from django.views.generic import RedirectView

from .forms import SignInForm
from .forms import LoginForm
from .forms import TicketCreateForm
from .models import User
from .models import Ticket


class LoginView(FormView):
    """ Login View. """

    form_class = LoginForm
    template_name = 'ticketing_system/login.html'
    success_url = '/dashboard'

    def get(self, request, *args, **kwargs):
        if request.session.get('user'):
            return HttpResponseRedirect(self.get_success_url())
        else:
            return render(
                request,
                template_name=self.template_name,
                context={'form': self.form_class}
            )

    def form_valid(self, form):
        context = {
            'form': form,
            'error_login': 'The user and/or password do not match'
        }
        email = form.cleaned_data['email']
        password = form.cleaned_data['password']

        try:
            user = User.objects.get(email=email)
        except User.DoesNotExist:
            pass
        else:
            if check_password(password, user.password):
                # create the new user session
                self.request.session['user'] = user.email
                self.request.session['name'] = user.name
                return HttpResponseRedirect(self.get_success_url())

        return render(
            self.request,
            template_name=self.template_name,
            context=context
        )


class LogoutView(RedirectView):
    """ Logout View. """

    url = '/login'

    def get(self, request, *args, **kwargs):
        try:
            # delete the user session
            del request.session['user']
            del request.session['name']
        except KeyError:
            pass
        return super(LogoutView, self).get(request, *args, **kwargs)


class RegisterView(TemplateView):
    """ Register View. """

    template_name = 'ticketing_system/register.html'

    def get(self, request, *args, **kwargs):
        form = SignInForm()
        return render(
            request,
            template_name=self.template_name,
            context={'register_form': form}
        )

    def post(self, request, *args, **kwargs):
        form = SignInForm(request.POST)
        response = {
            'register_form': form,
            'message': '',
            'success': False
        }

        if form.is_valid():
            email = form.cleaned_data['email']
            password = form.cleaned_data['password']
            confirm_password = form.cleaned_data['confirm_password']
            name = form.cleaned_data['name']

            if password != confirm_password:
                response['register_form']['confirm_password'].error_messages =\
                    'Passwords do not match..'
            else:
                try:
                    User.objects.get(email=email)
                except User.DoesNotExist:
                    user = User(
                        email=email,
                        name=name,
                        password=make_password(password)
                    )
                    user.save()
                    response['register_form'] = SignInForm()
                    response['success'] = True
                    response['message'] = 'You have successfully registered!'
                else:
                    response['register_form']['email'].error_messages = \
                        'User already exists'

        return render(
            request,
            template_name=self.template_name,
            context=response,
        )


class DashboardView(TemplateView):
    """ Dashboard View.
    """

    template_name = 'ticketing_system/dashboard.html'

    def get(self, request, *args, **kwargs):
        if request.session.get('user'):
            user = User.objects.get(email=request.session['user'])
            tickets = Ticket.objects.filter(
                Q(status='O'),
                Q(author=user) | Q(assignee=user)
            ).distinct()
            return render(
                request,
                template_name=self.template_name,
                context={
                    'current_path': request.path.split('/')[1],
                    'tickets': tickets
                }
            )
        else:
            return HttpResponseRedirect('/login')

    def post(self, request, *args, **kwargs):
        if request.session['user']:
            title = request.POST.get('title')
            status = request.POST.get('status')
            user = User.objects.get(email=request.session['user'])
            tickets = Ticket.objects.all()

            if title != '':
                tickets = tickets.filter(title__icontains=title)

            tickets = tickets.filter(
                Q(status=status),
                Q(author=user) | Q(assignee=user)
            ).distinct()

            return render(
                request,
                template_name=self.template_name,
                context={
                    'current_path': request.path.split('/')[1],
                    'tickets': tickets
                }
            )
        else:
            return HttpResponseRedirect('/login')


class TicketView(FormView):
    """ Ticket View. """

    form_class = TicketCreateForm
    template_name = 'ticketing_system/ticket_form.html'
    success_url = '/ticket'

    def get(self, request, *args, **kwargs):
        if not request.session.get('user'):
            return HttpResponseRedirect('/login')
        else:
            user = User.objects.get(email=request.session['user'])
            try:
                if kwargs['id_ticket']:
                    try:
                        ticket = Ticket.objects.filter(
                            Q(pk=int(kwargs['id_ticket'])),
                            Q(author=user) | Q(assignee=user)
                        )[0]
                    except Ticket.DoesNotExist:
                        return HttpResponseNotFound('<h1>Page not found</h1>')
                    else:
                        form = self.form_class(initial={
                            'title': ticket.title,
                            'body': ticket.body,
                            'author': ticket.author,
                            'created': ticket.created,
                            'status': ticket.status,
                            'assignee': ticket.assignee.all()
                        })
            except KeyError:
                form = self.form_class(initial={
                    'author': request.session['user'],
                    'created': time.strftime('%Y-%m-%d'),
                    'status': 'O',
                    'assignee': user.id
                })
            return render(
                request,
                template_name=self.template_name,
                context={'form': form}
            )

    def post(self, request, *args, **kwargs):
        if not request.session.get('user'):
            return HttpResponseRedirect('/login')
        else:
            error_message = ''
            ticket = Ticket()
            assignees_users = request.POST.getlist('assignee')
            form = TicketCreateForm({
                'title': request.POST.get('title'),
                'body': request.POST.get('body'),
                'status': request.POST.get('status'),
                'created': request.POST.get('created')
            })

            if form.is_valid():
                title = form.cleaned_data['title']
                body = form.cleaned_data['body']
                email = self.request.session['user']
                created = form.cleaned_data['created']
                status = form.cleaned_data['status']
                author = User.objects.get(email=email)

                try:
                    if kwargs['id_ticket']:
                        ticket = Ticket.objects.get(
                            pk=int(kwargs['id_ticket'])
                        )
                        for item in ticket.assignee.all():
                            user = User.objects.get(pk=int(item.id))
                            ticket.assignee.remove(user)
                except KeyError:
                    pass

                try:
                    users = []
                    for user in assignees_users:
                        users.append(User.objects.get(pk=int(user)))
                except User.DoesNotExist:
                    error_message = 'Error creating ticket'
                else:
                    ticket.title = title
                    ticket.body = body
                    ticket.author = author
                    ticket.created = created
                    ticket.status = status
                    ticket.save()

                    if not users:
                        users.append(author)

                    ticket.assignee.set(users)
                    return HttpResponseRedirect('/dashboard')

            return render(
                request,
                template_name=self.template_name,
                context={
                    'form': TicketCreateForm(request.POST),
                    'error_message': error_message
                }
            )


class TicketDeleteView(TemplateView):

    def get(self, request, *args, **kwargs):
        if not request.session.get('user'):
            return HttpResponseRedirect('/login')
        else:
            try:
                if kwargs['id_ticket']:
                    user = User.objects.get(email=request.session['user'])
                    ticket = Ticket.objects.filter(
                        Q(pk=int(kwargs['id_ticket'])),
                        Q(author=user) | Q(assignee=user)
                    ).distinct()
                    ticket.delete()
            except KeyError:
                pass
            except Ticket.DoesNotExist:
                pass
            return HttpResponseRedirect('/dashboard')
mit
-1,894,123,065,612,673,500
32.474359
79
0.486021
false
4.949763
false
false
false
MadsJensen/RP_scripts
extract_ts_epochs_interupt.py
1
1207
import sys

import numpy as np
import mne
from mne.minimum_norm import read_inverse_operator, apply_inverse_epochs

from my_settings import (mne_folder, epochs_folder, source_folder)

subject = sys.argv[1]

method = "dSPM"
snr = 1.
lambda2 = 1. / snr**2

labels = mne.read_labels_from_annot(
    subject=subject, parc="PALS_B12_Brodmann", regexp="Brodmann")

condition = "interupt"

inv = read_inverse_operator(mne_folder + "%s_%s-inv.fif" % (subject, condition))
epochs = mne.read_epochs(epochs_folder + "%s_%s-epo.fif" % (subject, condition))
# epochs.resample(500)

stcs = apply_inverse_epochs(
    epochs["press"], inv, lambda2, method=method, pick_ori=None)

ts = [
    mne.extract_label_time_course(stc, labels, inv["src"], mode="mean_flip")
    for stc in stcs
]

# for h, tc in enumerate(ts):
#     for j, t in enumerate(tc):
#         t *= np.sign(t[np.argmax(np.abs(t))])
#         tc[j, :] = t
#     ts[h] = tc

ts = np.asarray(ts)

# NB: `stc` here is the comprehension variable leaking out of the list
# comprehension above (Python 2 scoping), i.e. the last source estimate;
# under Python 3 this name would be undefined.
stc.save(source_folder + "%s_%s_epo" % (subject, condition))

np.save(source_folder + "ave_ts/%s_%s_ts-epo.npy" % (subject, condition), ts)
bsd-3-clause
1,019,196,104,266,555,100
29.175
78
0.589064
false
3.002488
false
false
false
ContinuumIO/chaco
setup.py
1
2424
# Copyright (c) 2008-2012 by Enthought, Inc.
# All rights reserved.
from os.path import join

from numpy import get_include
from setuptools import setup, Extension, find_packages

info = {}
execfile(join('chaco', '__init__.py'), info)

numpy_include_dir = get_include()

# Register Python extensions
contour = Extension(
    'chaco.contour.contour',
    sources=['chaco/contour/cntr.c'],
    include_dirs=[numpy_include_dir],
    define_macros=[('NUMPY', None)]
)

cython_speedups = Extension(
    'chaco._cython_speedups',
    sources=['chaco/_cython_speedups.c'],
    include_dirs=[numpy_include_dir],
)

# Commenting this out for now, until we get the module fully tested and working
#speedups = Extension(
#    'chaco._speedups',
#    sources = ['chaco/_speedups.cpp'],
#    include_dirs = [get_include()],
#    define_macros=[('NUMPY', None)]
#    )

setup(
    name = 'chaco',
    version = info['__version__'],
    author = 'Peter Wang, et. al.',
    author_email = 'pwang@enthought.com',
    maintainer = 'ETS Developers',
    maintainer_email = 'enthought-dev@enthought.com',
    url = 'http://code.enthought.com/projects/chaco',
    classifiers = [c.strip() for c in """\
        Development Status :: 5 - Production/Stable
        Intended Audience :: Developers
        Intended Audience :: Science/Research
        License :: OSI Approved :: BSD License
        Operating System :: MacOS
        Operating System :: Microsoft :: Windows
        Operating System :: OS Independent
        Operating System :: POSIX
        Operating System :: Unix
        Programming Language :: C
        Programming Language :: Python
        Topic :: Scientific/Engineering
        Topic :: Software Development
        Topic :: Software Development :: Libraries
        """.splitlines() if len(c.strip()) > 0],
    package_data={'chaco': ['tools/toolbars/images/*.png',
                            'layers/data/*.svg']},
    description = 'interactive 2-dimensional plotting',
    long_description = open('README.rst').read(),
    download_url = ('http://www.enthought.com/repo/ets/chaco-%s.tar.gz' %
                    info['__version__']),
    ext_modules = [contour, cython_speedups],
    include_package_data = True,
    install_requires = info['__requires__'],
    license = 'BSD',
    packages = find_packages(),
    platforms = ["Windows", "Linux", "Mac OS-X", "Unix", "Solaris"],
    zip_safe = False,
)
bsd-3-clause
-1,258,937,183,096,643,600
31.756757
79
0.622937
false
3.781591
false
false
false
cread/ecks
ecks/plugins/disk.py
1
2091
""" Ecks plugin to collect disk usage information Copyright 2011 Chris Read (chris.read@gmail.com) Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from pprint import pprint def _calc_size(block_size, block_count): if block_size and block_count: return block_size * block_count else: return -1 def get_disk(parent, host, community): """ This is a plugin to be loaded by Ecks return an array of tuples containing (type, path, size in bytes, used bytes) for each block device type is an integer which is one of the following: hrStorageOther = 1 hrStorageRam = 2 hrStorageVirtualMemory = 3 hrStorageFixedDisk = 4 hrStorageRemovableDisk = 5 hrStorageFloppyDisk = 6 hrStorageCompactDisc = 7 hrStorageRamDisk = 8 hrStorageFlashMemory = 9 hrStorageNetworkDisk = 10 """ disks = (1,3,6,1,2,1,25,2,3,1) # HOST-RESOURCES-MIB data = parent.get_snmp_data(host, community, disks, 1) # We need to work this out the long was as there are cases where size or used is not supplied details = [] formatted = [] for i in [t for t in parent._extract(data, int, 1)]: details.append([ value for (oid, (data_type, index), value) in data if index == i and data_type != 1]) for dev in details: if len(dev) != 5: continue formatted.append(( tuple(dev[0])[-1], str(dev[1]), int(dev[2]) * int(dev[3]), int(dev[2]) * int(dev[4]) )) return formatted
apache-2.0
7,506,630,265,331,763,000
30.681818
109
0.642276
false
3.808743
false
false
false
MaxTyutyunnikov/lino
obsolete/src/sandbox/cherrypy/4/hello4.py
1
1278
#!/usr/bin/python

import cherrypy

from HyperText.Documents import Document
from HyperText import HTML as html

from lino.apps.pinboard import demo


class Root:
    def __init__(self,dbsess):
        self.dbsess = dbsess

    def default(self,*args,**kw):
        title=str(self.dbsess)
        doc=Document(title=html.TITLE(title))

        div = html.DIV(klass="title")
        doc.append(div)
        div.append(html.H1(title))

        div = html.DIV(klass="menu")
        doc.append(div)
        p = html.P("Menu:")
        div.append(p)
        p.append(html.BR())
        p.append(html.A("home",href="/"))
        p.append(html.BR())
        p.append(html.A("foo",href="foo/bar/baz"))
        p.append(html.BR())
        p.append(html.A("reports",href="report"))

        doc.append(html.P(self.dbsess.app.aboutString()))
        doc.append(html.P('args='+repr(args)))
        doc.append(html.P('kw='+repr(kw)))

        #
        div = html.DIV(klass="footer")
        doc.append(div)
        div.append(html.P("foo "+cherrypy.request.base + " bar"))

        return str(doc)
    default.exposed = True


dbsess=demo.startup()
frm = dbsess.db.app.showMainForm()
cherrypy.root = Root(dbsess)
cherrypy.server.start()
gpl-3.0
2,859,601,189,977,585,700
22.236364
65
0.564945
false
3.380952
false
false
false
alpine9000/amiga_examples
tools/external/amitools/amitools/binfmt/hunk/HunkReader.py
1
30698
"""A class for reading Amiga executables and object files in Hunk format""" import os import struct import StringIO from types import * from Hunk import * class HunkReader: """Load Amiga executable Hunk structures""" def __init__(self): self.hunks = [] self.error_string = None self.type = None self.header = None self.segments = [] self.overlay = None self.overlay_headers = None self.overlay_segments = None self.libs = None self.units = None def get_struct_summary(self, obj): if type(obj) == ListType: result = [] for a in obj: v = self.get_struct_summary(a) if v != None: result.append(v) return "[" + ",".join(result) + "]" elif type(obj) == DictType: if obj.has_key('type_name'): type_name = obj['type_name'] return type_name.replace('HUNK_','') else: result = [] for k in obj.keys(): v = self.get_struct_summary(obj[k]) if v != None: result.append(k + ":" + v) return '{' + ",".join(result) + '}' else: return None def get_long(self, data): return struct.unpack(">I",data)[0] def read_long(self, f): data = f.read(4) if len(data) == 0: return -1 elif len(data) != 4: return -(len(data)+1) return struct.unpack(">I",data)[0] def read_word(self, f): data = f.read(2) if len(data) == 0: return -1 elif len(data) != 2: return -(len(data)+1) return struct.unpack(">H",data)[0] def read_name(self, f): num_longs = self.read_long(f) if num_longs < 0: return -1,None elif num_longs == 0: return 0,"" else: return self.read_name_size(f, num_longs) def read_tag(self, f): data = f.read(4) if len(data) == 0: return -1; elif len(data) != 4: return -(len(data)+1) return data def read_name_size(self, f, num_longs): size = (num_longs & 0xffffff) * 4 data = f.read(size) if len(data) < size: return -1,None endpos = data.find('\0') if endpos == -1: return size,data elif endpos == 0: return 0,"" else: return size,data[:endpos] def get_index_name(self, strtab, offset): end = strtab.find('\0',offset) if end == -1: return strtab[offset:] else: return strtab[offset:end] def is_valid_first_hunk_type(self, hunk_type): return hunk_type == HUNK_HEADER or hunk_type == HUNK_LIB or hunk_type == HUNK_UNIT def parse_header(self, f, hunk): names = [] hunk['names'] = names while True: l,s = self.read_name(f) if l < 0: self.error_string = "Error parsing HUNK_HEADER names" return RESULT_INVALID_HUNK_FILE elif l == 0: break names.append(s) # table size and hunk range table_size = self.read_long(f) first_hunk = self.read_long(f) last_hunk = self.read_long(f) if table_size < 0 or first_hunk < 0 or last_hunk < 0: self.error_string = "HUNK_HEADER invalid table_size or first_hunk or last_hunk" return RESULT_INVALID_HUNK_FILE hunk['table_size'] = table_size hunk['first_hunk'] = first_hunk hunk['last_hunk'] = last_hunk # determine number of hunks in size table num_hunks = last_hunk - first_hunk + 1 hunk_table = [] for a in xrange(num_hunks): hunk_info = {} hunk_size = self.read_long(f) if hunk_size < 0: self.error_string = "HUNK_HEADER contains invalid hunk_size" return RESULT_INVALID_HUNK_FILE hunk_bytes = hunk_size & ~HUNKF_ALL hunk_bytes *= 4 # longs to bytes hunk_info['size'] = hunk_bytes self.set_mem_flags(hunk_info, hunk_size & HUNKF_ALL, 30) hunk_table.append(hunk_info) hunk['hunks'] = hunk_table return RESULT_OK def parse_code_or_data(self, f, hunk): num_longs = self.read_long(f) if num_longs < 0: self.error_string = "%s has invalid size" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE # read in hunk data size = num_longs * 4 hunk['size'] = size & ~HUNKF_ALL flags = size & HUNKF_ALL self.set_mem_flags(hunk, flags, 30) 
hunk['data_file_offset'] = f.tell() data = f.read(hunk['size']) hunk['data'] = data return RESULT_OK def parse_bss(self, f, hunk): num_longs = self.read_long(f) if num_longs < 0: self.error_string = "%s has invalid size" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE # read in hunk data size = num_longs * 4 hunk['size'] = size & ~HUNKF_ALL flags = size & HUNKF_ALL self.set_mem_flags(hunk, flags, 30) return RESULT_OK def parse_reloc(self, f, hunk): num_relocs = 1 reloc = {} hunk['reloc'] = reloc while num_relocs != 0: num_relocs = self.read_long(f) if num_relocs < 0: self.error_string = "%s has invalid number of relocations" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE elif num_relocs == 0: # last relocation found break # build reloc map hunk_num = self.read_long(f) if hunk_num < 0: self.error_string = "%s has invalid hunk num" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE offsets = [] for a in xrange(num_relocs & 0xffff): offset = self.read_long(f) if offset < 0: self.error_string = "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" \ % (hunk['type_name'],a,offset,num_relocs,hunk_num,f.tell()) return RESULT_INVALID_HUNK_FILE offsets.append(offset) reloc[hunk_num] = offsets return RESULT_OK def parse_reloc_short(self, f, hunk): num_relocs = 1 reloc = {} hunk['reloc'] = reloc total_words = 0 while num_relocs != 0: num_relocs = self.read_word(f) if num_relocs < 0: self.error_string = "%s has invalid number of relocations" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE elif num_relocs == 0: # last relocation found total_words += 1 break # build reloc map hunk_num = self.read_word(f) if hunk_num < 0: self.error_string = "%s has invalid hunk num" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE offsets = [] count = num_relocs & 0xffff total_words += count + 2 for a in xrange(count): offset = self.read_word(f) if offset < 0: self.error_string = "%s has invalid relocation #%d offset %d (num_relocs=%d hunk_num=%d, offset=%d)" \ % (hunk['type_name'],a,offset,num_relocs,hunk_num,f.tell()) return RESULT_INVALID_HUNK_FILE offsets.append(offset) reloc[hunk_num] = offsets # padding if total_words & 1 == 1: self.read_word(f) return RESULT_OK def parse_symbol(self, f, hunk): name_len = 1 symbols = [] hunk['symbols'] = symbols while name_len > 0: (name_len, name) = self.read_name(f) if name_len < 0: self.error_string = "%s has invalid symbol name" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE elif name_len == 0: # last name occurred break value = self.read_long(f) if value < 0: self.error_string = "%s has invalid symbol value" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE symbols.append( (name,value) ) return RESULT_OK def parse_debug(self, f, hunk): num_longs = self.read_long(f) if num_longs < 0: self.error_string = "%s has invalid size" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE size = num_longs * 4 offset = self.read_long(f) hunk['debug_offset'] = offset; tag = self.read_tag(f) hunk['debug_type'] = tag; size -= 8 if tag == 'LINE': # parse LINE: source line -> code offset mapping l = self.read_long(f) size -= l * 4 + 4; l,n = self.read_name_size(f,l) src_map = [] hunk['src_file'] = n hunk['src_map'] = src_map while size > 0: line_no = self.read_long(f) offset = self.read_long(f) size -= 8 src_map.append([line_no,offset]) else: # read unknown DEBUG hunk hunk['data'] = f.read(size) return RESULT_OK def find_first_code_hunk(self): for hunk in self.hunks: if hunk['type'] == HUNK_CODE: return hunk return None def 
parse_overlay(self, f, hunk): # read size of overlay hunk ov_size = self.read_long(f) if ov_size < 0: self.error_string = "%s has invalid size" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE # read data of overlay byte_size = (ov_size + 1) *4 ov_data = f.read(byte_size) hunk['ov_data'] = ov_data # check: first get header hunk hdr_hunk = self.hunks[0] if hdr_hunk['type'] != HUNK_HEADER: self.error_string = "%s has no header hunk" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE # first find the code segment of the overlay manager overlay_mgr_hunk = self.find_first_code_hunk() if overlay_mgr_hunk == None: self.error_string = "%s has no overlay manager hunk" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE # check overlay manager overlay_mgr_data = overlay_mgr_hunk['data'] magic = self.get_long(overlay_mgr_data[4:8]) if magic != 0xabcd: self.error_string = "no valid overlay manager magic found" return RESULT_INVALID_HUNK_FILE # check for standard overlay manager magic2 = self.get_long(overlay_mgr_data[24:28]) magic3 = self.get_long(overlay_mgr_data[28:32]) magic4 = self.get_long(overlay_mgr_data[32:36]) std_overlay = (magic2 == 0x5ba0) and (magic3 == 0x074f7665) and (magic4 == 0x726c6179) hunk['ov_std'] = std_overlay return RESULT_OK def parse_lib(self, f, hunk): lib_size = self.read_long(f) hunk['lib_file_offset'] = f.tell() return RESULT_OK,lib_size * 4 def parse_index(self, f, hunk): index_size = self.read_long(f) total_size = index_size * 4 # first read string table strtab_size = self.read_word(f) strtab = f.read(strtab_size) total_size -= strtab_size + 2 # read units units = [] hunk['units'] = units unit_no = 0 while total_size > 2: # read name of unit name_offset = self.read_word(f) total_size -= 2 if name_offset == 0: break unit = {} units.append(unit) unit['unit_no'] = unit_no unit_no += 1 # generate unit name unit['name'] = self.get_index_name(strtab, name_offset) # hunks in unit hunk_begin = self.read_word(f) num_hunks = self.read_word(f) total_size -= 4 unit['hunk_begin_offset'] = hunk_begin # for all hunks in unit ihunks = [] unit['hunk_infos'] = ihunks for a in xrange(num_hunks): ihunk = {} ihunks.append(ihunk) # get hunk info name_offset = self.read_word(f) hunk_size = self.read_word(f) hunk_type = self.read_word(f) total_size -= 6 ihunk['name'] = self.get_index_name(strtab, name_offset) ihunk['size'] = hunk_size ihunk['type'] = hunk_type & 0x3fff self.set_mem_flags(ihunk,hunk_type & 0xc000,14) ihunk['type_name'] = hunk_names[hunk_type & 0x3fff] # get references num_refs = self.read_word(f) total_size -= 2 if num_refs > 0: refs = [] ihunk['refs'] = refs for b in xrange(num_refs): ref = {} name_offset = self.read_word(f) total_size -= 2 name = self.get_index_name(strtab, name_offset) if name == '': # 16 bit refs point to the previous zero byte before the string entry... 
name = self.get_index_name(strtab, name_offset+1) ref['bits'] = 16 else: ref['bits'] = 32 ref['name'] = name refs.append(ref) # get definitions num_defs = self.read_word(f) total_size -= 2 if num_defs > 0: defs = [] ihunk['defs'] = defs for b in xrange(num_defs): name_offset = self.read_word(f) def_value = self.read_word(f) def_type_flags = self.read_word(f) def_type = def_type_flags & 0x3fff def_flags = def_type_flags & 0xc000 total_size -= 6 name = self.get_index_name(strtab, name_offset) d = { 'name':name, 'value':def_value,'type':def_type} self.set_mem_flags(d,def_flags,14) defs.append(d) # align hunk if total_size == 2: self.read_word(f) elif total_size != 0: self.error_string = "%s has invalid padding" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE return RESULT_OK def parse_ext(self, f, hunk): ext_def = [] ext_ref = [] ext_common = [] hunk['ext_def'] = ext_def hunk['ext_ref'] = ext_ref hunk['ext_common'] = ext_common ext_type_size = 1 while ext_type_size > 0: # ext type | size ext_type_size = self.read_long(f) if ext_type_size < 0: self.error_string = "%s has invalid size" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE ext_type = ext_type_size >> EXT_TYPE_SHIFT ext_size = ext_type_size & EXT_TYPE_SIZE_MASK # ext name l,ext_name = self.read_name_size(f, ext_size) if l < 0: self.error_string = "%s has invalid name" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE elif l == 0: break # create local ext object ext = { 'type' : ext_type, 'name' : ext_name } # check and setup type name if not ext_names.has_key(ext_type): self.error_string = "%s has unspported ext entry %d" % (hunk['type_name'],ext_type) return RESULT_INVALID_HUNK_FILE ext['type_name'] = ext_names[ext_type] # ext common if ext_type == EXT_ABSCOMMON or ext_type == EXT_RELCOMMON: ext['common_size'] = self.read_long(f) ext_common.append(ext) # ext def elif ext_type == EXT_DEF or ext_type == EXT_ABS or ext_type == EXT_RES: ext['def'] = self.read_long(f) ext_def.append(ext) # ext ref else: num_refs = self.read_long(f) if num_refs == 0: num_refs = 1 refs = [] for a in xrange(num_refs): ref = self.read_long(f) refs.append(ref) ext['refs'] = refs ext_ref.append(ext) return RESULT_OK def parse_unit_or_name(self, f, hunk): l,n = self.read_name(f) if l < 0: self.error_string = "%s has invalid name" % (hunk['type_name']) return RESULT_INVALID_HUNK_FILE elif l > 0: hunk['name'] = n else: hunk['name'] = "" return RESULT_OK def set_mem_flags(self, hunk, flags, shift): f = flags >> shift if f & 1 == 1: hunk['memf'] = 'chip' elif f & 2 == 2: hunk['memf'] = 'fast' else: hunk['memf'] = '' # ----- public functions ----- """Read a hunk file and build internal hunk structure Return status and set self.error_string on failure """ def read_file(self, hfile, v37_compat=None): with open(hfile, "rb") as f: return self.read_file_obj(hfile, f, v37_compat) """Read a hunk from memory""" def read_mem(self, name, data, v37_compat=None): fobj = StringIO.StringIO(data) return self.read_file_obj(name, fobj, v37_compat) def read_file_obj(self, hfile, f, v37_compat): self.hunks = [] is_first_hunk = True was_end = False was_potentail_v37_hunk = False was_overlay = False self.error_string = None lib_size = 0 last_file_offset = 0 while True: hunk_file_offset = f.tell() # read hunk type hunk_raw_type = self.read_long(f) if hunk_raw_type == -1 or hunk_raw_type == -2: # tolerate extra byte at end if is_first_hunk: self.error_string = "No valid hunk file: '%s' is empty" % (hfile) return RESULT_NO_HUNK_FILE else: # eof break elif hunk_raw_type < 0: 
if is_first_hunk: self.error_string = "No valid hunk file: '%s' is too short" % (hfile) return RESULT_NO_HUNK_FILE else: self.error_string = "Error reading hunk type @%08x" % (f.tell()) return RESULT_INVALID_HUNK_FILE hunk_type = hunk_raw_type & HUNK_TYPE_MASK hunk_flags = hunk_raw_type & HUNK_FLAGS_MASK # check range of hunk type if not hunk_names.has_key(hunk_type): # no hunk file? if is_first_hunk: self.error_string = "No hunk file: '%s' type was %d" % (hfile, hunk_type) return RESULT_NO_HUNK_FILE elif was_end: # garbage after an end tag is ignored return RESULT_OK elif was_potentail_v37_hunk: # auto fix v37 -> reread whole file f.seek(0) return self.read_file_obj(hfile, f, True) elif was_overlay: # seems to be a custom overlay -> read to end of file ov_custom_data = f.read() self.hunks[-1]['custom_data'] = ov_custom_data return RESULT_OK else: self.error_string = "Invalid hunk type %d/%x found at @%08x" % (hunk_type,hunk_type,f.tell()) return RESULT_INVALID_HUNK_FILE else: # check for valid first hunk type if is_first_hunk and not self.is_valid_first_hunk_type(hunk_type): self.error_string = "No hunk file: '%s' first hunk type was %d" % (hfile, hunk_type) return RESULT_NO_HUNK_FILE is_first_hunk = False was_end = False was_potentail_v37_hunk = False was_overlay = False hunk = { 'type' : hunk_type, 'hunk_file_offset' : hunk_file_offset } self.hunks.append(hunk) hunk['type_name'] = hunk_names[hunk_type] self.set_mem_flags(hunk, hunk_flags, 30) # account for lib last_hunk_size = hunk_file_offset - last_file_offset if lib_size > 0: lib_size -= last_hunk_size if lib_size > 0: hunk['in_lib'] = True # V37 fix? if hunk_type == HUNK_DREL32: # try to fix automatically... if v37_compat == None: was_potentail_v37_hunk = True # fix was forced elif v37_compat: hunk_type = HUNK_RELOC32SHORT hunk['fixes'] = 'v37' # ----- HUNK_HEADER ----- if hunk_type == HUNK_HEADER: result = self.parse_header(f,hunk) # ----- HUNK_CODE/HUNK_DATA ------ elif hunk_type == HUNK_CODE or hunk_type == HUNK_DATA or hunk_type == HUNK_PPC_CODE: result = self.parse_code_or_data(f,hunk) # ---- HUNK_BSS ---- elif hunk_type == HUNK_BSS: result = self.parse_bss(f,hunk) # ----- HUNK_<reloc> ----- elif hunk_type == HUNK_RELRELOC32 or hunk_type == HUNK_ABSRELOC16 \ or hunk_type == HUNK_RELRELOC8 or hunk_type == HUNK_RELRELOC16 or hunk_type == HUNK_ABSRELOC32 \ or hunk_type == HUNK_DREL32 or hunk_type == HUNK_DREL16 or hunk_type == HUNK_DREL8 \ or hunk_type == HUNK_RELRELOC26: result = self.parse_reloc(f,hunk) # auto fix v37 bug? 
if hunk_type == HUNK_DREL32 and result != RESULT_OK and v37_compat == None: f.seek(0) return self.read_file_obj(hfile, f, True) # ---- HUNK_<reloc short> ----- elif hunk_type == HUNK_RELOC32SHORT: result = self.parse_reloc_short(f,hunk) # ----- HUNK_SYMBOL ----- elif hunk_type == HUNK_SYMBOL: result = self.parse_symbol(f,hunk) # ----- HUNK_DEBUG ----- elif hunk_type == HUNK_DEBUG: result = self.parse_debug(f,hunk) # ----- HUNK_END ----- elif hunk_type == HUNK_END: was_end = True result = RESULT_OK # ----- HUNK_OVERLAY ----- elif hunk_type == HUNK_OVERLAY: result = self.parse_overlay(f,hunk) was_overlay = True # ----- HUNK_BREAK ----- elif hunk_type == HUNK_BREAK: result = RESULT_OK # ----- HUNK_LIB ----- elif hunk_type == HUNK_LIB: result,lib_size = self.parse_lib(f,hunk) lib_size += 8 # add size of HUNK_LIB itself # ----- HUNK_INDEX ----- elif hunk_type == HUNK_INDEX: result = self.parse_index(f,hunk) # ----- HUNK_EXT ----- elif hunk_type == HUNK_EXT: result = self.parse_ext(f,hunk) # ----- HUNK_UNIT ----- elif hunk_type == HUNK_UNIT or hunk_type == HUNK_NAME: result = self.parse_unit_or_name(f,hunk) # ----- oops! unsupported hunk ----- else: self.error_string = "unsupported hunk %d" % (hunk_type) return RESULT_UNSUPPORTED_HUNKS # a parse error occurred if result != RESULT_OK: return result last_file_offset = hunk_file_offset return RESULT_OK """Return a list with all the hunk type names that were found """ def get_hunk_summary(self): return self.get_struct_summary(self.hunks) # ---------- Build Segments from Hunks ---------- def build_loadseg(self): in_header = True seek_begin = False segment = None segment_list = self.segments for e in self.hunks: hunk_type = e['type'] # check for end of header if in_header and hunk_type in loadseg_valid_begin_hunks: in_header = False seek_begin = True if in_header: if hunk_type == HUNK_HEADER: # we are in an overlay! 
if self.overlay != None: segment_list = [] self.overlay_segments.append(segment_list) self.overlay_headers.append(e) else: # set load_seg() header self.header = e # start a new segment segment = [] # setup hunk counter hunk_no = e['first_hunk'] # we allow a debug hunk in header for SAS compatibility elif hunk_type == HUNK_DEBUG: segment.append(e) else: self.error_string = "Expected header in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False elif seek_begin: # a new hunk shall begin if hunk_type in loadseg_valid_begin_hunks: segment = [e] segment_list.append(segment) seek_header = False seek_begin = False e['hunk_no'] = hunk_no e['alloc_size'] = self.header['hunks'][hunk_no]['size'] hunk_no += 1 # add an extra overlay "hunk" elif hunk_type == HUNK_OVERLAY: # assume hunk to be empty if self.overlay != None: self.error_string = "Multiple overlay in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False self.overlay = e self.overlay_headers = [] self.overlay_segments = [] in_header = True # break elif hunk_type == HUNK_BREAK: # assume hunk to be empty in_header = True # broken hunk: multiple END or other hunks elif hunk_type in [HUNK_END, HUNK_NAME, HUNK_DEBUG]: pass else: self.error_string = "Expected hunk start in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False else: # an extra block in hunk or end is expected if hunk_type == HUNK_END: seek_begin = True # contents of hunk elif hunk_type in loadseg_valid_extra_hunks or hunk_type == HUNK_DREL32: segment.append(e) # broken hunk file without END tag elif hunk_type in loadseg_valid_begin_hunks: segment = [e] segment_list.append(segment) seek_header = False seek_begin = False e['hunk_no'] = hunk_no e['alloc_size'] = self.header['hunks'][hunk_no]['size'] hunk_no += 1 # unecpected hunk?! else: self.error_string = "Unexpected hunk extra in loadseg: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False return True def build_unit(self): force_unit = True in_hunk = False name = None segment = None unit = None self.units = [] unit_no = 0 for e in self.hunks: hunk_type = e['type'] # optional unit as first entry if hunk_type == HUNK_UNIT: unit = {} unit['name'] = e['name'] unit['unit_no'] = unit_no unit['segments'] = [] unit['unit'] = e unit_no += 1 self.units.append(unit) force_unit = False hunk_no = 0 elif force_unit: self.error_string = "Expected name hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False elif not in_hunk: # begin a named hunk if hunk_type == HUNK_NAME: name = e['name'] # main hunk block elif hunk_type in unit_valid_main_hunks: segment = [e] unit['segments'].append(segment) # give main block the NAME if name != None: e['name'] = name name = None e['hunk_no'] = hunk_no hunk_no += 1 in_hunk = True # broken hunk: ignore multi ENDs elif hunk_type == HUNK_END: pass else: self.error_string = "Expected main hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False else: # a hunk is finished if hunk_type == HUNK_END: in_hunk = False # contents of hunk elif hunk_type in unit_valid_extra_hunks: segment.append(e) # unecpected hunk?! 
else: self.error_string = "Unexpected hunk in unit: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False return True def build_lib(self): self.libs = [] lib_segments = [] seek_lib = True seek_main = False for e in self.hunks: hunk_type = e['type'] # seeking for a LIB hunk if seek_lib: if hunk_type == HUNK_LIB: segment_list = [] lib_segments.append(segment_list) seek_lib = False seek_main = True hunk_no = 0 # get start address of lib hunk in file lib_file_offset = e['lib_file_offset'] else: self.error_string = "Expected lib hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False elif seek_main: # end of lib? -> index! if hunk_type == HUNK_INDEX: seek_main = False seek_lib = True lib_units = [] if not self.resolve_index_hunks(e, segment_list, lib_units): self.error_string = "Error resolving index hunks!" return False lib = {} lib['units'] = lib_units lib['lib_no'] = len(self.libs) lib['index'] = e self.libs.append(lib) # start of a hunk elif hunk_type in unit_valid_main_hunks: segment = [e] e['hunk_no'] = hunk_no hunk_no += 1 segment_list.append(segment) seek_main = False # calc relative lib address hunk_lib_offset = e['hunk_file_offset'] - lib_file_offset e['hunk_lib_offset'] = hunk_lib_offset else: self.error_string = "Expected main hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False else: # end hunk if hunk_type == HUNK_END: seek_main = True # extra contents elif hunk_type in unit_valid_extra_hunks: segment.append(e) else: self.error_string = "Unexpected hunk in lib: %s %d/%x" % (e['type_name'], hunk_type, hunk_type) return False return True """Resolve hunks referenced in the index""" def resolve_index_hunks(self, index, segment_list, lib_units): units = index['units'] no = 0 for unit in units: lib_unit = {} unit_segments = [] lib_unit['segments'] = unit_segments lib_unit['name'] = unit['name'] lib_unit['unit_no'] = no lib_unit['index_unit'] = unit lib_units.append(lib_unit) no += 1 # try to find segment with start offset hunk_offset = unit['hunk_begin_offset'] found = False for segment in segment_list: hunk_no = segment[0]['hunk_no'] lib_off = segment[0]['hunk_lib_offset'] / 4 # is in longwords if lib_off == hunk_offset: # found segment num_segs = len(unit['hunk_infos']) for i in xrange(num_segs): info = unit['hunk_infos'][i] seg = segment_list[hunk_no+i] unit_segments.append(seg) # renumber hunk seg[0]['hunk_no'] = i seg[0]['name'] = info['name'] seg[0]['index_hunk'] = info found = True if not found: return False return True """From the hunk list build a set of segments that form the actual binary""" def build_segments(self): self.segments = [] if len(self.hunks) == 0: self.type = TYPE_UNKNOWN return False # determine type of file from first hunk first_hunk_type = self.hunks[0]['type'] if first_hunk_type == HUNK_HEADER: self.type = TYPE_LOADSEG return self.build_loadseg() elif first_hunk_type == HUNK_UNIT: self.type = TYPE_UNIT return self.build_unit() elif first_hunk_type == HUNK_LIB: self.type = TYPE_LIB return self.build_lib() else: self.type = TYPE_UNKNOWN return False """Return a summary of the created segment structure""" def get_segment_summary(self): return self.get_struct_summary(self.segments) def get_overlay_segment_summary(self): if self.overlay_segments != None: return self.get_struct_summary(self.overlay_segments) else: return None def get_libs_summary(self): if self.libs != None: return self.get_struct_summary(self.libs) else: return None def get_units_summary(self): if self.units != None: return 
self.get_struct_summary(self.units) else: return None
bsd-2-clause
-663,237,826,273,252,900
30.197154
115
0.555052
false
3.417724
false
false
false
hankcs/HanLP
hanlp/components/parsers/conll.py
1
2534
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2019-12-26 15:37
from typing import Union

from hanlp.utils.io_util import get_resource, TimingFileIterator
from hanlp.utils.log_util import logger


def collapse_enhanced_empty_nodes(sent: list):
    collapsed = []
    for cells in sent:
        if isinstance(cells[0], float):
            id = cells[0]
            head, deprel = cells[8].split(':', 1)
            for x in sent:
                arrows = [s.split(':', 1) for s in x[8].split('|')]
                arrows = [(head, f'{head}:{deprel}>{r}') if h == str(id) else (h, r) for h, r in arrows]
                arrows = sorted(arrows)
                x[8] = '|'.join(f'{h}:{r}' for h, r in arrows)
            sent[head][7] += f'>{cells[7]}'
        else:
            collapsed.append(cells)
    return collapsed


def read_conll(filepath: Union[str, TimingFileIterator], underline_to_none=False,
               enhanced_collapse_empty_nodes=False):
    sent = []
    if isinstance(filepath, str):
        filepath: str = get_resource(filepath)
        if filepath.endswith('.conllu') and enhanced_collapse_empty_nodes is None:
            enhanced_collapse_empty_nodes = True
        src = open(filepath, encoding='utf-8')
    else:
        src = filepath
    for idx, line in enumerate(src):
        if line.startswith('#'):
            continue
        line = line.strip()
        cells = line.split('\t')
        if line and cells:
            if enhanced_collapse_empty_nodes and '.' in cells[0]:
                cells[0] = float(cells[0])
                cells[6] = None
            else:
                if '-' in cells[0] or '.' in cells[0]:
                    # sent[-1][1] += cells[1]
                    continue
                cells[0] = int(cells[0])
                if cells[6] != '_':
                    try:
                        cells[6] = int(cells[6])
                    except ValueError:
                        cells[6] = 0
                        logger.exception(f'Wrong CoNLL format {filepath}:{idx + 1}\n{line}')
            if underline_to_none:
                for i, x in enumerate(cells):
                    if x == '_':
                        cells[i] = None
            sent.append(cells)
        else:
            if enhanced_collapse_empty_nodes:
                sent = collapse_enhanced_empty_nodes(sent)
            yield sent
            sent = []
    if sent:
        if enhanced_collapse_empty_nodes:
            sent = collapse_enhanced_empty_nodes(sent)
        yield sent
    src.close()
apache-2.0
6,535,195,912,218,275,000
33.712329
119
0.500395
false
3.874618
false
false
false
gautamMalu/XenInBox
pyanaconda/rescue.py
1
18256
#
# rescue.py - anaconda rescue mode setup
#
# Copyright (C) 2001, 2002, 2003, 2004 Red Hat, Inc. All rights reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author(s): Mike Fulbright <msf@redhat.com>
#            Jeremy Katz <katzj@redhat.com>
#
import sys
import os
from pyanaconda import iutil
import shutil
import time
import re
import subprocess

from snack import ButtonChoiceWindow, ListboxChoiceWindow, SnackScreen

from pyanaconda.constants import ANACONDA_CLEANUP
from pyanaconda.constants_text import TEXT_OK_BUTTON, TEXT_NO_BUTTON, TEXT_YES_BUTTON
from pyanaconda.text import WaitWindow, OkCancelWindow, ProgressWindow, PassphraseEntryWindow
from pyanaconda.flags import flags
from pyanaconda.installinterfacebase import InstallInterfaceBase
from pyanaconda.i18n import _

from blivet import mountExistingSystem
from blivet.errors import StorageError, DirtyFSError
from blivet.devices import LUKSDevice

from pykickstart.constants import KS_REBOOT, KS_SHUTDOWN

import meh.ui.text

import logging
log = logging.getLogger("anaconda")


class RescueInterface(InstallInterfaceBase):
    def waitWindow(self, title, text):
        return WaitWindow(self.screen, title, text)

    def progressWindow(self, title, text, total, updpct=0.05, pulse=False):
        return ProgressWindow(self.screen, title, text, total, updpct, pulse)

    def detailedMessageWindow(self, title, text, longText=None, ty="ok",
                              default=None, custom_icon=None,
                              custom_buttons=None, expanded=False):
        return self.messageWindow(title, text, ty, default, custom_icon,
                                  custom_buttons)

    def messageWindow(self, title, text, ty="ok", default=None,
                      custom_icon=None, custom_buttons=None):
        if custom_buttons is None:
            custom_buttons = []

        if ty == "ok":
            ButtonChoiceWindow(self.screen, title, text,
                               buttons=[TEXT_OK_BUTTON])
        elif ty == "yesno":
            if default and default == "no":
                btnlist = [TEXT_NO_BUTTON, TEXT_YES_BUTTON]
            else:
                btnlist = [TEXT_YES_BUTTON, TEXT_NO_BUTTON]
            rc = ButtonChoiceWindow(self.screen, title, text,
                                    buttons=btnlist)
            if rc == "yes":
                return 1
            else:
                return 0
        elif ty == "custom":
            tmpbut = []
            for but in custom_buttons:
                tmpbut.append(but.replace("_", ""))

            rc = ButtonChoiceWindow(self.screen, title, text, width=60,
                                    buttons=tmpbut)

            idx = 0
            for b in tmpbut:
                if b.lower() == rc:
                    return idx
                idx = idx + 1
            return 0
        else:
            return OkCancelWindow(self.screen, title, text)

    def passphraseEntryWindow(self, device):
        w = PassphraseEntryWindow(self.screen, device)
        passphrase = w.run()
        w.pop()
        return passphrase

    @property
    def meh_interface(self):
        return self._meh_interface

    @property
    def tty_num(self):
        return 1

    def shutdown(self):
        self.screen.finish()

    def suspend(self):
        pass

    def resume(self):
        pass

    def __init__(self):
        InstallInterfaceBase.__init__(self)
        self.screen = SnackScreen()
        self._meh_interface = meh.ui.text.TextIntf()


def makeFStab(instPath=""):
    if os.access("/proc/mounts", os.R_OK):
        f = open("/proc/mounts", "r")
        buf = f.read()
        f.close()
    else:
        buf = ""

    try:
        f = open(instPath + "/etc/fstab", "a")
        if buf:
            f.write(buf)
        f.close()
    except IOError as e:
        log.info("failed to write /etc/fstab: %s", e)


# make sure they have a resolv.conf in the chroot
def makeResolvConf(instPath):
    if flags.imageInstall:
        return

    if not os.access("/etc/resolv.conf", os.R_OK):
        return

    if os.access("%s/etc/resolv.conf" % (instPath,), os.R_OK):
        f = open("%s/etc/resolv.conf" % (instPath,), "r")
        buf = f.read()
        f.close()
    else:
        buf = ""

    # already have a nameserver line, don't worry about it
    if buf.find("nameserver") != -1:
        return

    f = open("/etc/resolv.conf", "r")
    buf = f.read()
    f.close()

    # no nameserver, we can't do much about it
    if buf.find("nameserver") == -1:
        return

    shutil.copyfile("%s/etc/resolv.conf" % (instPath,),
                    "%s/etc/resolv.conf.bak" % (instPath,))
    f = open("%s/etc/resolv.conf" % (instPath,), "w+")
    f.write(buf)
    f.close()


def runShell(screen=None, msg=""):
    if screen:
        screen.suspend()

    print
    if msg:
        print(msg)

    if flags.imageInstall:
        print(_("Run %s to unmount the system when you are finished.")
              % ANACONDA_CLEANUP)
    else:
        print(_("When finished please exit from the shell and your "
                "system will reboot."))
    print

    proc = None

    if os.path.exists("/usr/bin/firstaidkit-qs"):
        proc = subprocess.Popen(["/usr/bin/firstaidkit-qs"])
        proc.wait()

    if proc is None or proc.returncode != 0:
        if os.path.exists("/bin/bash"):
            iutil.execConsole()
        else:
            print(_("Unable to find /bin/bash to execute! Not starting shell"))
            time.sleep(5)

    if screen:
        screen.finish()


def _exception_handler_wrapper(orig_except_handler, screen, *args):
    """
    Helper function that wraps the exception handler with snack shutdown.

    :param orig_except_handler: original exception handler that should be run
                                after the wrapping changes are done
    :type orig_except_handler: exception handler as can be set as sys.excepthook
    :param screen: snack screen that should be shut down before further actions
    :type screen: snack screen

    """

    screen.finish()
    return orig_except_handler(*args)


def _unlock_devices(intf, storage):
    try_passphrase = None
    for device in storage.devices:
        if device.format.type == "luks":
            skip = False
            unlocked = False
            while not (skip or unlocked):
                if try_passphrase is None:
                    passphrase = intf.passphraseEntryWindow(device.name)
                else:
                    passphrase = try_passphrase

                if passphrase is None:
                    # canceled
                    skip = True
                else:
                    device.format.passphrase = passphrase
                    try:
                        device.setup()
                        device.format.setup()
                        luks_dev = LUKSDevice(device.format.mapName,
                                              parents=[device],
                                              exists=True)
                        storage.devicetree._addDevice(luks_dev)
                        storage.devicetree.populate()
                        unlocked = True
                        # try to use the same passphrase for other devices
                        try_passphrase = passphrase
                    except StorageError as serr:
                        log.error("Failed to unlock %s: %s", device.name, serr)
                        device.teardown(recursive=True)
                        device.format.passphrase = None
                        try_passphrase = None


def doRescue(intf, rescue_mount, ksdata):
    import blivet

    # XXX: hook the exception handler wrapper that turns off snack first
    orig_hook = sys.excepthook
    sys.excepthook = lambda ty, val, tb: _exception_handler_wrapper(orig_hook,
                                                                    intf.screen,
                                                                    ty, val, tb)

    for f in ["services", "protocols", "group", "joe", "man.config",
              "nsswitch.conf", "selinux", "mke2fs.conf"]:
        try:
            os.symlink('/mnt/runtime/etc/' + f, '/etc/' + f)
        except OSError:
            pass

    # Early shell access with no disk access attempts
    if not rescue_mount:
        # the %post should be responsible for mounting all needed file systems
        # NOTE: 1st script must be bash or simple python as nothing else might
        # be available in the rescue image
        if flags.automatedInstall and ksdata.scripts:
            from pyanaconda.kickstart import runPostScripts
            runPostScripts(ksdata.scripts)
        else:
            runShell()

        sys.exit(0)

    if flags.automatedInstall:
        readOnly = ksdata.rescue.romount
    else:
        # prompt to see if we should try and find root filesystem and mount
        # everything in /etc/fstab on that root
        while True:
            rc = ButtonChoiceWindow(intf.screen, _("Rescue"),
                _("The rescue environment will now attempt to find your "
                  "Linux installation and mount it under the directory "
                  "%s. You can then make any changes required to your "
                  "system. If you want to proceed with this step choose "
                  "'Continue'. You can also choose to mount your file systems "
                  "read-only instead of read-write by choosing 'Read-Only'. "
                  "\n\n"
                  "If for some reason this process fails you can choose 'Skip' "
                  "and this step will be skipped and you will go directly to a "
                  "command shell.\n\n") % (iutil.getSysroot(),),
                [_("Continue"), _("Read-Only"), _("Skip")])

            if rc == _("Skip").lower():
                runShell(intf.screen)
                sys.exit(0)
            else:
                readOnly = rc == _("Read-Only").lower()

            break

    sto = blivet.Blivet(ksdata=ksdata)
    blivet.storageInitialize(sto, ksdata, [])
    _unlock_devices(intf, sto)
    roots = blivet.findExistingInstallations(sto.devicetree)

    if not roots:
        root = None
    elif len(roots) == 1:
        root = roots[0]
    else:
        height = min(len(roots), 12)
        if height == 12:
            scroll = 1
        else:
            scroll = 0

        lst = []
        for root in roots:
            lst.append("%s" % root.name)

        (button, choice) = \
            ListboxChoiceWindow(intf.screen, _("System to Rescue"),
                                _("Which device holds the root partition "
                                  "of your installation?"), lst,
                                [_("OK"), _("Exit")], width=30,
                                scroll=scroll, height=height,
                                help="multipleroot")

        if button == _("Exit").lower():
            root = None
        else:
            root = roots[choice]

    rootmounted = False

    if root:
        try:
            if not flags.imageInstall:
                msg = _("The system will reboot automatically when you exit "
                        "from the shell.")
            else:
                msg = _("Run %s to unmount the system "
                        "when you are finished.") % ANACONDA_CLEANUP

            try:
                mountExistingSystem(sto.fsset, root.device,
                                    allowDirty=True,
                                    readOnly=readOnly)
            except DirtyFSError:
                if flags.automatedInstall:
                    log.error("System had dirty file systems which you chose not to mount")
                else:
                    ButtonChoiceWindow(intf.screen, _("Rescue"),
                        _("Your system had dirty file systems which you chose not "
                          "to mount. Press return to get a shell from which "
                          "you can fsck and mount your partitions. %s") % msg,
                        [_("OK")], width=50)
                rootmounted = False
            else:
                if flags.automatedInstall:
                    log.info("System has been mounted under: %s", iutil.getSysroot())
                else:
                    ButtonChoiceWindow(intf.screen, _("Rescue"),
                        _("Your system has been mounted under %(rootPath)s.\n\n"
                          "Press <return> to get a shell. If you would like to "
                          "make your system the root environment, run the command:\n\n"
                          "\tchroot %(rootPath)s\n\n%(msg)s") %
                        {'rootPath': iutil.getSysroot(),
                         'msg': msg},
                        [_("OK")])
                rootmounted = True

                # now turn on swap
                if not readOnly:
                    try:
                        sto.turnOnSwap()
                    except StorageError:
                        log.error("Error enabling swap")

                # and selinux too
                if flags.selinux:
                    # we have to catch the possible exception
                    # because we support read-only mounting
                    try:
                        fd = open("%s/.autorelabel" % iutil.getSysroot(), "w+")
                        fd.close()
                    except IOError:
                        log.warning("cannot touch /.autorelabel")

                # set a library path to use mounted fs
                libdirs = os.environ.get("LD_LIBRARY_PATH", "").split(":")
                mounted = map(lambda dir: "/mnt/sysimage%s" % dir, libdirs)
                os.environ["LD_LIBRARY_PATH"] = ":".join(libdirs + mounted)

                # find groff data dir
                gversion = None
                try:
                    glst = os.listdir("/mnt/sysimage/usr/share/groff")
                except OSError:
                    pass
                else:
                    # find a directory which is a numeral, it's where
                    # data files are
                    for gdir in glst:
                        if re.match(r'\d[.\d]+\d$', gdir):
                            gversion = gdir
                            break

                if gversion is not None:
                    gpath = "/mnt/sysimage/usr/share/groff/" + gversion
                    os.environ["GROFF_FONT_PATH"] = gpath + '/font'
                    os.environ["GROFF_TMAC_PATH"] = "%s:/mnt/sysimage/usr/share/groff/site-tmac" % (gpath + '/tmac',)

                # do we have bash?
                try:
                    if os.access("/usr/bin/bash", os.R_OK):
                        os.symlink("/usr/bin/bash", "/bin/bash")
                except OSError:
                    pass
        except (ValueError, LookupError, SyntaxError, NameError):
            raise
        except Exception as e: # pylint: disable=W0703
            log.error("doRescue caught exception: %s", e)
            if flags.automatedInstall:
                log.error("An error occurred trying to mount some or all of your system")
            else:
                if not flags.imageInstall:
                    msg = _("The system will reboot automatically when you "
                            "exit from the shell.")
                else:
                    msg = _("Run %s to unmount the system "
                            "when you are finished.") % ANACONDA_CLEANUP

                ButtonChoiceWindow(intf.screen, _("Rescue"),
                    _("An error occurred trying to mount some or all of your "
                      "system. Some of it may be mounted under %s.\n\n"
                      "Press <return> to get a shell.") % iutil.getSysroot() + msg,
                    [_("OK")])
    else:
        if flags.automatedInstall and ksdata.reboot.action in [KS_REBOOT, KS_SHUTDOWN]:
            log.info("No Linux partitions found")
            intf.screen.finish()
            print(_("You don't have any Linux partitions. Rebooting.\n"))
            sys.exit(0)
        else:
            if not flags.imageInstall:
                msg = _(" The system will reboot automatically when you exit "
                        "from the shell.")
            else:
                msg = ""
            ButtonChoiceWindow(intf.screen, _("Rescue Mode"),
                               _("You don't have any Linux partitions. Press "
                                 "return to get a shell.%s") % msg,
                               [_("OK")], width=50)

    msgStr = ""
    if rootmounted and not readOnly:
        sto.makeMtab()
        try:
            makeResolvConf(iutil.getSysroot())
        except (OSError, IOError) as e:
            log.error("error making a resolv.conf: %s", e)
        msgStr = _("Your system is mounted under the %s directory.") % iutil.getSysroot()
        ButtonChoiceWindow(intf.screen, _("Rescue"), msgStr, [_("OK")])

    # we do not need ncurses anymore, shut them down
    intf.shutdown()

    # create /etc/fstab in ramdisk, so it is easier to work with RO mounted filesystems
    makeFStab()

    # run %post if we've mounted everything
    if rootmounted and not readOnly and flags.automatedInstall:
        from pyanaconda.kickstart import runPostScripts
        runPostScripts(ksdata.scripts)

    # start shell if reboot wasn't requested
    if not flags.automatedInstall or ksdata.reboot.action not in [KS_REBOOT, KS_SHUTDOWN]:
        runShell(msg=msgStr)

    sys.exit(0)
gpl-2.0
1,907,631,276,107,746,300
36.030426
117
0.526731
false
4.390572
false
false
false
WilJoey/tn_ckan
ckan/migration/versions/037_role_anon_editor.py
1
1728
from sqlalchemy import *
from sqlalchemy.sql import select, and_
from migrate import *

import logging

log = logging.getLogger(__name__)


def upgrade(migrate_engine):
    '''#1066 Change Visitor role on System from "reader" to "anon_editor".'''
    metadata = MetaData(migrate_engine)

    # get visitor ID
    user = Table('user', metadata, autoload=True)
    s = select([user.c.id, user.c.name], user.c.name == u'visitor')
    results = migrate_engine.execute(s).fetchall()
    if len(results) == 0:
        log.debug('No visitor on the system - obviously init hasn\'t been run '
                  'yet and that will init visitor to an anon_editor')
        return
    visitor_id, visitor_name = results[0]

    # find visitor role as reader on system
    uor = Table('user_object_role', metadata, autoload=True)
    visitor_system_condition = and_(uor.c.context == u'System',
                                    uor.c.user_id == visitor_id)
    s = select([uor.c.context, uor.c.user_id, uor.c.role],
               visitor_system_condition)
    results = migrate_engine.execute(s).fetchall()
    if len(results) != 1:
        log.warn('Could not find a Right for a Visitor on the System')
        return
    context, user_id, role = results[0]

    if role != 'reader':
        log.info('Visitor right for the System is not "reader", so not '
                 'upgrading it to anon_editor.')
        return

    # change visitor role to anon_editor
    log.info('Visitor is a "reader" on the System, so upgrading it to '
             '"anon_editor".')
    sql = uor.update().where(visitor_system_condition).\
        values(role=u'anon_editor')
    migrate_engine.execute(sql)


def downgrade(migrate_engine):
    raise NotImplementedError()
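
# Hypothetical inverse of upgrade(), shown for illustration only: it would
# revert the Visitor's System role from "anon_editor" back to "reader". The
# helper name "_downgrade_sketch" is invented and is never called by the
# migration framework; the real downgrade() above deliberately raises
# NotImplementedError.
def _downgrade_sketch(migrate_engine):
    metadata = MetaData(migrate_engine)

    # look up the visitor user, exactly as upgrade() does
    user = Table('user', metadata, autoload=True)
    results = migrate_engine.execute(
        select([user.c.id], user.c.name == u'visitor')).fetchall()
    if len(results) == 0:
        return
    visitor_id = results[0][0]

    # flip the System role back, but only if it is currently anon_editor
    uor = Table('user_object_role', metadata, autoload=True)
    condition = and_(uor.c.context == u'System',
                     uor.c.user_id == visitor_id,
                     uor.c.role == u'anon_editor')
    migrate_engine.execute(
        uor.update().where(condition).values(role=u'reader'))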
mit
-45,108,048,257,190,770
35
101
0.634259
false
3.74026
false
false
false
TIGER-NET/Temporal_profile_tool
ui/dlgabout.py
1
2609
# -*- coding: utf-8 -*-
"""
***************************************************************************
    temporalprofileplugin.py
    -------------------------------------
    Copyright (C) 2014 TIGER-NET (www.tiger-net.org)
    Based on Profile tool plugin:
        Copyright (C) 2012 Patrice Verchere
***************************************************************************
* This plugin is part of the Water Observation Information System (WOIS)  *
* developed under the TIGER-NET project funded by the European Space      *
* Agency as part of the long-term TIGER initiative aiming at promoting    *
* the use of Earth Observation (EO) for improved Integrated Water         *
* Resources Management (IWRM) in Africa.                                  *
*                                                                         *
* WOIS is free software, i.e. you can redistribute it and/or modify       *
* it under the terms of the GNU General Public License as published       *
* by the Free Software Foundation, either version 3 of the License,       *
* or (at your option) any later version.                                  *
*                                                                         *
* WOIS is distributed in the hope that it will be useful, but WITHOUT ANY *
* WARRANTY; without even the implied warranty of MERCHANTABILITY or       *
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License    *
* for more details.                                                       *
*                                                                         *
* You should have received a copy of the GNU General Public License along *
* with this program. If not, see <http://www.gnu.org/licenses/>.          *
***************************************************************************
"""

from PyQt4 import uic
from PyQt4.QtCore import QSettings
from PyQt4.QtGui import QDialog
import platform
import os

uiFilePath = os.path.abspath(os.path.join(os.path.dirname(__file__), 'about.ui'))
FormClass = uic.loadUiType(uiFilePath)[0]


class DlgAbout(QDialog, FormClass):

    def __init__(self, parent=None):
        QDialog.__init__(self, parent)
        self.setupUi(self)

        fp = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")),
                          "metadata.txt")
        iniText = QSettings(fp, QSettings.IniFormat)
        verno = iniText.value("version")
        name = iniText.value("name")
        description = iniText.value("description")

        self.title.setText(name)
        self.description.setText(description + " - " + verno)
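
# Hypothetical usage sketch (not part of this file): the dialog is typically
# opened modally from a QGIS plugin menu action; "iface" (a QgisInterface
# instance) is an assumption here, not something this module provides.
#
#   dlg = DlgAbout(iface.mainWindow())
#   dlg.exec_()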
gpl-3.0
-6,917,851,896,494,671,000
41.770492
106
0.520889
false
4.422034
false
false
false
nikkomidoy/project_soa
tests/engine.py
1
5462
from subprocess import call
from os import path
import hitchpostgres
import hitchselenium
import hitchpython
import hitchserve
import hitchredis
import hitchtest
import hitchsmtp

# Get directory above this file
PROJECT_DIRECTORY = path.abspath(path.join(path.dirname(__file__), '..'))


class ExecutionEngine(hitchtest.ExecutionEngine):
    """Engine for orchestrating and interacting with the app."""

    def set_up(self):
        """Ensure virtualenv present, then run all services."""
        python_package = hitchpython.PythonPackage(
            python_version=self.settings['python_version']
        )
        python_package.build()

        call([
            python_package.pip, "install", "-r",
            path.join(PROJECT_DIRECTORY, "requirements/local.txt")
        ])

        postgres_package = hitchpostgres.PostgresPackage()
        postgres_package.build()

        redis_package = hitchredis.RedisPackage()
        redis_package.build()

        self.services = hitchserve.ServiceBundle(
            project_directory=PROJECT_DIRECTORY,
            startup_timeout=float(self.settings["startup_timeout"]),
            shutdown_timeout=float(self.settings["shutdown_timeout"]),
        )

        postgres_user = hitchpostgres.PostgresUser("project_soa", "password")

        self.services['Postgres'] = hitchpostgres.PostgresService(
            postgres_package=postgres_package,
            users=[postgres_user, ],
            databases=[hitchpostgres.PostgresDatabase("project_soa", postgres_user), ]
        )

        self.services['HitchSMTP'] = hitchsmtp.HitchSMTPService(port=1025)

        self.services['Django'] = hitchpython.DjangoService(
            python=python_package.python,
            port=8000,
            settings="project_soa.settings.local",
            needs=[self.services['Postgres'], ],
            env_vars=self.settings['environment_variables'],
        )

        self.services['Redis'] = hitchredis.RedisService(
            redis_package=redis_package,
            port=16379,
        )

        self.services['Firefox'] = hitchselenium.SeleniumService(
            xvfb=self.settings.get("xvfb", False),
            no_libfaketime=True,
        )

        # import hitchcron
        # self.services['Cron'] = hitchcron.CronService(
        #     run=self.services['Django'].manage("trigger").command,
        #     every=1,
        #     needs=[ self.services['Django'], ],
        # )

        self.services.startup(interactive=False)

        # Docs : https://hitchtest.readthedocs.org/en/latest/plugins/hitchselenium.html
        self.driver = self.services['Firefox'].driver

        self.webapp = hitchselenium.SeleniumStepLibrary(
            selenium_webdriver=self.driver,
            wait_for_timeout=5,
        )

        # Add selenium steps
        self.click = self.webapp.click
        self.wait_to_appear = self.webapp.wait_to_appear
        self.wait_to_contain = self.webapp.wait_to_contain
        self.wait_for_any_to_contain = self.webapp.wait_for_any_to_contain
        self.click_and_dont_wait_for_page_load = self.webapp.click_and_dont_wait_for_page_load

        # Configure selenium driver
        self.driver.set_window_size(self.settings['window_size']['width'],
                                    self.settings['window_size']['height'])
        self.driver.set_window_position(0, 0)
        self.driver.implicitly_wait(2.0)
        self.driver.accept_next_alert = True

    def pause(self, message=None):
        """Stop. IPython time."""
        if hasattr(self, 'services'):
            self.services.start_interactive_mode()
        self.ipython(message)
        if hasattr(self, 'services'):
            self.services.stop_interactive_mode()

    def load_website(self):
        """Navigate to website in Firefox."""
        self.driver.get(self.services['Django'].url())
        self.click("djHideToolBarButton")

    def fill_form(self, **kwargs):
        """Fill in a form with id=value."""
        for element, text in kwargs.items():
            self.driver.find_element_by_id(element).send_keys(text)

    def confirm_emails_sent(self, number):
        """Count number of emails sent by app."""
        assert len(self.services['HitchSMTP'].logs.json()) == int(number)

    def click_on_link_in_last_email(self, which=1):
        """Click on the nth link in the last email sent."""
        self.driver.get(
            self.services['HitchSMTP'].logs.json()[-1]['links'][which - 1]
        )

    def wait_for_email(self, containing=None):
        """Wait for, and return email."""
        self.services['HitchSMTP'].logs.out.tail.until_json(
            lambda email: containing in email['payload'] or containing in email['subject'],
            timeout=25,
            lines_back=1,
        )

    def time_travel(self, days=""):
        """Make all services think that time has skipped forward."""
        self.services.time_travel(days=int(days))

    def on_failure(self):
        """Stop and IPython."""
        if not self.settings['quiet']:
            if self.settings.get("pause_on_failure", False):
                self.pause(message=self.stacktrace.to_template())

    def on_success(self):
        """Pause on success if enabled."""
        if self.settings.get("pause_on_success", False):
            self.pause(message="SUCCESS")

    def tear_down(self):
        """Shut down services required to run your test."""
        if hasattr(self, 'services'):
            self.services.shutdown()
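
# Hypothetical test-scenario sketch (not taken from the repo): hitch maps step
# names in a test YAML to the methods above, so a scenario might read like the
# following. The element ids and values are invented for illustration.
#
#   scenario:
#     - load website
#     - fill form:
#         id_email: user@example.com
#     - click: submit
#     - wait for email:
#         containing: Welcome
#     - confirm emails sent: 1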
mit
-8,286,656,587,252,442,000
34.23871
114
0.617906
false
3.99269
false
false
false
GeoMop/GeoMop
testing_integration/Analysis/store_restore.py
1
3919
import os
import shutil
import subprocess
import time  # time.sleep() is used in the run loops below

from client_pipeline.mj_preparation import *
from pipeline.pipeline_processor import *

# setting testing directory
test_dir = "d:/test/store_restore"

# remove old files
workspace = os.path.join(test_dir, "workspace")
shutil.rmtree(workspace, ignore_errors=True)

# copy files to testing directory
shutil.copytree("store_restore_res/workspace", workspace)


# ------------
# analysis an1
# ------------

# prepare mj
analysis = "an1"
mj = "mj1"
python_script = "s.py"
pipeline_name = "Pipeline_5"
err, input_files = MjPreparation.prepare(workspace=workspace, analysis=analysis,
                                         mj=mj, python_script=python_script,
                                         pipeline_name=pipeline_name)
if len(err) > 0:
    for e in err:
        print(e)
    exit()

# mj_config_dir
mj_config_dir = os.path.join(workspace, analysis, "mj", mj, "mj_config")

# change cwd
cwd = os.getcwd()
os.chdir(mj_config_dir)

# run script
try:
    with open(python_script, 'r') as fd:
        script_text = fd.read()
except (RuntimeError, IOError) as e:
    print("Can't open script file: {0}".format(e))
    exit()
action_types.__action_counter__ = 0
exec(script_text)
pipeline = locals()[pipeline_name]

# pipeline processor
pp = Pipelineprocessor(pipeline)

# validation
err = pp.validate()
if len(err) > 0:
    for e in err:
        print(e)
    exit()

# run pipeline
names = []
pp.run()
i = 0
while pp.is_run():
    runner = pp.get_next_job()
    if runner is None:
        time.sleep(0.1)
    else:
        names.append(runner.name)
        command = runner.command
        if command[0] == "flow123d":
            command[0] = "flow123d.bat"
        process = subprocess.Popen(command, stderr=subprocess.PIPE)
        return_code = process.wait(10)
        if return_code is not None:
            #print(process.stderr)
            pass
        pp.set_job_finished(runner.id)
    i += 1
    assert i < 1000, "Timeout"

print("\nrun flows\n---------")
for name in names:
    print(name)
print("")

# return cwd
os.chdir(cwd)


# ------------
# analysis an2
# ------------

# prepare mj
analysis = "an2"
mj = "mj1"
python_script = "s.py"
pipeline_name = "Pipeline_7"
last_analysis = "an1"
err, input_files = MjPreparation.prepare(workspace=workspace, analysis=analysis,
                                         mj=mj, python_script=python_script,
                                         pipeline_name=pipeline_name,
                                         last_analysis=last_analysis)
if len(err) > 0:
    for e in err:
        print(e)
    exit()

# mj_config_dir
mj_config_dir = os.path.join(workspace, analysis, "mj", mj, "mj_config")

# change cwd
cwd = os.getcwd()
os.chdir(mj_config_dir)

# run script
try:
    with open(python_script, 'r') as fd:
        script_text = fd.read()
except (RuntimeError, IOError) as e:
    print("Can't open script file: {0}".format(e))
    exit()
action_types.__action_counter__ = 0
exec(script_text)
pipeline = locals()[pipeline_name]

# identical list
il_file = os.path.join(mj_config_dir, "identical_list.json")
if not os.path.isfile(il_file):
    il_file = None

# pipeline processor
pp = Pipelineprocessor(pipeline, identical_list=il_file)

# validation
err = pp.validate()
if len(err) > 0:
    for e in err:
        print(e)
    exit()

# run pipeline
names = []
pp.run()
i = 0
while pp.is_run():
    runner = pp.get_next_job()
    if runner is None:
        time.sleep(0.1)
    else:
        names.append(runner.name)
        command = runner.command
        if command[0] == "flow123d":
            command[0] = "flow123d.bat"
        process = subprocess.Popen(command, stderr=subprocess.PIPE)
        return_code = process.wait(10)
        if return_code is not None:
            #print(process.stderr)
            pass
        pp.set_job_finished(runner.id)
    i += 1
    assert i < 1000, "Timeout"

print("\nrun flows\n---------")
for name in names:
    print(name)
print("")

# return cwd
os.chdir(cwd)
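
# Hedged refactoring sketch (defined but not used above): the an1 and an2 run
# loops are identical, so they could be factored into a helper like this.
# "run_pipeline" is an invented name; every call on "pp" is one the script
# already makes against the Pipelineprocessor API.
def run_pipeline(pp, max_iter=1000):
    """Drive a validated Pipelineprocessor to completion; return flow names."""
    names = []
    pp.run()
    i = 0
    while pp.is_run():
        runner = pp.get_next_job()
        if runner is None:
            time.sleep(0.1)
        else:
            names.append(runner.name)
            command = runner.command
            if command[0] == "flow123d":
                command[0] = "flow123d.bat"
            process = subprocess.Popen(command, stderr=subprocess.PIPE)
            process.wait(10)
            pp.set_job_finished(runner.id)
        i += 1
        assert i < max_iter, "Timeout"
    return names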
gpl-3.0
-8,578,005,189,924,669,000
21.267045
98
0.615718
false
3.183591
true
false
false