repo_name stringlengths 5 100 | path stringlengths 4 231 | language stringclasses 1 value | license stringclasses 15 values | size int64 6 947k | score float64 0 0.34 | prefix stringlengths 0 8.16k | middle stringlengths 3 512 | suffix stringlengths 0 8.17k |
|---|---|---|---|---|---|---|---|---|
CCI-MOC/GUI-Backend | core/tasks.py | Python | apache-2.0 | 1,847 | 0.001624 | # -*- coding: utf-8 -*-
"""
Core application tasks
"""
from celery.decorators import task
from django.core.mail import EmailMessage
from atmosphere import settings
from threepio import celery_logger, email_logger
from core.models.status_type import get_status_type
@task(name="send_email")
def send_email(subject, | body, from_email, to, cc=None,
fail_silently=False, html=False):
"""
Use django.core.mail.EmailMessage to send and log an Atmosphere email.
"""
try:
msg = EmailMessage(subject=subject, body=body,
from_email=from_email,
to=to,
cc=cc)
if html:
msg.content_subtype = 'html'
log_message = "\n> From: | {0}\n> To:{1}\n> Cc:{2}\n> Subject:{3}\n> Body:\n{4}"
args = (from_email, to, cc, subject, body)
email_logger.info(log_message.format(*args))
if getattr(settings, "SEND_EMAILS", True):
msg.send(fail_silently=fail_silently)
email_logger.info("NOTE: Above message sent successfully")
celery_logger.info("NOTE: Above message sent successfully")
else:
email_logger.info("NOTE: Above message not sent -- SEND_EMAILS was False")
celery_logger.info("NOTE: Above message not sent -- SEND_EMAILS was False")
return True
except Exception as e:
celery_logger.exception(e)
return False
@task(name="close_request")
def close_request(request):
"""
Close the request and email approval message
"""
request.status = get_status_type(status="closed")
request.save()
@task(name='set_request_as_failed')
def set_request_as_failed(request):
"""
Set the request as failed
"""
request.status = get_status_type(status="failed")
request.save()
|
hfp/tensorflow-xsmm | tensorflow/python/kernel_tests/conv1d_test.py | Python | apache-2.0 | 3,666 | 0.009547 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for convolution related functionality in tensorflow.ops.nn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class Conv1DTest(test.TestCase):
def testBasic(self):
"""Test that argument passing to conv1d is handled properly."""
for dtype in [dtypes.float16, dtypes.float32, dtypes.float64]:
x = constant_op.constant([1, 2, 3, 4], dtype=dtype)
x = array_ops.expand_dims(x, 0) # Add batch dimension
x = array_ops.expand_dims(x, 2) # And depth dimension
filters = constant_op.constant([2, 1], dtype=dtype)
filters = array_ops.expand_dims(filters, 1) # in_channels
filters = array_ops.expand_d | ims(filters, 2) # out_channels
# Filters is 2x1x1
for stride in [1, 2]:
with self.cached_session(use_gpu=test.is_gpu_available()):
c = nn_ops.conv1d(x, filters, stride, padding="VALID")
reduced = array_ops.squeeze(c)
output = self.evaluate(reduced)
if stride == 1:
self.assertEqual(len(output), 3)
self.assertAllClose(output,
[2 * 1 + 1 * 2, 2 * 2 + 1 * 3, 2 * | 3 + 1 * 4])
else:
self.assertEqual(len(output), 2)
self.assertAllClose(output, [2 * 1 + 1 * 2, 2 * 3 + 1 * 4])
def testConv1DTranspose(self):
with self.cached_session():
stride = 2
# Input, output: [batch, width, depth]
x_shape = [2, 4, 3]
y_shape = [2, 9, 2]
# Filter: [kernel_width, output_depth, input_depth]
f_shape = [3, 2, 3]
x = constant_op.constant(
1.0, shape=x_shape, name="x", dtype=dtypes.float32)
f = constant_op.constant(
1.0, shape=f_shape, name="filter", dtype=dtypes.float32)
output = nn_ops.conv1d_transpose(
x, f, y_shape, stride=stride, padding="VALID")
value = self.evaluate(output)
cache_values = np.zeros(y_shape, dtype=np.float32)
# The amount of padding added
pad = 1
for n in xrange(x_shape[0]):
for k in xrange(f_shape[1]):
for w in xrange(pad, y_shape[1] - pad):
target = 3.0
# We add a case for locations divisible by the stride.
w_in = w % stride == 0 and w > pad and w < y_shape[1] - 1 - pad
if w_in:
target += 3.0
cache_values[n, w, k] = target
# copy values in the border
cache_values[n, 0, k] = cache_values[n, 1, k]
cache_values[n, -1, k] = cache_values[n, -2, k]
self.assertAllClose(cache_values, value)
if __name__ == "__main__":
test.main()
|
madscatt/zazzie_1.5 | trunk/sassie/interface/capriqorn_filter.py | Python | gpl-3.0 | 3,940 | 0.011168 | '''
SASSIE: Copyright (C) 2011 Joseph E. Curtis, Ph.D.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import os,sys
import input_filter
import sasmol.sasmol as sasmol
def check_capriqorn(variables,**kwargs):
error=[]
runname = variables['runname'][0]
pdbfile = variables['pdbfile'][0]
dcdfile = variables['dcdfile'][0]
number_q_values = variables['number_q_values'][0]
q_max = variables['q_max'][0]
if number_q_values < 1:
error.append('number of q-values needs to be greater than zero')
return error
elif q_max <= 0:
error.append('q-max needs to be greater than zero')
return error
create_alias_flag = variables['create_alias_flag'][0]
if not create_alias_flag:
aliasfile = variables['aliasfile'][0]
### OPEN check if aliasfile exists and can be read
### OPEN check if aliasfile can be used with supplied pdbfile
### OPEN advanced options
### OPEN check number of gpu/cpu cores etc.
### OPEN check alias.dat file
error = input_filter.check_name(runname)
if(error!=[]):
return error
error=input_filter.check_file_exists(pdbfile)
if(len(error) != 0):
error.append('input pdb file, '+pdbfile+', does not exist')
return error
ev,value=input_filter.check_pdb_dcd(pdbfile,'pdb')
if(ev == 0):
error.append('check input pdb file: '+pdbfile)
return error
if(value == 0):
error.append( 'input pdb file, '+pdbfile+', is not a valid pdb file')
return error
try:
m1 = sasmol.SasMol(0)
m1.read_pdb(pdbfile)
number_of_frames = m1.number_of_frames()
print '> found '+str(number_of_frames)+' frames in PDB file'
except:
error.append('could not open PDB file '+pdbfile+' to check number of frames')
return error
if(number_of_frames < 1):
error.append('PDB file has no fr | ames : '+pdbfile)
return error
if dcdfile[-3:] == 'dcd':
ev,value=input_filter.chec | k_pdb_dcd(dcdfile,'dcd')
value = 0
if(ev == 1): # if the file exists
if(value == 0): # not a pdb file
# 'checking infile : as dcd'
ev,value=input_filter.check_pdb_dcd(dcdfile,'dcd')
if(value == 1):
cvalue=input_filter.certify_pdb_dcd(pdbfile,dcdfile)
if(cvalue == 0):
error.append('input pdb file '+pdbfile+' and dcd file '+dcdfile+' are not compatible (different number of atoms)')
return error
else:
error.append('dcd input file '+dcdfile+' is not a valid dcd file')
return error
xstfile = variables['xstfile'][0]
### OPEN check if xstfile exists and can be read
### OPEN check if xstfile can be used with supplied dcdfile
elif dcdfile[-6:] == 'crdbox':
pass
### OPEN check if crdbox exists and can be read
### OPEN check if crdbox can be used with supplied pdbfile
### OPEN check if box dimensions are readable and have ~valid values
else:
error.append('infile needs to have a "dcd" or "crdbox" suffix')
return error
return error
|
agendaTCC/AgendaTCC | tccweb/apps/website/views.py | Python | gpl-2.0 | 8,944 | 0.017008 | # -*- coding: utf-8 -*-
from django.shortcuts import render_to_response, redirect, render
from django.contrib import messages
from django.template import RequestContext
from django.contrib.auth import forms, authenticate, views, login
from django.contrib.auth.decorators import login_required
from django import http
from django.core.urlresolvers import reverse
from django.db.models import Q
from datetime import date
from itertools import chain
from django.contrib.auth.models import Group
from models import Splash, Imagens
from semestre.models import Semestre
from projetos.models import ProjetoDeGraduacao
from disciplinas.models import Disciplina
from departamentos.models import Departamento
from noticias.models import Noticia
from bancas.models import Banca
from questionarios.models import Questionario
def splash(request):
modelo = Splash.objects.get(id = 1)
imagens = Imagens.objects.filter(splash = 1)
noticias = Noticia.objects.all()[:8]
texto = modelo.texto
ex | ibir_noticias = modelo.exibir_noticias
exibir_texto = modelo.exibir_texto
exibir_imagens = modelo.exibir_imagens
return render_to_response('website/splash.html', {
'imagens':imagens,
'texto': texto,
'exibir_noticias':exibir_noticias,
'exibir_texto': exibir_texto,
'exibir_imag | ens':exibir_imagens,
# 'noticias': noticias,
}, context_instance=RequestContext(request))
@login_required
def dashboard(request):
usuario = request.user
semestres = request.session.get("semestre")
departamentos = Departamento.objects.all()
todos_semestres = {}
for departamento in departamentos:
todos_semestres.update({departamento:Semestre.objects.filter(grupo = departamento)})
list_semestre = []
list_departamentos = []
for departamento,semestre in semestres.iteritems():
list_semestre.append(semestre)
list_departamentos.append(departamento)
#Query para disciplinas do semestre
disciplinas_semestre = Disciplina.objects.filter(esta_ativa=True,semestre__in = list_semestre)
noticias = Noticia.objects.all()[:8]
dicionario = {'todos_semestres':todos_semestres, 'noticias':noticias};
if not usuario.is_superuser or usuario.docente or usuario.funcionario or usuario.secretario:
#
# ALUNO
#
if usuario.aluno and usuario.curso:
#Disciplinas Matriculadas
#Filtra disciplinas do semestre para as matriculadas pelo aluno
disciplinas_aluno = {usuario.curso.departamento:disciplinas_semestre.filter(alunos=usuario.id)}
dicionario.update({'disiplinas_aluno':disciplinas_aluno})
#Projetos do semestre
#Query projetos para projetos pertencentes ao aluno e a disciplinas do semestre
projetos = ProjetoDeGraduacao.objects.filter(aluno = usuario, disciplina__in = disciplinas_semestre)
projetos_aluno = {usuario.curso.departamento:projetos}
dicionario.update({'projetos_aluno':projetos_aluno})
#Questionarios
questionarios = Questionario.objects.filter(departamento = usuario.curso.departamento)
dicionario.update({'questionarios':questionarios})
#
# DOCENTE
#
if usuario.docente:
#Disciplinas Docente
#Filtra todos as disciplinas nas quais o docente é responsavel para todos os departamentos, então
#lista todos os projetos para tais disciplinas
disciplinas_docente = {}
for departamento,semestre in semestres.iteritems():
disciplinas_dict = {}
disciplinas = disciplinas_semestre.filter(professores = usuario, semestre = semestre)
for disciplina in disciplinas:
projetos = ProjetoDeGraduacao.objects.filter(disciplina = disciplina)
disciplinas_dict.update({disciplina:projetos})
disciplinas_docente.update({departamento:disciplinas_dict})
dicionario.update({'disciplinas_docente':disciplinas_docente})
projetos_docente = {}
for departamento,semestre in semestres.iteritems():
disciplinas = disciplinas_semestre.filter(semestre = semestre)
projetos = ProjetoDeGraduacao.objects.filter(orientador = usuario, disciplina__in = disciplinas)
projetos_docente.update({departamento:projetos})
dicionario.update({'projetos_docente':projetos_docente})
bancas_docente_responsavel = {}
for departamento,semestre in semestres.iteritems():
bancas = Banca.objects.filter(banca_docente = usuario, semestre = semestre)
bancas_docente_responsavel.update({departamento:bancas})
dicionario.update({'bancas_docente_responsavel':bancas_docente_responsavel})
bancas_docente_convidado = {}
for departamento,semestre in semestres.iteritems():
bancas = Banca.objects.filter(banca_convidado = usuario, semestre = semestre)
bancas_docente_convidado.update({departamento:bancas})
dicionario.update({'bancas_docente_convidado':bancas_docente_convidado})
#
# SUPERVISOR
#
if usuario.supervisor:
projetos_supervisor = {}
for departamento,semestre in semestres.iteritems():
disciplinas = disciplinas_semestre.filter(semestre = semestre)
projetos = ProjetoDeGraduacao.objects.filter(supervisor = usuario, disciplina__in = disciplinas)
projetos_supervisor.update({departamento:projetos})
dicionario.update({'projetos_supervisor':projetos_supervisor})
#
# FUNCIONARIO
#
if usuario.funcionario or usuario.secretario:
disciplinas_funcionario = {}
for departamento,semestre in semestres.iteritems():
disciplinas_dict = {}
disciplinas = disciplinas_semestre.filter(semestre = semestre)
for disciplina in disciplinas:
projetos = ProjetoDeGraduacao.objects.filter(disciplina = disciplina)
disciplinas_dict.update({disciplina:projetos})
disciplinas_funcionario.update({departamento:disciplinas_dict})
dicionario.update({'disciplinas_funcionario':disciplinas_funcionario})
if usuario.doutorando:
bancas_doutorando_convidado = {}
for departamento,semestre in semestres.iteritems():
bancas = Banca.objects.filter(banca_convidado = usuario, semestre = semestre)
bancas_doutorando_convidado.update({departamento:bancas})
dicionario.update({'bancas_doutorando_convidado':bancas_doutorando_convidado})
if usuario.mestrando:
bancas_mestrando_convidado = {}
for departamento,semestre in semestres.iteritems():
bancas = Banca.objects.filter(banca_convidado = usuario, semestre = semestre)
bancas_mestrando_convidado.update({departamento:bancas})
dicionario.update({'bancas_mestrando_convidado':bancas_mestrando_convidado})
return render(request,'website/dashboard.html', dicionario)
def index(request):
if request.user.is_authenticated():
#Ao se autenticar o sistema busca os semestres atuais(atual = true) releventes para cada tipo de usuario
#Cada departamento no sistema tem sua configuração de semestre propria, cada objeto de semestre é pareado
#com se departamento em um dicionario e salvo na seção do usuario
semestres = []
if not request.user.is_superuser:
if request.user.aluno:
if request.user.curso:
semestres = Semestre.objects.semestreAtual(request.user.curso.departamento)
if request.user.docente:
semestres = Semestre.objects.filter(atual = True)
if request.user.doutorando:
semestres = Semestre.objects.filter(atual = True)
if request.user.mestrando:
semestres = S |
limix/glimix-core | glimix_core/mean/_sum.py | Python | mit | 2,431 | 0 | from numpy import add
from optimix import Function
class SumMean(Function):
"""
Sum mean function, 𝐟₀ + 𝐟₁ + ….
The mathematical representation is
𝐦 = 𝐟₀ + 𝐟₁ + …
In other words, it is a sum of mean vectors.
Example
-------
.. doctest::
>>> from glimix_core.mean import OffsetMean, LinearMean, SumMean
>>>
>>> X = [[5.1, 1.0],
... [2.1, -0.2]]
>>>
>>> mean0 = LinearMean(X)
>>> mean0.effsizes = [-1.0, 0.5]
>>>
>>> mean1 = OffsetMean(2)
>>> mean1.offset = 2.0
>>>
>>> mean = SumMean([mean0, mean1])
>>>
>>> print(mean.value())
[-2. | 6 -0.2]
>>> | g = mean.gradient()
>>> print(g["SumMean[0].effsizes"])
[[ 5.1 1. ]
[ 2.1 -0.2]]
>>> print(g["SumMean[1].offset"])
[1. 1.]
>>> mean0.name = "A"
>>> mean1.name = "B"
>>> mean.name = "A+B"
>>> print(mean)
SumMean(means=...): A+B
LinearMean(m=2): A
effsizes: [-1. 0.5]
OffsetMean(): B
offset: 2.0
"""
def __init__(self, means):
"""
Constructor.
Parameters
----------
means : list
List of mean functions.
"""
self._means = [c for c in means]
Function.__init__(self, "SumMean", composite=self._means)
def value(self):
"""
Sum of mean vectors, 𝐟₀ + 𝐟₁ + ….
Returns
-------
𝐦 : ndarray
𝐟₀ + 𝐟₁ + ….
"""
return add.reduce([mean.value() for mean in self._means])
def gradient(self):
"""
Sum of mean function derivatives.
Returns
-------
∂𝐦 : dict
∂𝐟₀ + ∂𝐟₁ + ….
"""
grad = {}
for i, f in enumerate(self._means):
for varname, g in f.gradient().items():
grad[f"{self._name}[{i}].{varname}"] = g
return grad
def __str__(self):
tname = type(self).__name__
msg = "{}(means=...)".format(tname)
if self.name is not None:
msg += ": {}".format(self.name)
for m in self._means:
spl = str(m).split("\n")
msg = msg + "\n" + "\n".join([" " + s for s in spl])
return msg
|
Shouqun/node-gn | tools/depot_tools/third_party/logilab/astroid/brain/pysix_moves.py | Python | mit | 8,703 | 0.000575 | # copyright 2003-2014 LOGILAB S.A. (Paris, FRANCE), all rights reserved.
# contact http://www.logilab.fr/ -- mailto:contact@logilab.fr
#
# This file is part of astroid.
#
# astroid is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the Free
# Software Foundation, either version 2.1 of the License, or (at your option) any
# later version.
#
# astroid is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with astroid. If not, see <http://www.gnu.org/licenses/>.
"""Astroid hooks for six.moves."""
import sys
from textwrap import dedent
from astroid import MANAGER, register_module_extender
from astroid.builder import AstroidBuilder
def six_moves_transform_py2():
return AstroidBuilder(MANAGER).string_build(dedent('''
import urllib as _urllib
import urllib2 as _urllib2
import urlparse as _urlparse
class Moves(object):
import BaseHTTPServer
import CGIHTTPServer
import SimpleHTTPServer
from StringIO import StringIO
from cStringIO import StringIO as cStringIO
from UserDict import UserDict
from UserList import UserList
from UserString import UserString
import __builtin__ as builtins
import thread as _thread
import dummy_thread as _dummy_thread
import ConfigParser as configparser
import copy_reg as copyreg
from itertools import (imap as map,
ifilter as filter,
ifilterfalse as filterfalse,
izip_ | longest as zip_longest,
izip as zip)
import htmlentitydefs as html_entities
import HT | MLParser as html_parser
import httplib as http_client
import cookielib as http_cookiejar
import Cookie as http_cookies
import Queue as queue
import repr as reprlib
from pipes import quote as shlex_quote
import SocketServer as socketserver
import SimpleXMLRPCServer as xmlrpc_server
import xmlrpclib as xmlrpc_client
import _winreg as winreg
import robotparser as urllib_robotparser
input = raw_input
intern = intern
range = xrange
xrange = xrange
reduce = reduce
reload_module = reload
class UrllibParse(object):
ParseResult = _urlparse.ParseResult
SplitResult = _urlparse.SplitResult
parse_qs = _urlparse.parse_qs
parse_qsl = _urlparse.parse_qsl
urldefrag = _urlparse.urldefrag
urljoin = _urlparse.urljoin
urlparse = _urlparse.urlparse
urlsplit = _urlparse.urlsplit
urlunparse = _urlparse.urlunparse
urlunsplit = _urlparse.urlunsplit
quote = _urllib.quote
quote_plus = _urllib.quote_plus
unquote = _urllib.unquote
unquote_plus = _urllib.unquote_plus
urlencode = _urllib.urlencode
splitquery = _urllib.splitquery
splittag = _urllib.splittag
splituser = _urllib.splituser
uses_fragment = _urlparse.uses_fragment
uses_netloc = _urlparse.uses_netloc
uses_params = _urlparse.uses_params
uses_query = _urlparse.uses_query
uses_relative = _urlparse.uses_relative
class UrllibError(object):
URLError = _urllib2.URLError
HTTPError = _urllib2.HTTPError
ContentTooShortError = _urllib.ContentTooShortError
class DummyModule(object):
pass
class UrllibRequest(object):
urlopen = _urllib2.urlopen
install_opener = _urllib2.install_opener
build_opener = _urllib2.build_opener
pathname2url = _urllib.pathname2url
url2pathname = _urllib.url2pathname
getproxies = _urllib.getproxies
Request = _urllib2.Request
OpenerDirector = _urllib2.OpenerDirector
HTTPDefaultErrorHandler = _urllib2.HTTPDefaultErrorHandler
HTTPRedirectHandler = _urllib2.HTTPRedirectHandler
HTTPCookieProcessor = _urllib2.HTTPCookieProcessor
ProxyHandler = _urllib2.ProxyHandler
BaseHandler = _urllib2.BaseHandler
HTTPPasswordMgr = _urllib2.HTTPPasswordMgr
HTTPPasswordMgrWithDefaultRealm = _urllib2.HTTPPasswordMgrWithDefaultRealm
AbstractBasicAuthHandler = _urllib2.AbstractBasicAuthHandler
HTTPBasicAuthHandler = _urllib2.HTTPBasicAuthHandler
ProxyBasicAuthHandler = _urllib2.ProxyBasicAuthHandler
AbstractDigestAuthHandler = _urllib2.AbstractDigestAuthHandler
HTTPDigestAuthHandler = _urllib2.HTTPDigestAuthHandler
ProxyDigestAuthHandler = _urllib2.ProxyDigestAuthHandler
HTTPHandler = _urllib2.HTTPHandler
HTTPSHandler = _urllib2.HTTPSHandler
FileHandler = _urllib2.FileHandler
FTPHandler = _urllib2.FTPHandler
CacheFTPHandler = _urllib2.CacheFTPHandler
UnknownHandler = _urllib2.UnknownHandler
HTTPErrorProcessor = _urllib2.HTTPErrorProcessor
urlretrieve = _urllib.urlretrieve
urlcleanup = _urllib.urlcleanup
proxy_bypass = _urllib.proxy_bypass
urllib_parse = UrllibParse()
urllib_error = UrllibError()
urllib = DummyModule()
urllib.request = UrllibRequest()
urllib.parse = UrllibParse()
urllib.error = UrllibError()
moves = Moves()
'''))
def six_moves_transform_py3():
return AstroidBuilder(MANAGER).string_build(dedent('''
class Moves(object):
import _io
cStringIO = _io.StringIO
filter = filter
from itertools import filterfalse
input = input
from sys import intern
map = map
range = range
from imp import reload as reload_module
from functools import reduce
from shlex import quote as shlex_quote
from io import StringIO
from collections import UserDict, UserList, UserString
xrange = range
zip = zip
from itertools import zip_longest
import builtins
import configparser
import copyreg
import _dummy_thread
import http.cookiejar as http_cookiejar
import http.cookies as http_cookies
import html.entities as html_entities
import html.parser as html_parser
import http.client as http_client
import http.server
BaseHTTPServer = CGIHTTPServer = SimpleHTTPServer = http.server
import pickle as cPickle
import queue
import reprlib
import socketserver
import _thread
import winreg
import xmlrpc.server as xmlrpc_server
import xmlrpc.client as xmlrpc_client
import urllib.robotparser as urllib_robotparser
import email.mime.multipart as email_mime_multipart
import email.mime.nonmultipart as email_mime_nonmultipart
import email.mime.text as email_mime_text
import email.mime.base as email_mime_base
import urllib.parse as urllib_parse
import urllib.error as urllib_error
import tkinter
import tkinter.dialog as tkinter_dialog
import tkinter.filedialog as tkinter_filedialog
import tkinter.scrolledtext as tkinter_scrolledtext
import tkinter.simpledialog as tkinder_simpledialog
import tkinter.tix as tkinter_tix
import tkinter.ttk as tkinter_ttk
import tkinter.constants as tkinter_constants
import tkinter.dnd as tkinter_dnd
import tkinter.colorchooser as tkinter_colorchooser
import tkinter.commondialog as tkinter_commondialog
|
JonasT/pyrsnapshotd | src/pysnapshotd/pipeobject.py | Python | gpl-2.0 | 4,032 | 0.001488 |
# This file is a part of pysnapshotd, a program for automated backups
# Copyright (C) 2015-2016 Jonas Thiem
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import threading
class BufferedPipeObject(object):
def __init__(self):
self.closed = False
self.contents = b""
self.access_mutex = threading.Lock()
self.waiting_for_content_semaphore = \
threading.Semaphore()
self.waiting_for_content_counter = 0
self._write_func = None
def _set_write_func(self, f):
self.access_mutex.acquire()
self._write_func = f
self.access_mutex.release()
def close(self):
self.access_mutex.acquire()
self.closed = True
self.access_mutex.release()
def write(self, data):
# First, check if pipe is still open at all:
self.access_mutex.acquire()
if self.closed:
self.access_mutex.release()
raise OSError("broken pipe - pipe has been closed")
# Do nothing for an obvious dummy command:
if len(data) == 0:
self.access_mutex.release()
return 0
# Try to write with the write func if given:
# (which means this pipe object itself will always remain empty and
# .read() on it will block forever, since things are somewhat bypassed
# directly to some target write function)
if self._write_func != None:
try:
self._write_func(data)
except Exception:
self.closed = True
finally:
self.access_mutex.release()
return
# Otherwise, just put contents in internal buffer for reading from
# this pipe from "the other end":
try:
self.contents += data
| i = 0
while i < self.waiting_for_content_counter:
| self.waiting_for_content_semaphore.\
release()
i += 1
finally:
self.access_mutex.release()
def read(self, amount):
print(" >> PIPE READ: " + str(amount))
if amount <= 0:
print(" >> PIPE READ DATA: <empty read>")
return b""
self.access_mutex.acquire()
# Try to read data as long as needed to acquire requested amount:
obtained_data = b""
while True:
# If pipe was closed along this process, abort:
if self.closed:
self.access_mutex.release()
raise OSError("broken pipe - pipe has been closed")
# Try to obtain as much data as requested:
if len(self.contents) > 0:
added_data = self.contents[:amount]
obtained_data += added_data
self.contents = self.contents[len(added_data):]
amount -= len(added_data)
# If there is not enough data available, we will need to wait for
# more:
if amount > 0:
self.waiting_for_content_counter += 1
self.access_mutex.release()
self.waiting_for_content_semaphore.acquire()
self.access_mutex.acquire()
else:
assert(len(obtained_data) > 0)
print(" >> PIPE READ DATA: " + str(obtained_data))
return obtained_data
|
DiptoDas8/Biponi | lib/python2.7/site-packages/braintree/exceptions/configuration_error.py | Python | mit | 119 | 0.008403 | from braintree.exceptions.unexpected_error import UnexpectedError
class ConfigurationError(UnexpectedError):
pa | ss
| |
51reboot/actual_09_homework | 10/jinderui/cmdb/user/models.py | Python | mit | 3,490 | 0.056291 | #coding:utf-8
from dbutils import MySQLConnection
class User(object):
def __init__(self,id,username,password,age):
self.id = id
self.username = username
self.password = password
self.age = age
@classmethod
def validate_login(self,username,password):
_columns = ('id','username')
_sql = 'select id,username from user where username=%s and password=md5(%s)'
_count,_rt_list = MySQLConnection.execute_sql(_sql,(username,password))
return dict(zip(_columns,_rt_list[0])) if _count !=0 else None
@classmethod
def get_list(self, wheres=[]):
_columns = ('id', 'username', 'password', 'age')
_sql = 'select * from user where 1=1'
_args = []
for _key, _value in wheres:
_sql += ' AND {key} = %s'.format(key=_key)
_args.append(_value)
_count, _rt_list = MySQLConnection.execute_sql(_sql, _args)
_rt = []
for _line in _rt_list:
_rt.append(dict(zip(_columns, _line)))
print _rt
return _rt
@classmethod
def get_users(self):
_columns = ('id','username','password','age')
_sql = 'select * from user'
_count,_rt_list= MySQLConnection.execute_sql(_sql)
_rt=[]
for _line in _rt_list:
_rt.append(dict(zip(_columns,_line)))
return _rt
#添加start
@classmethod
def validate_add_user(self,username,password,age):
user_list = self.get_users()
for users in user_list:
if users['username'] == username:
return False, u'用户名已存在'
if username.strip() == '':
return False, u'用户名不能为空'
if len(password) < 6:
return False, u'密码长度至少为6位'
if not str(age).isdigit() or int(age) <= 0 or int(age) > 100:
return False, u'年龄不正确'
return True, ''
@classmethod
def add_user(self,username,password,age,fetch=False):
_sql = 'insert into user(username,password,age) values(%s,md5(%s),%s)'
_args = (username,password,age)
MySQLConnection.execute_sql(_sql,_args)
#添加end
#更新start
@classmethod
def validate_user(self,age):
user_list = self.get_users()
if not str(age).isdigit() or int(age) <= 0 or int(age) > 100:
| return False, u'年龄不正确'
return True, ''
@classmethod
def change_user(self,userid,updateage):
user_list = self.get_users()
for users in user_list:
if users['age'] != updateage:
_sql = 'update user set age=%s where id =%s'
_args = (updateage,userid)
MySQLConnection.execute_sql(_sql,_args,False)
#更新end
#修改密码进行验证
@classmethod
def validate_charge_user_password(self,userid,upassword,muse | rname,mpassword):
if not self.validate_login(musername,mpassword):
return False,"管理员密码错误"
if self.get_user(userid) is None:
return False, u'用户信息不存在'
if len(upassword) <6:
return False,u'密码必须大于等于6'
return True, ''
@classmethod
def charge_user_password(self,userid,upassword):
_sql = 'update user set password=md5(%s) where id=%s'
_args = (upassword,userid)
MySQLConnection.execute_sql(_sql,_args,False)
#通过id获取指定用户的信息
@classmethod
def get_user(self,userid):
_users = self.get_users()
for _user in _users:
if _user.get('id') == long(userid):
return _user
return None
#修改密码end
@classmethod
def del_user(self,delid):
_sql = 'delete from user where id=%s'
_args=(delid,)
MySQLConnection.execute_sql(_sql,_args)
if __name__ == '__main__':
print User.validate_login('jinderui','123456')
print User.validate_login('jinderui','1234567') |
Yadnyawalkya/integration_tests | cfme/tests/configure/test_tag.py | Python | gpl-2.0 | 18,479 | 0.002327 | # -*- coding: utf-8 -*-
import fauxfactory
import pytest
from cfme import test_requirements
from cfme.cloud.provider import CloudProvider
from cfme.infrastructure.provider import InfraProvider
from cfme.markers.env_markers.provider import ONE
from cfme.markers.env_markers.provider import ONE_PER_CATEGORY
from cfme.rest.gen_data import categories as _categories
from cfme.rest.gen_data import service_templates as _service_templates
from cfme.rest.gen_data import tags as _tags
from cfme.rest.gen_data import tenants as _tenants
from cfme.rest.gen_data import users as _users
from cfme.rest.gen_data import vm as _vm
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.log import logger
from cfme.utils.rest import assert_response
from cfm | e.utils.rest import delete_resources_from_collection
from cfme.utils.rest import delete_resources_from_detail
from cfme.utils.update import update
from cfme.utils. | wait import wait_for
CLOUD_COLLECTION = [
"availability_zones",
"cloud_networks",
"cloud_subnets",
"flavors",
"network_routers",
"security_groups",
]
INFRA_COLLECTION = [
"clusters",
"hosts",
"data_stores",
"providers",
"resource_pools",
"services",
"service_templates",
"tenants",
"vms",
"users",
]
pytestmark = [
pytest.mark.provider(classes=[InfraProvider], selector=ONE),
pytest.mark.usefixtures('setup_provider')
]
@pytest.fixture
def category(appliance):
cg = appliance.collections.categories.create(
name=fauxfactory.gen_alphanumeric(8).lower(),
description=fauxfactory.gen_alphanumeric(32),
display_name=fauxfactory.gen_alphanumeric(32)
)
yield cg
if cg.exists:
cg.delete()
@pytest.fixture
def tag(category):
tag = category.collections.tags.create(
name=fauxfactory.gen_alphanumeric(8).lower(),
display_name=fauxfactory.gen_alphanumeric(32)
)
yield tag
tag.delete_if_exists()
@pytest.mark.sauce
@pytest.mark.tier(2)
@test_requirements.tag
def test_tag_crud(tag):
"""
Polarion:
assignee: anikifor
initialEstimate: 1/8h
casecomponent: Tagging
"""
assert tag.exists
tag.update({
'name': fauxfactory.gen_alphanumeric(8).lower(),
'display_name': fauxfactory.gen_alphanumeric(32)
})
@test_requirements.tag
def test_map_tagging_crud(appliance, category, soft_assert):
"""Test map tag crud with flash message assertion
Polarion:
assignee: anikifor
initialEstimate: 1/4h
casecomponent: Tagging
Bugzilla:
1707328
"""
label = fauxfactory.gen_alphanumeric(8)
map_tags_collection = appliance.collections.map_tags
map_tag_entity = map_tags_collection.create('Container Project', label, category.name)
view = appliance.browser.create_view(navigator.get_class(map_tags_collection, 'All').VIEW)
view.flash.assert_success_message('Container Label Tag Mapping "{}" was added'
.format(label)) # use label var to validate create method
with update(map_tag_entity):
map_tag_entity.category = fauxfactory.gen_alphanumeric(8)
view = appliance.browser.create_view(navigator.get_class(map_tags_collection, 'All').VIEW)
view.flash.assert_success_message(
'Container Label Tag Mapping "{}" was saved'
.format(map_tag_entity.label) # use entity label since it may get updated
)
row = next(view.table.rows(resource_label=map_tag_entity.label))
soft_assert(row.tag_category.text == map_tag_entity.category)
map_tag_entity.delete()
view = appliance.browser.create_view(navigator.get_class(map_tags_collection, 'All').VIEW)
if appliance.version >= "5.11": # BZ 1707328 is fixed only for 5.11
view.flash.assert_success_message('Container Label Tag Mapping "{}": Delete successful'
.format(map_tag_entity.label))
@test_requirements.tag
def test_updated_tag_name_on_vm(provider, tag, request):
"""
This test checks that tags don't disappear from the UI after their name (not displayed name) is
changed.
Bugzilla:
1668730
Polarion:
assignee: anikifor
casecomponent: Configuration
caseimportance: high
initialEstimate: 1/8h
testSteps:
1. create a tag
2. assign the tag to some vm, observe the tag in Smart Management section of vm
3. change name of the tag
4. on VM screen: still the same tag in Smart Management section of vm
"""
coll = provider.appliance.provider_based_collection(provider, coll_type='vms')
# need some VM to assign tags to, nothing specific is needed, so take the first one
vm = coll.all()[0]
vm.add_tag(tag)
request.addfinalizer(lambda: vm.remove_tag(tag))
# assert the tag is correctly assigned
vm_tags = vm.get_tags()
assert any(
tag.category.display_name == vm_tag.category.display_name and
tag.display_name == vm_tag.display_name
for vm_tag in vm_tags
), "tag is not assigned"
# update the name of the tag
new_tag_name = '{}_{}'.format(tag.name, fauxfactory.gen_alphanumeric(4).lower())
tag.update({'name': new_tag_name})
vm_tags = vm.get_tags()
# assert the tag was not changed in the UI
assert any(
tag.category.display_name == vm_tag.category.display_name and
tag.display_name == vm_tag.display_name
for vm_tag in vm_tags
), 'tag is not assigned'
@test_requirements.rest
class TestTagsViaREST(object):
COLLECTIONS_BULK_TAGS = ("services", "vms", "users")
def _service_body(self, **kwargs):
uid = fauxfactory.gen_alphanumeric(5)
body = {
'name': 'test_rest_service_{}'.format(uid),
'description': 'Test REST Service {}'.format(uid),
}
body.update(kwargs)
return body
def _create_services(self, request, rest_api, num=3):
# create simple service using REST API
bodies = [self._service_body() for __ in range(num)]
collection = rest_api.collections.services
new_services = collection.action.create(*bodies)
assert_response(rest_api)
new_services_backup = list(new_services)
@request.addfinalizer
def _finished():
collection.reload()
ids = [service.id for service in new_services_backup]
delete_entities = [service for service in collection if service.id in ids]
if delete_entities:
collection.action.delete(*delete_entities)
return new_services
@pytest.fixture(scope="function")
def services(self, request, appliance):
return self._create_services(request, appliance.rest_api)
@pytest.fixture(scope="function")
def categories(self, request, appliance, num=3):
return _categories(request, appliance, num)
@pytest.fixture(scope="function")
def tags(self, request, appliance, categories):
return _tags(request, appliance, categories)
@pytest.fixture(scope="module")
def services_mod(self, request, appliance):
return self._create_services(request, appliance.rest_api)
@pytest.fixture(scope="module")
def categories_mod(self, request, appliance, num=3):
return _categories(request, appliance, num)
@pytest.fixture(scope="module")
def tags_mod(self, request, appliance, categories_mod):
return _tags(request, appliance, categories_mod)
@pytest.fixture(scope="module")
def tenants(self, request, appliance):
return _tenants(request, appliance, num=1)
@pytest.fixture(scope="module")
def service_templates(self, request, appliance):
return _service_templates(request, appliance)
@pytest.fixture(scope="function")
def vm(self, request, provider, appliance):
return _vm(request, provider, appliance)
@pytest.fixture(scope="function")
def users(self, request, appliance, num=3):
return _users(request, appliance, num=num)
@pytest.mark.tier(2)
def test_edit_tags_rest(self, app |
fergalmoran/dss | core/realtime/notification.py | Python | bsd-2-clause | 928 | 0.002155 | import requests
import logging
import redis
from requests.packages.urllib3.exceptions import ConnectionError
from core.serialisers import json
from dss import localsettings
# TODO(fergal.moran@gmail.com): refactor these out to
# classes to avoid duplicating constants below
HEADERS = {
'content-type': 'application/json'
}
logger = logging.getLogger('spa')
def post_notification(session_id, image, message):
try:
| payload | = {
'sessionid': session_id,
'image': image,
'message': message
}
data = json.dumps(payload)
r = requests.post(
localsettings.REALTIME_HOST + 'notification',
data=data,
headers=HEADERS
)
if r.status_code == 200:
return ""
else:
return r.text
except ConnectionError:
#should probably implement some sort of retry in here
pass |
hernejj/vizigrep | vizigrep/guiapp/SuperGtkBuilder.py | Python | gpl-2.0 | 939 | 0.005325 | import gi
gi.require_version('GtkSource', '3.0')
from gi.repository import Gtk, GObject, GtkSource
# GtkBuilder that will populate the given object (representing a Window) with
# all of that window's UI elements as found in the GtkBuilder UI file.
class SuperGtkBuilder(Gtk.Builder):
def __init__(self, window_object, ui_file_path):
Gtk. | Builder.__init__(self)
GObject.type_register(GtkSource.View) # Needed to work with GtkSourceView objects
self.add_from_file(ui_file_path)
self.populate_window_with_ui_elements(window_object)
# Create a data-member in window_object to represent each ui e | lement
# found in the GtkBuilder UI file.
def populate_window_with_ui_elements(self, window_object):
for ui_element in self.get_objects():
if isinstance(ui_element, Gtk.TreeSelection): continue
setattr(window_object, Gtk.Buildable.get_name(ui_element), ui_element)
|
NejcZupec/ggrc-core | src/ggrc_gdrive_integration/migrations/versions/20140912211135_1efacad0fff5_index_for_object_folders.py | Python | apache-2.0 | 609 | 0.006568 | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICE | NSE file>
"""Index for object_folders
Revision ID: 1efacad0fff5
Revises: 4d7ce1eaddf2
Create Date: 2014-09-12 21:11:35.908034
"""
# revision identifiers, used by Alembic.
revision = '1efacad0fff5'
down_revision = '4d7ce1eaddf2'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.create_index('ix_folderable_id_type', 'object_folders', ['folderable_type','f | olderable_id'])
pass
def downgrade():
op.drop_index('ix_folderable_id_type', table_name='object_folders')
pass
|
dhermes/gcloud-python | videointelligence/synth.py | Python | apache-2.0 | 2,452 | 0.002855 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import synthtool as s
from synthtool import gcp
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
versions = ["v1beta1", "v1beta2", "v1p1beta1", "v1p2beta1", "v1"]
# ----------------------------------------------------------------------------
# Generate videointelligence GAPIC layer
# ----------------------------------------------------------------------------
for version in versions:
library = gapic.py_library(
"videointelligence", version, artman_output_name=f"video-intelligence-{version}"
)
# TODO: stop excluding tests and nox.py (excluded as we lack system tests)
s.move(
library,
excludes=[
"setup.py",
"nox*.py",
"README.rst",
"docs/index.rst",
f"tests/system/gapic/{version}/"
f"test_system_video_intelligence_service_{version}.py",
# f'tests/unit/gapic/{version}/'
# f'test_video_intelligence_service_client_{version}.py',
],
)
s.replace(
"**/*/video_intelligence_service_client.py",
"'google-cloud-video-intelligence', \).version",
"'google-cl | oud-videointelligence', ).version",
)
s.replace(
"tests/unit/gapic/**/test_video_intelligence_service_client_*.py",
"^(\s+)expected_request = video_intelligence_ | pb2.AnnotateVideoRequest\(\)",
"\g<1>expected_request = video_intelligence_pb2.AnnotateVideoRequest(\n"
"\g<1> input_uri=input_uri, features=features)",
)
# ----------------------------------------------------------------------------
# Add templated files
# ----------------------------------------------------------------------------
templated_files = common.py_library(unit_cov_level=97, cov_level=100)
s.move(templated_files)
s.shell.run(["nox", "-s", "blacken"], hide_output=False)
|
devanshdalal/scikit-learn | sklearn/cross_validation.py | Python | bsd-3-clause | 72,259 | 0.000028 |
"""
The :mod:`sklearn.cross_validation` module includes utilities for cross-
validation and performance evaluation.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from math import ceil, floor, factorial
import numbers
import time
from abc import ABCMeta, abstractmethod
import numpy as np
import scipy.sparse as sp
from .base import is_classifier, clone
from .utils import indexable, check_random_state, safe_indexing
from .utils.validation import (_is_arraylike, _num_samples,
column_or_1d)
from .utils.multiclass import type_of_target
from .utils.random import choice
from .externals.joblib import Parallel, delayed, logger
from .externals.six import with_metaclass
from .externals.six.moves import zip
from .metrics.scorer import check_scoring
from .utils.fixes import bincount
from .gaussian_process.kernels import Kernel as GPKernel
from .exceptions import FitFailedWarning
warnings.warn("This module was deprecated in version 0.18 in favor of the "
"model_selection module into which all the refactored classes "
"and functions are moved. Also note that the interface of the "
"new CV iterators are different from that of this module. "
"This module will be removed in 0.20.", DeprecationWarning)
__all__ = ['KFold',
'LabelKFold',
'LeaveOneLabelOut',
'LeaveOneOut',
'LeavePLabelOut',
'LeavePOut',
'ShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'LabelShuffleSplit',
'check_cv',
'cross_val_score',
'cross_val_predict',
'permutation_test_score',
'train_test_split']
class _PartitionIterator(with_metaclass(ABCMeta)):
"""Base class for CV iterators where train_mask = ~test_mask
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
Parameters
----------
n : int
Total number of elements in dataset.
"""
def __init__(self, n):
if abs(n - int(n)) >= np.finfo('f').eps:
raise ValueError("n must be an integer")
self.n = int(n)
def __iter__(self):
ind = np.arange(self.n)
for test_index in self._iter_test_masks():
train_index = np.logical_not(test_index)
train_index = ind[train_index]
test_index = ind[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices()
"""
for test_index in self._iter_test_indices():
test_mask = self._empty_mask()
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self):
"""Gene | rates integer indices corresponding to test sets."""
raise NotImplementedError
def _empty_mask(self):
return np.zeros(self.n, dtype=np.bool)
class LeaveOneOut(_PartitionIterator):
"""Leave-One-Out cross validation iterator.
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeaveOneOut` instead.
Provides train/test indices to split data in train test sets. Each
sample is u | sed once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut(n)`` is equivalent to ``KFold(n, n_folds=n)`` and
``LeavePOut(n, p=1)``.
Due to the high number of test sets (which is the same as the
number of samples) this cross validation method can be very costly.
For large datasets one should favor KFold, StratifiedKFold or
ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = cross_validation.LeaveOneOut(2)
>>> len(loo)
2
>>> print(loo)
sklearn.cross_validation.LeaveOneOut(n=2)
>>> for train_index, test_index in loo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneLabelOut for splitting the data according to explicit,
domain-specific stratification of the dataset.
"""
def _iter_test_indices(self):
return range(self.n)
def __repr__(self):
return '%s.%s(n=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
)
def __len__(self):
return self.n
class LeavePOut(_PartitionIterator):
"""Leave-P-Out cross validation iterator
.. deprecated:: 0.18
This module will be removed in 0.20.
Use :class:`sklearn.model_selection.LeavePOut` instead.
Provides train/test indices to split data in train test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(n, p)`` is NOT equivalent to ``KFold(n, n_folds=n // p)``
which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross validation method can be very costly. For
large datasets one should favor KFold, StratifiedKFold or ShuffleSplit.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n : int
Total number of elements in dataset.
p : int
Size of the test sets.
Examples
--------
>>> from sklearn import cross_validation
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = cross_validation.LeavePOut(4, 2)
>>> len(lpo)
6
>>> print(lpo)
sklearn.cross_validation.LeavePOut(n=4, p=2)
>>> for train_index, test_index in lpo:
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, n, p):
super(LeavePOut, self).__init__(n)
self.p = p
def _iter_test_indices(self):
for comb in combinations(range(self.n), self.p):
yield np.array(comb)
def __repr__(self):
return '%s.%s(n=%i, p=%i)' % (
self.__class__.__module__,
self.__class__.__name__,
self.n,
self.p,
)
def __len__(self):
return int(factorial(self.n) / factorial(self.n - self.p)
/ factorial(self.p))
class _BaseKFold(with_metaclass(ABCMeta, _PartitionIterator)):
"""Base class to validate KFold approaches"""
@abstractmethod
def __init__(self, n, n_folds, shuffle, random_state):
super(_BaseKFold, self).__init__(n)
if abs(n_folds - int(n_folds)) >= np.finfo('f').eps:
raise ValueError("n_folds must be an integer")
self.n_folds = n_folds = int(n_folds)
if n_folds <= 1:
raise ValueError(
"k-fold cross validation requires at least one"
" train / test split by settin |
sio2project/sioworkers | sio/compilers/system_fpc.py | Python | gpl-3.0 | 500 | 0.002 | from __future__ import absolute_import
from sio.compilers.common import Compiler
from sio.workers.util import tempcwd
class FPCCompiler(Compiler):
lang = 'pas' |
options = ['-O2', '-XS', '-Xt']
output_file = 'a'
def _make_cmdline(self, executor):
# Addinational sources are automatically included
return (
['fpc', tempcwd('a.pas')] + self.options + | list(self.extra_compilation_args)
)
def run(environ):
return FPCCompiler().compile(environ)
|
zhyu/leetcode | algorithms/largestNumber/largestNumber.py | Python | mit | 254 | 0 | class Solution:
# @param num, a list of integers
# @return a string
def largestNumber(self, num):
retu | rn ''.join(sorted(map(str, num), cmp=self.cmp)).lstrip('0') or '0'
def cmp(self, x, y):
return [1, | -1][x + y > y + x]
|
primaeval/script.tvguide.fullscreen | playwithchannel.py | Python | gpl-2.0 | 5,207 | 0.009602 | import sys
import xbmc,xbmcaddon,xbmcvfs,xbmcgui
import sqlite3
import datetime
import time
import subprocess
from subprocess import Popen
import re
import os,stat
def log(what):
xbmc.log(repr(what),xbmc.LOGERROR)
ADDON = xbmcaddon.Addon(id='script.tvguide.fullscreen')
channel = sys.argv[1]
start = sys.argv[2]
def adapt_datetime(ts):
return time.mktime(ts.timetuple())
def convert_datetime(ts):
try:
return datetime.datetime.fromtimestamp(float(ts))
except ValueError:
return None
def windows():
if os.name == 'nt':
return True
else:
return False
def android_get_current_appid():
with open("/proc/%d/cmdline" % os.getpid()) as fp:
return fp.read().rstrip("\0")
def ffmpeg_location():
ffmpeg_src = xbmc.translatePath(ADDON.getSetting('autoplaywiths.ffmpeg'))
if xbmc.getCondVisibility('system.platform.android'):
ffmpeg_dst = '/data/data/%s/ffmpeg' % android_get_current_appid()
if (ADDON.getSetting('autoplaywiths.ffmpeg') != ADDON.getSetting('ffmpeg.last')) or (not xbmcvfs.exists(ffmpeg_dst) and ffmpeg_src != ffmpeg_dst):
xbmcvfs.copy(ffmpeg_src, ffmpeg_dst)
ADDON.setSetting('ffmpeg.last',ADDON.getSetting('autoplaywiths.ffmpeg'))
ffmpeg = ffmpeg_dst
else:
ffmpeg = ffmp | eg_src
if ffmpeg:
try:
st = os.stat(ffmpeg)
if not (st.st_mode & stat.S_IXUSR):
try:
os.chmod(ffmpeg, st.st_mode | stat.S_IXUSR)
except:
pass
except:
pass
if xbmcvfs.exists(ffmpeg):
| return ffmpeg
else:
xbmcgui.Dialog().notification("TVGF", "ffmpeg exe not found!")
sqlite3.register_adapter(datetime.datetime, adapt_datetime)
sqlite3.register_converter('timestamp', convert_datetime)
ADDON.setSetting('playing.channel',channel)
ADDON.setSetting('playing.start',start)
path = xbmc.translatePath('special://profile/addon_data/script.tvguide.fullscreen/source.db')
try:
conn = sqlite3.connect(path, detect_types=sqlite3.PARSE_DECLTYPES)
conn.row_factory = sqlite3.Row
except Exception as detail:
xbmc.log("EXCEPTION: (script.tvguide.fullscreen) %s" % detail, xbmc.LOGERROR)
ffmpeg = ffmpeg_location()
if ffmpeg:
folder = ADDON.getSetting('autoplaywiths.folder')
c = conn.cursor()
c.execute('SELECT stream_url FROM custom_stream_url WHERE channel=?', [channel])
row = c.fetchone()
url = ""
if row:
url = row[0]
if not url:
quit()
startDate = datetime.datetime.fromtimestamp(float(start))
c.execute('SELECT DISTINCT * FROM programs WHERE channel=? AND start_date = ?', [channel,startDate])
for row in c:
title = row["title"]
is_movie = row["is_movie"]
foldertitle = re.sub("\?",'',title)
foldertitle = re.sub(":|<>\/",'',foldertitle)
subfolder = "TVShows"
if is_movie == 'Movie':
subfolder = "Movies"
folder = "%s%s/%s/" % (folder, subfolder, foldertitle)
if not xbmcvfs.exists(folder):
xbmcvfs.mkdirs(folder)
season = row["season"]
episode = row["episode"]
if season and episode:
title += " S%sE%s" % (season, episode)
endDate = row["end_date"]
duration = endDate - startDate
before = int(ADDON.getSetting('autoplaywiths.before'))
after = int(ADDON.getSetting('autoplaywiths.after'))
extra = (before + after) * 60
#TODO start from now
seconds = duration.seconds + extra
if seconds > (3600*4):
seconds = 3600*4
break
player = xbmc.Player()
player.play(url)
count = 30
url = ""
while count:
count = count - 1
time.sleep(1)
if player.isPlaying():
url = player.getPlayingFile()
break
time.sleep(1)
player.stop()
time.sleep(1)
# Play with your own preferred player and paths
if url:
name = "%s - %s - %s" % (re.sub(r"[^\w' ]+", "", channel, flags=re.UNICODE),re.sub(r"[^\w' ]+", "", title, flags=re.UNICODE),time.strftime('%Y-%m-%d %H-%M'))
#name = re.sub("\?",'',name)
#name = re.sub(":|<>\/",'',name)
#name = name.encode("cp1252")
#name = re.sub(r"[^\w' ]+", "", name, flags=re.UNICODE)
filename = xbmc.translatePath("%s%s.ts" % (folder,name))
seconds = 3600*4
#cmd = [ffmpeg, "-y", "-i", url, "-c", "copy", "-t", str(seconds), filename]
#log(cmd)
#p = Popen(cmd,shell=windows())
cmd = [ffmpeg, "-y", "-i", url]
cmd = cmd + ["-reconnect", "1", "-reconnect_at_eof", "1", "-reconnect_streamed", "1", "-reconnect_delay_max", "300", "-t", str(seconds), "-c", "copy"]
cmd = cmd + ['-f', 'mpegts','-']
log(("start",cmd))
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=windows())
video = xbmcvfs.File(filename,'wb')
while True:
data = p.stdout.read(1000000)
if not data:
break
video.write(data)
video.close()
p.wait()
log(("done",cmd))
quit()
|
pyexcel/pyexcel-text | .moban.d/setup.py | Python | bsd-3-clause | 264 | 0.045455 | {% exten | ds 'setup.py.jj2' %}
{%block platform_block%}
{%endblock%}
{%block additional_keywords%}
"plain",
"simple",
"grid",
"pipe",
"orgtbl",
"rst",
"mediawiki",
"latex",
"latex_booktabs",
"html",
"json"
{%endblock%}
| |
jasimmonsv/CodingExercises | EulerProject/python/problem4.py | Python | gpl-2.0 | 510 | 0.033333 | #! /usr/bin/python
answerx = 0
answery=0
def palindrom( | num):
palin=''
tmp = str(num)
y=0
for x in reversed(range(len(tmp))):
palin=palin+str(tmp[x])
y=y+1
return int(palin)
for x in range(100,999):
for y in range(100,999):
if (x * y) == palindrom(x*y):
| if x*y > answerx*answery:
answerx = x
answery = y
y=y+1
y=100
x = x+1
print(answerx)
print(answery)
print(answerx * answery) |
Elizaveta239/PyDev.Debugger | tests_python/debugger_fixtures.py | Python | epl-1.0 | 14,176 | 0.001975 | from contextlib import contextmanager
import os
import threading
import time
import pytest
from tests_python import debugger_unittest
from tests_python.debugger_unittest import get_free_port, overrides, IS_CPYTHON, IS_JYTHON, IS_IRONPYTHON, \
IS_PY3K
import sys
def get_java_location():
from java.lang import System # @UnresolvedImport
jre_dir = System.getProperty("java.home")
for f in [os.path.join(jre_dir, 'bin', 'java.exe'), os.path.join(jre_dir, 'bin', 'java')]:
if os.path.exists(f):
return f
raise RuntimeError('Unable to find java executable')
def get_jython_jar():
from java.lang import ClassLoader # @UnresolvedImport
cl = ClassLoader.getSystemClassLoader()
paths = map(lambda url: url.getFile(), cl.getURLs())
for p in paths:
if 'jython.jar' in p:
return p
raise RuntimeError('Unable to find jython.jar')
class _WriterThreadCaseMSwitch(debugger_unittest.AbstractWriterThread):
TEST_FILE = 'tests_python.resources._debugger_case_m_switch'
IS_MODULE = True
@overrides(debugger_unittest.AbstractWriterThread.get_environ)
def get_environ(self):
env = os.environ.copy()
curr_pythonpath = env.get('PYTHONPATH', '')
root_dirname = os.path.dirname(os.path.dirname(__file__))
curr_pythonpath += root_dirname + os.pathsep
env['PYTHONPATH'] = curr_pythonpath
return env
@overrides(debugger_unittest.AbstractWriterThread.get_main_filename)
def get_main_filename(self):
return debugger_unittest._get_debugger_test_file('_debugger_case_m_switch.py')
class _WriterThreadCaseModuleWithEntryPoint(_WriterThreadCaseMSwitch):
TEST_FILE = 'tests_python.resources._debugger_case_module_entry_point:main'
IS_MODULE = True
@overrides(_WriterThreadCaseMSwitch.get_main_filename)
def get_main_filename(self):
return debugger_unittest._get_debugger_test_file('_debugger_case_module_entry_point.py')
class AbstractWriterThreadCaseFlask(debugger_unittest.AbstractWriterThread):
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
FLASK_FOLDER = None
TEST_FILE = 'flask'
IS_MODULE = True
def write_add_breakpoint_jinja2(self, line, func, template):
'''
@param line: starts at 1
'''
assert self.FLASK_FOLDER is not None
breakpoint_id = self.next_breakpoint_id()
template_file = debugger_unittest._get_debugger_test_file(os.path.join(self.FLASK_FOLDER, 'templates', template))
self.write("111\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % (self.next_seq(), breakpoint_id, 'jinja2-line', template_file, line, func))
self.log.append('write_add_breakpoint_jinja: %s line: %s func: %s' % (breakpoint_id, line, func))
return breakpoint_id
@overrides(debugger_unittest.AbstractWriterThread.get_environ)
def get_environ(self):
import platform
env = os.environ.copy()
env['FLASK_APP'] = 'app.py'
env['FLASK_ENV'] = 'development'
env['FLASK_DEBUG'] = '0'
if platform.system() != 'Windows':
locale = 'en_US.utf8' if platform.system() == 'Linux' else 'en_US.UTF-8'
env.update({
'LC_ALL': locale,
'LANG': locale,
})
return env
def get_cwd(self):
return debugger_unittest._get_debugger_test_file(self.FLASK_FOLDER)
def get_command_line_args(self):
assert self.FLASK_FOLDER is not None
free_port = get_free_port()
self.flask_port = free_port
return [
'flask',
'run',
'--no-debugger',
'--no-reload',
'--with-threads',
'--port',
str(free_port),
]
def _ignore_stderr_line(self, line):
if debugger_unittest.AbstractWriterThread._ignore_stderr_line(self, line):
return True
if 'Running on http:' in line:
return True
if 'GET / HTTP/' in line:
return True
return False
def create_request_thread(self):
outer = self
class T(threading.Thread):
def run(self):
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
for _ in range(10):
try:
stream = urlopen('http://127.0.0.1:%s' % (outer.flask_port,))
contents = stream.read()
if IS_PY3K:
contents = contents.decode('utf-8')
self.contents = contents
break
except IOError:
continue
t = T()
t.daemon = True
return t
class AbstractWriterThreadCaseDjango(debugger_unittest.AbstractWriterThread):
FORCE_KILL_PROCESS_WHEN_FINISHED_OK = True
DJANGO_FOLDER = None
def _ignore_stderr_line(self, line):
if debugger_unittest.AbstractWriterThread._ignore_stderr_line(self, line):
return True
if 'GET /my_app' in line:
return True
return False
def get_command_line_args(self):
assert self.DJANGO_FOLDER is not None
free_port = get_free_port()
self.django_port = free_port
return [
debugger_unittest._get_debugger_test_file(os.path.join(self.DJANGO_FOLDER, 'manage.py')),
'runserver',
'--noreload',
str(free_port),
]
def write_add_breakpoint_django(self, line, func, template):
'''
@param line: starts at 1
'''
assert self.DJANGO_FOLDER is not None
breakpoint_id = self.next_breakpoint_id()
template_file = debugger_unittest._get_debugger_test_file(os.path.join(self.DJANGO_FOLDER, 'my_app', 'templates', 'my_app', template))
self.write("111\t%s\t%s\t%s\t%s\t%s\t%s\tNone\tNone" % (self.next_seq(), breakpoint_id, 'django-line', template_file, line, func))
self.log.append('write_add_django_breakpoint: %s line: %s func: %s' % (breakpoint_id, line, func))
return breakpoint_id
def create_request_thre | ad(self, uri):
outer = self
class T(threading.Thread):
def run(self):
try:
from urllib.request import urlopen
except ImportError:
| from urllib import urlopen
for _ in range(10):
try:
stream = urlopen('http://127.0.0.1:%s/%s' % (outer.django_port, uri))
contents = stream.read()
if IS_PY3K:
contents = contents.decode('utf-8')
self.contents = contents
break
except IOError:
continue
t = T()
t.daemon = True
return t
class DebuggerRunnerSimple(debugger_unittest.DebuggerRunner):
def get_command_line(self):
if IS_JYTHON:
if sys.executable is not None:
# i.e.: we're running with the provided jython.exe
return [sys.executable]
else:
return [
get_java_location(),
'-classpath',
get_jython_jar(),
'org.python.util.jython'
]
if IS_CPYTHON:
return [sys.executable, '-u']
if IS_IRONPYTHON:
return [
sys.executable,
'-X:Frames'
]
raise RuntimeError('Unable to provide command line')
class DebuggerRunnerRemote(debugger_unittest.DebuggerRunner):
def get_command_line(self):
return [sys.executable, '-u']
def add_command_line_args(self, args):
writer = self.writer
ret = args + [self.writer.TEST_FILE]
ret = writer.update_command_line_args(ret) # Provide a hook for the writer
return ret
@pytest.fixture
def case_s |
py-in-the-sky/challenges | codility/brackets.py | Python | mit | 531 | 0 | """
https://cod | ility.com/programmers/task/brackets/
"""
from collections import deque
def solution(S):
q = deque()
for char in S:
if char in ')]}':
if q and are_mirrors(char, q[-1]):
q.pop()
else:
return 0
else:
q.append(char)
return int(len(q) == 0)
def are_mirrors(source, target):
if source == ')':
return target == '('
elif s | ource == ']':
return target == '['
else:
return target == '{'
|
fabiocaccamo/django-admin-interface | admin_interface/migrations/0009_add_enviroment.py | Python | mit | 960 | 0.001042 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration( | migrations.Migration):
dependencies = [
("admin_interface", "0008_change_related_modal_background_opacity_type"),
]
operat | ions = [
migrations.AddField(
model_name="theme",
name="env",
field=models.CharField(
choices=[
("development", "Development"),
("testing", "Testing"),
("staging", "Staging"),
("production", "Production"),
],
default="development",
max_length=50,
verbose_name="enviroment",
),
),
migrations.AddField(
model_name="theme",
name="env_visible",
field=models.BooleanField(default=True, verbose_name="visible"),
),
]
|
apdavison/space-station-transylvania | xkcd.py | Python | mit | 870 | 0 | """
Example taken from http://matplotlib.org/1.5.0/examples/showcase/xkcd.html
"""
import matplotlib.pyplot as plt
import numpy as np |
with plt.xkcd():
# Based on "The Data So Far" from XKCD by Randall Monroe
# http://xkcd.com/373/
index = [0, 1]
data = [0, 100]
labels = ['CONFIRMED BY EXPERIMENT', 'REFUTED BY EXPERIMENT'] |
fig = plt.figure()
ax = fig.add_axes((0.1, 0.2, 0.8, 0.7))
ax.bar(index, data, 0.25)
ax.spines['right'].set_color('none')
ax.spines['top'].set_color('none')
ax.xaxis.set_ticks_position('bottom')
ax.set_xticks([0, 1])
ax.set_xlim([-0.5, 1.5])
ax.set_ylim([0, 110])
ax.set_xticklabels(labels)
plt.yticks([])
plt.title("CLAIMS OF SUPERNATURAL POWERS")
fig.text(
0.5, 0.05,
'"The Data So Far" from xkcd by Randall Monroe',
ha='center')
plt.show()
|
tickbox-smc-ltd/xfero | src/xfero/test/test_copy_file.py | Python | agpl-3.0 | 18,198 | 0.001649 | #!/usr/bin/env python
'''Test Copy FIle'''
import os
import shutil
import unittest
import /xfero/.workflow_manager.copy_file as copy_file
class Test(unittest.TestCase):
'''
**Purpose:**
Unit Test class for the function ```copy_file```
+------------+-------------+-----------------------------------------------+
| Date | Author | Change Details |
+============+=============+===============================================+
| 02/06/2013 | Chris Falck | Created |
+------------+-------------+-----------------------------------------------+
'''
    def setUp(self):
        '''
        Create the working directories ('targetdir', 'sourcedir',
        'workingdir') under the current directory and populate one of them
        with small text files used by the tests.

        Side effects recorded on self:
          origdir            -- cwd at test start
          dirname            -- the LAST directory created (see NOTE below)
          created_files_list -- collects files the tests create, for cleanup
        '''
        self.origdir = os.getcwd()
        for dirn in ("targetdir", "sourcedir", 'workingdir'):
            self.dirname = self.origdir + os.sep + dirn
            os.makedirs(self.origdir + os.sep + dirn)
        # NOTE(review): the original comment claimed "This is sourcedir",
        # but self.dirname is the last tuple entry ('workingdir'); the test
        # files below are created there. test_copy_files reads from
        # self.dirname, so this is self-consistent — verify intent.
        os.chdir(self.dirname) # This is sourcedir
        # NOTE(review): "dont_move.txt" appears twice, so the second open()
        # simply truncates and rewrites the first — possibly a typo for
        # "dont_move2.txt"; confirm against the original test plan.
        for filename in ("file1", "file2", "file3", "move_me.txt",
                         "dont_move.txt", "dont_move.txt"):
            fhandle = open(filename, "w")
            fhandle.write("Just a test file\n")
            fhandle.close()
        os.chdir(self.origdir)
        self.created_files_list = []
self.created_files_list = []
def tearDown(self):
'''
**Purpose:**
Tear down Unit Test artifacts created in setup() for the function
```copy_file```
*Removes*
Created files
+------------+-------------+-------------------------------------------+
| Date | Author | Change Details |
+============+=============+===========================================+
| 02/06/2013 | Chris Falck | Created |
+------------+-------------+-------------------------------------------+
'''
for dirn in ('targetdir', 'sourcedir', 'doesnotexist', 'workingdir'):
self.dirname = self.origdir + os.sep + dirn
if os.path.isdir(self.dirname):
shutil.rmtree(self.dirname)
    def test_copy_files(self):
        '''
        copy_file() must produce, for every regular file in self.dirname,
        a copy whose name starts with the same prefix as the source
        (the implementation appends a timestamp to the original name).

        Each copy's path is appended to self.created_files_list so later
        cleanup can find it.
        '''
        self.src_files = os.listdir(self.dirname)
        # NOTE: loop target is deliberately an attribute (self.file_name),
        # mirroring the original; the last value persists after the loop.
        for self.file_name in self.src_files:
            self.full_file_name = os.path.join(self.dirname, self.file_name)
            if os.path.isfile(self.full_file_name):
                copied = copy_file.Copy_File()
                copied_fn = copied.copy_file(self.full_file_name)
                # Only the first 5 characters are compared: the copy keeps
                # the source prefix but carries a timestamp suffix.
                self.assertEqual(copied_fn[0:5], self.full_file_name[0:5])
                self.created_files_list.append(copied_fn)
def test_copy_file_file_does_not_exist(self):
'''
**Purpose:**
Uses the ```copy_files``` function to create a copy of a file which does
not exist to generate an OSError.
*Test Confirmation*
The following assertions are performed:
Performs an ```assertEqual``` on the result of the call to
```copy_file``` with the original filename:
```self.assertEqual(err.args, (5, 'Error copying file
/Users/chrisfalck/Documents/workspace/FTH/test_files/filedoesnotexist'),
'Invalid test result')```
+------------+-------------+-------------------------------------------+
| Date | Author | Change Details |
+============+=============+===========================================+
| 02/06/2013 | Chris Falck | Created |
+------------+-------------+-------------------------------------------+
'''
# Only way to test is to change permissions on the file to be renamed
self.full_file_name = self.dirname + os.sep + "filedoesnotexist"
try:
copied = copy_file.Copy_File()
copied.copy_file(self.full_file_name)
except OSError as err:
# print(err)
# print(err.args)
# print(err.filename)
self.assertEqual(
err.args, (2, 'No such file or directory'),
'Invalid test result')
def test_success_move(self):
'''
**Purpose:**
Uses the ```move_file``` function to move the file to a new location.
The new location does not exist so the function will create the
directory
*Test Confirmation*
The following assertions are performed:
Performs an ```assertEqual``` on the result of the call to
```move_file``` with the expected result:
```expected = self.tdir + os.sep + 'move_me.txt'```
```self.assertEqual(expected,result,"Not moved correctly")```
Performs an ```assertEqual``` on the path to ensure the file exists on
the file system with the correct file name:
```self.assertEqual(os.path.exists(expected) == 1, True, "File not
renamed correctly")```
+------------+-------------+-------------------------------------------+
| Date | Author | Change Details |
+============+=============+===========================================+
| 02/06/2013 | Chris Falck | Created |
+------------+-------------+-------------------------------------------+
'''
self.origdir = os.getcwd()
self.tdir = self.origdir + os.sep + 'doesnotexist'
self.sfile = self.origdir + os.sep + 'sourcedir' + os.sep + \
'move_me.txt'
args = (self.sfile, self.tdir)
expected = self.tdir + os.sep + 'move_me.txt'
moved = copy_file.Copy_File()
moved_fn = moved.move_ | file(*args)
self.assertEqual(expected, moved_fn, "Not moved correctly")
# Ensure file is on os
self.assertEqual(
os.path.exists(expected) == 1, True, "File not renamed correctly")
def test_success_move_targetdir_exists(self):
'''
**P | urpose:**
Uses the ```move_file``` function to move the file to a new location.
The new location exists so there is no need to create it
*Test Confirmation*
The following assertions are performed:
Performs an ```assertEqual``` on the result of the call to
```move_file``` with the expected result:
```expected = self.tdir + os.sep + 'move_me.txt'```
```self.assertEqual(expecte |
jabbalaci/Bash-Utils | is_net_back.py | Python | mit | 866 | 0.001155 | #!/usr/bin/env python3
"""
Play a sound when the Internet connection is back.
"""
import os
import socket
from pathlib import Path
from time import sleep
from lib import network
from lib.audio import play
ROOT = os.path.dirname(os.path.abspath(__file__))
TIMEOUT = 3
AUDIO = str(Path(ROOT, "assets", "alert.wav"))
def main():
    """Poll connectivity until the Internet is back, then play the alert
    sound three times and exit.

    NOTE: reconstructed — the original ``def`` and ``sleep`` lines were
    corrupted by stray ``|`` artifacts.
    """
    attempt = 0
    while True:
        attempt += 1
        print('# testing...' if attempt == 1 else '# test again...')
        if network.is_internet_on(method=3):
            print('# Whoa, your net is alive!')
            for _ in range(3):
                play(AUDIO)
            break
        else:
            print('# no connection, waiting...')
            sleep(10)
#############################################################################
if __name__ == "__main__":
    # Global socket timeout so each connectivity probe fails fast
    # instead of hanging.
    socket.setdefaulttimeout(TIMEOUT)
    main()
|
bcgov/gwells | app/backend/wells/migrations/0123_retire_wells_sub_class_codes_water_1589.py | Python | apache-2.0 | 22,630 | 0.006673 | # Generated by Django 2.2.18 on 2021-02-17 22:10
from django.db import migrations
# What are we doing in this migration?:
# As per WATER-1589 JIRA ticket, we:
# - create a new Not Applicable for WATR_SPPLY well_sublclass_code
# - retire well_subclass_code = 'SPECIAL' AND well_class_code = 'GEOTECH'
# well_subclass_code = 'DOMESTIC' AND well_class_code = 'WATR_SPPLY'
# well_subclass_code = 'NON_DOMEST' AND well_class_code = 'WATR_SPPLY'
# by altering the expiry_date to pgsql now()
# - update the activity_submission records where well_subclass_code is DOMESTIC OR NON_DOMEST and class code is WATR_SPPLY to WATR_SPPLY/NA
# - update the well records where well_subclass_code is DOMESTIC OR NON_DOMEST and class code is WATR_SPPLY to WATR_SPPLY/NA
#
# While running this migration I found that the fixtures run after the migration
# This causes issues cause our code table data for well class codes and well subclass codes aren't present
# 3 well subclass codes have orphaned parent well class code records (ie. there's no well class code for these and its null) (new ticket to be created for a cleanup on that)
# - Adjusted the fixtures to not create the code table values for well class code and well subclass code, this is done in the migration
#
# Manual Testing Instructions: (automating this as a test doesn't seem straightforward):
# Developer review steps:
# 1. pull code, ensuring you're starting from a fresh database
# to do this, ensure that you do not have a volume for postgres (docker volume ls | grep gwells_pgdata-volume) should return no rows then run docker volume rm gwells_pgdata-volume
# if this is a problem (volume in use) run docker-compose down then delete the volume docker volume rm gwells_pgdata-volume
# 2. run docker-compose up
# 3. once the backend container is running, connect to the database and run the following sql commands:
#
# -- are the subclass codes expired?
# select count(*) as subclass_codes_expired from well_subclass_code where expiry_date < now() and update_user = 'WATER-1589';
#
# -- is our new subclass_code present?
# select count(*) as new_subclass_present from well_subclass_code where create_date > '2021-01-01' and create_user = 'WATER-1589';
#
# -- get wells with the expired subclass guids (track the resulting count)
# select count(*) from well where well_subclass_guid in ('5a3147d8-47e7-11e7-a919-92ebcb67fe33',
# '5a313ffe-47e7-11e7-a919-92ebcb67fe33',
# '5a3141c0-47e7-11e7-a919-92ebcb67fe33');
#
# -- run the update (track the resulting count):
# UPDATE well
# SET well_subclass_guid = (SELECT well_subclass_guid FROM well_subclass_code WHERE well_subclass_code = 'NA' AND well_class_code = 'WATR_SPPLY' LIMIT 1),
# well_class_code = 'WATR_SPPLY',
# update_user = 'WATER-1589',
# update_date = now()
# WHERE well_subclass_guid in (SELECT well_subclass_guid FROM well_subclass_code
# WHERE (well_subclass_code = 'DOMESTIC' AND well_class_code = 'WATR_SPPLY')
# OR (well_subclass_code = 'NON_DOMEST' AND well_class_code = 'WATR_SPPLY'));
#
# -- ensure that the well records have been changed and do not have well_subclass guids from expired subclass codes (track the resulting count)
# select count(*) from well where well_subclass_guid in ('5a3147d8-47e7-11e7-a919-92ebcb67fe33',
# '5a313ffe-47e7-11e7-a919-92ebcb67fe33',
# '5a3141c0-47e7-11e7-a919-92ebcb67fe33');
#
# -- ensure that we now have well records with our new subclass guid (track this result)
# select count(*) from well where well_subclass_guid = 'ce97445a-664e-44f1-a096-95c97ffd084e' and update_user = 'WATER-1589';
#
# -- finally, run this select where we double check we still have wells records that were not changed (not in)
# select * from well where well_subclass_guid not in ('5a3147d8-47e7-11e7-a919-92ebcb67fe33',
# '5a313ffe-47e7-11e7-a919-92ebcb67fe33',
# '5a3141c0-47e7-11e7-a919-92ebcb67fe33')
# That's the end of the proofing
# Audit identifiers stamped into create_user/update_user columns by this
# migration's SQL.
USER = 'WATER-1589'
ETL_USER = 'ETL_USER'
WELLS_USER = 'WELLS_USER'
# Pre-generated UUID for the new 'NA' / WATR_SPPLY well-subclass row, so the
# manual verification SQL in the header can reference it.
NA_WATR_SPPLY_WELL_SUBCLASS_CODE_UUID = 'ce97445a-664e-44f1-a096-95c97ffd084e'
# Sentinel expiry timestamp meaning "never expires".
DEFAULT_NEVER_EXPIRES_DATE = '9999-12-31 23:59:59.999999+00'
# insert well class codes if they're not present
CREATE_IF_NOT_EXISTS_WELL_CODES = f"""
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2019-02-12 01:00:00+00', '{ETL_USER}', '2019-02-12 01:00:00+00', 'UNK', 'Unknown', 19, '2019-02-12 01:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'UNK');
INSERT INTO well_class_code(create_user, create_d | ate, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'WATR_SPPLY', 'Water Supply', 2, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'WATR_SPPL | Y');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'MONITOR', 'Monitoring', 4, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'MONITOR');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'INJECTION', 'Injection', 6, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'INJECTION');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'GEOTECH', 'Geotechnical', 8, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'GEOTECH');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'RECHARGE', 'Recharge', 12, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'RECHARGE');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'REMEDIATE', 'Remediation', 14, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'REMEDIATE');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{ETL_USER}', '2017-07-01 08:00:00+00', '{ETL_USER}', '2017-07-01 08:00:00+00', 'CLS_LP_GEO', 'Closed Loop Geo', 16, '2018-05-17 00:00:00+00', '{DEFAULT_NEVER_EXPIRES_DATE}'
WHERE NOT EXISTS (SELECT 1 FROM well_class_code WHERE well_class_code = 'CLS_LP_GEO');
INSERT INTO well_class_code(create_user, create_date, update_user, update_date, well_class_code, description, display_order, effective_date, expiry_date)
SELECT '{WELLS_USER}', '2017-07-01 08:00:00+00', '{WELLS_USER}', '2017-07-01 08:00:00+00', 'DEW_DRA', 'Dewatering/drainage', 20, '2020-01 |
seblefevre/testerman | plugins/codecs/SoapDigitalSignature.py | Python | gpl-2.0 | 12,816 | 0.025523 | # -*- coding: utf-8 -*-
##
# This file is part of Testerman, a test automation system.
# Copyright (c) 2011 Sebastien Lefevre and other contributors
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later
# version.
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
##
##
# A special codec that deals with SOAP Ws-Security 1.0/1.1 header
# for digital signature.
##
import CodecManager
import SoapSecurity
import libxml2
# A default 1024-bit key
DEFAULT_SIGNING_PRIVATE_KEY = \
"""-----BEGIN RSA PRIVATE KEY-----
MIICXQIBAAKBgQDa7M8bdhCyls5fAcB4hnXsMSE84qjrcNExT9VgLQvwnCgw8xgj
bVbPWysyJumGjWVN1YP9RuwGWuq0BroxlFo54ZMneZcQKm0e2+0nATtzRF9j6o3X
kcMwQfLjC9AUG5bV6odwLbeFrtKW/eZMGoDTVp/BBggJYbzyPeN0SytQwwIDAQAB
AoGBALFhJf1uD+FjZxp7ZONCrtEMjY2zaII7CoQV1yDx3ra5D6d5j5lEwg2IJNuh
w5yNfAMweJ0ClcIgcAIlYT9CoEa82BBUDn797ZUrUN1mgTXbzioyDBdHG8usFjPn
5nvcknLTbLvrlAa9t5arCcKQ511OQD+ktnhcHB4TkBtYiugBAkEA/xCKJg2q0SCL
z7Za1Jlm6T/7/IJ1Gx3RGCUccmovTRzZvo6TsWLRFiMACygr8DoAOC5tLEqj6NBu
OidgiC3bwwJBANu6VzogJXoZXAZrP2HYY85AEGWnhhXmmOupGNFPqPjBiG/urqoc
uyULq69++xtmK6BanuaSshOj3GV6A6MGZwECQDlt2+0dfqx/i3tFL8ZWk9lI0s/T
/9IPMJkjIfiQ9/2A1XYWXCLAgRte3g+lB9+a75m2ulYSqD0vUOI/I3kF+kkCQQCt
E/f3kjDTH7ysVbhkc1YStcX0vOPSxoS4RMeGwI/h+lhliwZMezsy8CF5qLVVnMJK
mndGOlFJRS6rRFQvCzEBAkBvPd3VB4lN9RGfGbQbGZW/y1BBwpCflj8w5+Jy/jvT
UYfxMLhpPbbtusTSDVbBnPEm9uOB/W4uPI56i535RoYf
-----END RSA PRIVATE KEY-----"""
# With an associated self-signed certificate
# FIXME: does not contain a SKI extension
DEFAULT_SIGNING_CERTIFICATE = \
"""-----BEGIN CERTIFICATE-----
MIIC7zCCAligAwIBAgIJAIq7T1myCRCTMA0GCSqGSIb3DQEBBQUAMFkxCzAJBgNV
BAYTAkZSMQ8wDQYDVQQIEwZGcmFuY2UxEjAQBgNVBAoTCVRlc3Rlcm1hbjElMCMG
A1UEAxMcVGVzdGVybWFuIFdzLVNlY3VyaXR5IFNhbXBsZTAeFw0xMTAxMDYxMDM5
MjRa | Fw0xMzAxMDUxMDM5MjRaMFkxCzAJBgNVBAYTAkZSMQ8wDQYDVQQIEwZGcmFu
Y2UxEjAQBgNVBAoTCVRlc3Rlcm1hbjElMCMGA1UEAxMcVGVzdGVybWFuIFdzLVNl
Y3VyaXR5IFNhbXBsZTCBnzANBgkqhkiG9w0BAQEFAAOBjQAwgYkCgYEA2uzPG3YQ
spbOXwHAeIZ17DEhPOKo63DRMU/VYC0L8JwoMPMYI21Wz1srMibpho1lTdWD/Ubs
BlrqtAa6MZRaOeGTJ3mXECptHtvtJ | wE7c0RfY+qN15HDMEHy4wvQFBuW1eqHcC23
ha7Slv3mTBqA01afwQYICWG88j3jdEsrUMMCAwEAAaOBvjCBuzAdBgNVHQ4EFgQU
3MarnhFZj6o3UUfblFzxjuVVOH8wgYsGA1UdIwSBgzCBgIAU3MarnhFZj6o3UUfb
lFzxjuVVOH+hXaRbMFkxCzAJBgNVBAYTAkZSMQ8wDQYDVQQIEwZGcmFuY2UxEjAQ
BgNVBAoTCVRlc3Rlcm1hbjElMCMGA1UEAxMcVGVzdGVybWFuIFdzLVNlY3VyaXR5
IFNhbXBsZYIJAIq7T1myCRCTMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQAD
gYEAaEOWlTmBwS1xkwEaa+LoDblj4KNtOIz0z/WKhcsS3ngnuqbpkt95xyIyNJ9P
9rY7FIuQl1XRuzgT/IlXoe9F2zM8UTHke/dbMGHCBGDHiyfOz91nprqwCY83OReH
pbiSGFhh0br+8OpaldQmqBMj1AWYSGmBnml0GV/Cv49UC/o=
-----END CERTIFICATE-----"""
class SoapDigitalSignatureCodec(CodecManager.Codec):
"""
= Identification and Properties =
Codec ID: `soap11.ds`
Properties:
|| '''Name''' || '''Type''' || '''Default value''' || '''Description''' ||
|| prettyprint || boolean || `False` || encoding/signing: pretty xml print the signed output ||
|| encoding || string || `'utf-8'` || encoding/signing: encoding format. decoding: decoding format if no prolog is present ||
|| write_prolog || boolean || `True` || encoding/signing: write the `<?xml version="1.0" encoding="..." ?>` prolog or not ||
|| signing_key || string || `None` || encoding/signing: the private key to use to sign the outgoing message, provided as a PEM string. If none is provided, a default key is used. ||
|| signing_certificate || string || `None` || encoding/signing: the X.509 certificate associated to the key above, provided as a PEM string. If none is provided, a default certificate associated to the private key above is used. ||
|| expected_certificates || list of strings || `[]` || decoding/verification: a list of X.509 certificates, provided as PEM strings, that will be used as signing candidates. One of them should be referenced in the signature to validate, based on its subject key identifier. By default, the default signing certificate above is included. ||
= Overview =
A special codec that deals with SOAP Ws-Security 1.0/1.1 header
for digital signature:
- upon decoding, verifies the SOAP message's signature,
- upon encoding, signs the message (soap:Body only), adding a SOAP Security/Signature Header in the outgoing message.
This codec takes and returns a string,
i.e. it is not responsible for actually turning an XML payload
into a higher level structure.
You may use with CodecXml for that purpose, for instance:
{{{
#!python
signing_privkey = '''-----BEGIN RSA PRIVATE KEY-----
MIICXgIBAAKBgQDMJBNZoKMEoEs+m/V8jjMAX57uQEJsyYe+2SbWjrZ3knQb+3+6
iMywhduDuVJJhE7leOoDZIlghOCr1CEkIZK+/HoH/kg++Olz8taOG8L/P3GnMfx4
...
gj1qvwBfBVaLGVep1QnQt1DFBbKP36I=
-----END RSA PRIVATE KEY-----'''
# The certificate associated with the key above.
# Must be X509v3 with subjectKeyIdentifier extension.
signing_certificate = '''-----BEGIN CERTIFICATE-----
MIIDEzCCAnygAwIBAgIJAIfjr0Rpa5W7MA0GCSqGSIb3DQEBBQUAMGUxCzAJBgNV
BAYTAkZSMQ8wDQYDVQQIEwZGcmFuY2UxETAPBgNVBAcTCEdyZW5vYmxlMQwwCgYD
...
gj1qvwBfBVaLGVep1QnQt1DFBbKP36I=
-----END CERTIFICATE-----
'''
# We create a codec alias to associate the outgoing signature
# attributes
define_codec_alias('ds', 'soap11.ds',
signing_key = signing_privkey,
signing_cert = signing_certificate)
class TC_SOAP_DS_SAMPLE(TestCase):
def body(self):
port01 = self.mtc['port01']
port02 = self.mtc['port02']
connect(port01, port02)
# Create a structured xml document
document = ('Envelope', { 'ns': 'http://...', 'children': [ ... ])
# Stacked codecs - first we serialize the document with the 'xml' codec,
# then we sign it with the 'ds' aliased codec .
message = with_('ds', with_('xml', document))
# This sends a signed message
port01.send(message)
# On matching event, you'll be able to see the signed message
port02.receive()
# Of course, we could use the 'ds' aliased codec directly with
# an xml string
xml = '<soapenv:Envelope xmlns:ns="http://acme.com/api/1" xmlns:soapenv="http://schemas.xmlsoap.org/soap/envelope/">
<soapenv:Body>
<ns:operationRequest>
<ns:someParameter />
</ns:operationRequest>
</soapenv:Body>
</soapenv:Envelope>'
port01.send(with_('ds', xml))
# On matching event, you'll be able to see the signed message
port02.receive()
TC_SOAP_DS_SAMPLE().execute()
}}}
This codec, which is actually a signer on encoding, and
a signature verifier on decoding, does not support the whole
Ws-Security 1.1 standard yet.
== Signing SOAP Messages ==
The message to sign must be a valid SOAP 1.1 envelope, and must
not contain any saop:Header/wsse:Security/ds:Signature element
yet. It is added by this codec.
The signature model is currently limited to the Ws-Security
X.509 Certificate Token Profile 1.1, and more particularly to the case
where the signing certificate is referenced via its Subject Key
Identifier (SKI).[[BR]]
As a consequence, it requires that the certificate is a X.509v3
certificate with the SKI extension. Without such a certificate,
the signing/encoding operation fails.
The "encoded" output is simply the same XML document as in input
(string buffer), with the added signature elements.
== Verifying Incoming SOAP Messages ==
The decode operation is actually a signature validation.
If the signature contained into the XML document is correct,
the decoding output is a structure containing:
- the original message (field `message`)
- the signing token (field `signedBy`), as a choice. For now, only the `certificate` choice is provided, and is filled with the signing certificate in PEM format.
== Limitations ==
This codec does not support the full range of algorithms required
to be Ws-Security 1.1 compliant.
In addition, only the X.509 Certificate Token Profile is supported for now.
Username profile, in parti |
wangyixiaohuihui/spark2-annotation | python/pyspark/ml/__init__.py | Python | apache-2.0 | 1,130 | 0.000885 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. | You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Li | cense for the specific language governing permissions and
# limitations under the License.
#
"""
DataFrame-based machine learning APIs to let users quickly assemble and configure practical
machine learning pipelines.
"""
from pyspark.ml.base import Estimator, Model, Transformer
from pyspark.ml.pipeline import Pipeline, PipelineModel
__all__ = ["Transformer", "Estimator", "Model", "Pipeline", "PipelineModel"]
|
ssharpjr/loader-controller | examples/lcd_mcp.py | Python | mit | 749 | 0.001335 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Initialize the LCD.
import Adafruit_CharLCD as LCD
import Adafruit_GPIO.MCP230xx as MCP
import RPi.GPIO as io # For standard GPIO | methods.
# Define the MCP23017 expander pins wired to the LCD control/data lines.
lcd_rs = 0
lcd_en = 1
lcd_d4 = 2
lcd_d5 = 3
lcd_d6 = 4
lcd_d7 = 5
# Pins driving the RGB backlight channels.
lcd_red = 6
lcd_green = 7
lcd_blue = 8
# LCD geometry for a 20x4 character display.
lcd_columns = 20
lcd_rows = 4
# Initialize the MCP23017 device using its default 0x20 I2C address.
gpio = MCP.MCP23017()
# Initialize the LCD using the pins above, routed through the MCP expander
# (gpio=...) instead of the Pi's native GPIO header.
lcd = LCD.Adafruit_RGBCharLCD(lcd_rs, lcd_en, lcd_d4, lcd_d5, lcd_d6, lcd_d7,
                              lcd_columns, lcd_rows, lcd_red, lcd_green,
                              lcd_blue, gpio=gpio)
|
finron/finepy | fine/models/tag.py | Python | bsd-3-clause | 1,612 | 0.00062 | # coding: utf-8
'''
tag.py
~~~~~~
'''
from fine import db
class Tag(db.Model):
    """A weighted content tag; heavier tags rank higher in listings.

    NOTE: reconstructed — the original ``__init__`` signature and the
    ``Tag.get_one`` call were corrupted by stray ``|`` artifacts.
    """

    __tablename__ = 'tags'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(32), unique=True)
    weight = db.Column(db.Integer)
    note = db.Column(db.String(140))

    def __init__(self, *args, **kwargs):
        super(Tag, self).__init__(*args, **kwargs)

    def __repr__(self):
        return '<Tag %d>' % self.id

    @staticmethod
    def generate_fake():
        """Seed the table with sample tags; bump weight for existing names."""
        rv = ['Python', 'Linux', 'C',
              'stackoverflow', 'JavaScript', 'Alogrithms',
              'Vim', 'Nginx', 'Flask',
              'SQLAlchemy', 'Java', 'jQuery',
              'SQL', 'Tornado', 'Werkzeug'
              ]
        for i, x in enumerate(rv, 1):
            t = Tag.get_one(x)
            if t:
                t.weight += 1
                db.session.commit()
                continue
            t = Tag(name=x, weight=i,
                    note='test' + str(i))
            db.session.add(t)
            db.session.commit()

    @staticmethod
    def get_posts(self):
        '''Get posts attached to *self*.

        NOTE(review): a @staticmethod taking an instance — presumably
        called as ``Tag.get_posts(tag)``; verify before changing.
        '''
        from .post import PostTag
        posttag = PostTag.query.filter(PostTag.tag_id == self.id).first()
        if posttag:
            return posttag.posts

    @classmethod
    def get_one(cls, name):
        """Look a tag up by its unique name; None when absent."""
        return cls.query.filter_by(name=name).first()

    @classmethod
    def is_empty(cls):
        """Return True when the table holds no rows."""
        return cls.query.first() is None

    @classmethod
    def get_top_x(cls, count=9):
        """Return the *count* heaviest tags, heaviest first."""
        return cls.query.order_by(cls.weight.desc()).limit(count).all()
|
olivierdalang/stdm | third_party/FontTools/fontTools/ttLib/tables/T_S_I_J_.py | Python | gpl-2.0 | 76 | 0.039474 | import asciiTable
class table_T_S_I_J_(asciiTable.asciiTable):
    """'TSIJ' table: plain ASCII text, handled entirely by asciiTable.

    NOTE: class name reconstructed from the file path (T_S_I_J_.py);
    the original line was corrupted by a stray '|' artifact.
    """
    pass
| |
Johnzero/OE7 | OE-debug文件/oecn_base_fonts/__openerp__.py | Python | agpl-3.0 | 2,131 | 0.007977 | # -*- encoding: utf-8 -*-
# __author__ = tony@openerp.cn
##############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP addon manifest. NOTE: the description string was corrupted by a
# stray '|' artifact ("chara | cters") and has been reconstructed; the
# author's original wording is otherwise preserved verbatim.
{
    "name" : "pdf report support for your language",
    "version" : "2.1",
    "author" : "Shine IT",
    "maintainer": "jeff@openerp.cn",
    "website": "http://www.openerp.cn",
    "description": """
    Fonts defined in the default report may not support characters
    in your language, which may cause jarbled characters in the printed
    pdf report.
    This addon will solve abovementioned issue elegently by using openerp
    customfonts API to replace the original fonts with your seleted fonts.
    Please click open Settings/Configuration/Configuration Wizards/Configuration Wizards
    click the launch buttong(gear icon) on the line of 'Configure fonts mapping for pdf report'
    set up the font mapping from the poped window there and
    have fun!
    ---------Tips-----------
    1.when you restore the database to another system, please run the configuration wizards again.
    """,
    "depends" : ["base", 'base_setup'],
    "category" : "Generic Modules/Base",
    "demo_xml" : [],
    "update_xml" : [
        "res_config_view.xml"
    ],
    "license": "GPL-3",
    "active": False,
    "auto_install": True,
    "installable": True
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
oyamad/QuantEcon.py | quantecon/filter.py | Python | bsd-3-clause | 1,768 | 0.00509 | """
function for filtering
"""
import numpy as np
def hamilton_filter(data, h, p=None):
    r"""
    Apply the "Hamilton filter" to a time series.

    http://econweb.ucsd.edu/~jhamilto/hp.pdf

    Parameters
    ----------
    data : array_like
        The series to decompose.
    h : int
        Time horizon that we are likely to predict incorrectly.
        The original paper recommends 2 for annual data, 8 for quarterly
        data, 24 for monthly data.
    p : int, optional
        Number of lags in the regression; must be greater than ``h``.
        When omitted, a random-walk forecast is assumed.  May also be
        passed positionally, preserving the old ``*args`` calling
        convention (``hamilton_filter(data, h, p)``).

    Notes
    -----
    For seasonal data it is desirable for ``p`` and ``h`` to be integer
    multiples of the number of observations per year
    (e.g. quarterly data: h = 8, p = 4).

    Fix over the previous ``*args`` version: calling with more than one
    extra positional argument used to fall through both branches and
    raise ``NameError`` on the undefined ``cycle``; it now raises a clear
    ``TypeError`` from the signature itself.

    Returns
    -------
    cycle : ndarray
        Cyclical component. The first ``p + h - 1`` entries (or ``h``
        entries in the random-walk case) are NaN.
    trend : ndarray
        Trend component (``data - cycle``).
    """
    y = np.asarray(data, float)
    T = len(y)

    if p is None:
        # Random walk: the h-step-ahead forecast of y[t] is y[t-h], so the
        # cycle is simply the h-period difference (undefined for t < h).
        cycle = np.append(np.full(h, np.nan), y[h:T] - y[0:T - h])
        trend = y - cycle
    else:
        # OLS regression of y[t] on a constant and y[t-h], ..., y[t-h-p+1].
        X = np.ones((T - p - h + 1, p + 1))
        for j in range(1, p + 1):
            X[:, j] = y[p - j:T - h - j + 1]
        b = np.linalg.solve(X.T @ X, X.T @ y[p + h - 1:T])
        # Trend is the fitted value; the first p+h-1 periods are undefined.
        trend = np.append(np.full(p + h - 1, np.nan), X @ b)
        cycle = y - trend
    return cycle, trend
|
orlenko/bccf | src/mezzanine/generic/migrations/0009_auto__del_field_threadedcomment_email_hash.py | Python | unlicense | 7,904 | 0.008097 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: drop the obsolete ``email_hash`` column from ThreadedComment."""
        # Deleting field 'ThreadedComment.email_hash'
        db.delete_column('generic_threadedcomment', 'email_hash')
def backwards(self, orm):
# Adding field 'ThreadedComment.email_hash'
db.add_column('generic_threadedcomment', 'email_hash', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True), keep_default=False)
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type | ': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', | [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 18, 17, 51, 43, 560694)'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 4, 18, 17, 51, 43, 560579)'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'comments.comment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'Comment', 'db_table': "'django_comments'"},
'comment': ('django.db.models.fields.TextField', [], {'max_length': '3000'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'content_type_set_for_comment'", 'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip_address': ('django.db.models.fields.IPAddressField', [], {'max_length': '15', 'null': 'True', 'blank': 'True'}),
'is_public': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_removed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'object_pk': ('django.db.models.fields.TextField', [], {}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'submit_date': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'comment_comments'", 'null': 'True', 'to': "orm['auth.User']"}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'user_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'ordering': "('_order',)", 'object_name': 'AssignedKeyword'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'generic.threadedcomment': {
'Meta': {'ordering': "('submit_date',)", 'object_name': 'ThreadedComment', '_ormbases': ['comments.Comment']},
'by_author': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'comment_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['comments.Comment']", 'unique': 'True', 'primary_key': 'True'}),
'replied_to': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'comments'", 'null': 'True', 'to': "orm['generic.ThreadedComment']"})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['generic']
|
summychou/GGFilm | WeRoBot/myrobot/admin.py | Python | apache-2.0 | 1,012 | 0.000988 | from django.contrib import admin
from models import FilmSearch, Films, Developers, Notes
class FilmSearchAdmin(admin.ModelAdmin):
    """Admin change-list options for film/developer search entries."""
    # Columns shown in the admin change list.
    list_display = ("Film", "Developer", "Dilution", "ASA_ISO",
                    "create_timestamp", "last_update_timestamp")
    # Fields offered in the right-hand filter sidebar.
    list_filter = ("Film",)
class FilmsAdmin(admin.ModelAdmin):
    """Admin change-list options for Film records."""
    # Columns shown in the admin change list.
    list_display = ("Film", "create_timestamp", "last_update_timestamp")
    # Fields offered in the right-hand filter sidebar.
    list_filter = ("Film",)
class DevelopersAdmin(admin.ModelAdmin):
    """Admin change-list options for Developer records."""
    # Columns shown in the admin change list.
    list_display = ("Developer", "create_timestamp", "last_update_timestamp")
    # Fields offered in the right-hand filter sidebar.
    list_filter = ("Developer",)
class NotesAdmin(admin.ModelAdmin):
    """Admin change-list options for Note records."""
    # Columns shown in the admin change list.
    list_display = ("Notes", "Remark", "create_timestamp", "last_update_timestamp")
    # Fields offered in the right-hand filter sidebar.
    list_filter = ("Notes",)
# Attach each model to the admin site together with its options class above.
admin.site.register(FilmSearch, FilmSearchAdmin)
admin.site.register(Films, FilmsAdmin)
admin.site.register(Developers, DevelopersAdmin)
admin.site.register(Notes, NotesAdmin)
|
abpai/mailin-test | python/dkim/canonicalization.py | Python | mit | 4,252 | 0 | # This software is provided 'as-is', without any express or implied
# warranty. In no event will the author be held liable for any damages
# arising from the use of this software.
#
# Permission is granted to anyone to use this software for any purpose,
# including commercial applications, and to alter it and redistribute it
# freely, subject to the following restrictions:
#
# 1. The origin of this software must not be misrepresented; you must not
# claim that you wrote the original software. If you use this software
# in a product, an acknowledgment in the product documentation would be
# appreciated but is not required.
# 2. Altered source versions must be plainly marked as such, and must not be
# misrepresented as being the original software.
# 3. This notice may not be removed or altered from any source distribution.
#
# Copyright (c) 2008 Greg Hewgill http://hewgill.com
#
# This has been modified from the original software.
# Copyright (c) 2011 William Grant <me@williamgrant.id.au>
import re
# Names exported by ``from dkim.canonicalization import *``; the algorithm
# classes themselves are reached through CanonicalizationPolicy.
__all__ = [
    'CanonicalizationPolicy',
    'InvalidCanonicalizationPolicyError',
    ]
class InvalidCanonicalizationPolicyError(Exception):
    """Raised when the c= value of a DKIM-Signature cannot be parsed."""
def strip_trailing_whitespace(content):
    """Drop runs of tabs/spaces sitting immediately before each CRLF."""
    pattern = re.compile(b"[\t ]+\r\n")
    return pattern.sub(b"\r\n", content)
def compress_whitespace(content):
    """Collapse every run of tabs/spaces into a single space."""
    pattern = re.compile(b"[\t ]+")
    return pattern.sub(b" ", content)
def strip_trailing_lines(content):
    """Reduce any trailing empty lines of *content* to exactly one CRLF.

    An empty (or CRLF-free) body canonicalizes to a single CRLF.

    count=1 is the bug fix: since Python 3.7, re.sub() also substitutes the
    empty match at end-of-string that is adjacent to the non-empty trailing
    match, so the unbounded sub appended a second CRLF to every body that
    already ended in CRLF.  Limiting to one substitution restores the
    intended (pre-3.7) behaviour.
    """
    return re.sub(b"(\r\n)*$", b"\r\n", content, count=1)
def unfold_header_value(content):
    """Join folded header lines by deleting every CRLF from the value."""
    return content.replace(b"\r\n", b"")
class Simple:
    """The "simple" canonicalization algorithm."""

    name = b"simple"

    @staticmethod
    def canonicalize_headers(headers):
        # Simple header canonicalization leaves headers exactly as they are.
        return headers

    @staticmethod
    def canonicalize_body(body):
        # Only empty lines at the end of the message body are removed.
        return strip_trailing_lines(body)
class Relaxed:
    """The "relaxed" canonicalization algorithm."""

    name = b"relaxed"

    @staticmethod
    def canonicalize_headers(headers):
        # Lowercase each field name, unfold the value, squeeze whitespace
        # runs to a single space, and strip leading/trailing whitespace.
        canonical = []
        for header in headers:
            field = header[0].lower().rstrip()
            value = compress_whitespace(unfold_header_value(header[1])).strip()
            canonical.append((field, value + b"\r\n"))
        return canonical

    @staticmethod
    def canonicalize_body(body):
        # Strip whitespace at line ends, squeeze remaining whitespace runs,
        # then drop empty lines at the end of the message body.
        body = strip_trailing_whitespace(body)
        body = compress_whitespace(body)
        return strip_trailing_lines(body)
class CanonicalizationPolicy:
    """A header-algorithm / body-algorithm pair parsed from a c= tag."""

    def __init__(self, header_algorithm, body_algorithm):
        self.header_algorithm = header_algorithm
        self.body_algorithm = body_algorithm

    @classmethod
    def from_c_value(cls, c):
        """Construct the canonicalization policy described by a c= value.

        May raise an C{InvalidCanonicalizationPolicyError} if the given
        value is invalid

        @param c: c= value from a DKIM-Signature header field
        @return: a C{CanonicalizationPolicy}
        """
        if c is None:
            c = b'simple/simple'
        parts = c.split(b'/')
        # c.split always yields at least one part; more than two is invalid.
        if len(parts) > 2:
            raise InvalidCanonicalizationPolicyError(c)
        if len(parts) == 1:
            # Per RFC 6376, a lone header algorithm implies a simple body.
            parts.append(b'simple')
        try:
            header_algorithm = ALGORITHMS[parts[0]]
            body_algorithm = ALGORITHMS[parts[1]]
        except KeyError as e:
            raise InvalidCanonicalizationPolicyError(e.args[0])
        return cls(header_algorithm, body_algorithm)

    def to_c_value(self):
        """Render this policy back into c= tag text, e.g. b"relaxed/simple"."""
        return self.header_algorithm.name + b'/' + self.body_algorithm.name

    def canonicalize_headers(self, headers):
        return self.header_algorithm.canonicalize_headers(headers)

    def canonicalize_body(self, body):
        return self.body_algorithm.canonicalize_body(body)
ALGORITHMS = dict((c.name, c) for c in (Simple, Relaxed))
|
aterrel/blaze | blaze/serve/server.py | Python | bsd-3-clause | 2,462 | 0.000812 | from __future__ import absolute_import, division, print_function
from collections import Iterator
from flask import Flask, request, jsonify, json
from functools import partial, wraps
from .index import parse_index
class Server(object):
    """Flask application exposing a dict of named datasets over HTTP."""

    __slots__ = 'app', 'datasets'

    def __init__(self, name='Blaze-Server', datasets=None):
        self.app = Flask(name)
        self.datasets = datasets if datasets else dict()
        # Replay every globally registered view, binding it to this
        # server's datasets dict via partial application.
        for route_args, route_kwargs, view in routes:
            bound_view = wraps(view)(partial(view, self.datasets))
            self.app.route(*route_args, **route_kwargs)(bound_view)

    def __getitem__(self, key):
        return self.datasets[key]

    def __setitem__(self, key, value):
        self.datasets[key] = value
        return value
return value
# Global registry of (args, kwargs, view function) triples; Server.__init__
# replays these against its own Flask app instance.
routes = []


def route(*args, **kwargs):
    """Decorator recording a view plus the Flask routing arguments for it."""
    def register(func):
        routes.append((args, kwargs, func))
        return func
    return register
@route('/datasets.json')
def dataset(datasets):
    """Serve a JSON object mapping each dataset name to its datashape."""
    return jsonify({name: str(ds.dshape) for name, ds in datasets.items()})
@route('/data/<name>.json', methods=['POST', 'PUT', 'GET'])
def data(datasets, name):
    """ Basic indexing API

    Allows remote indexing of datasets.  Takes indexing data as JSON.

    Example
    -------

    For the following array:

        [['Alice', 100],
         ['Bob', 200],
         ['Charlie', 300]]

        schema = '{name: string, amount: int32}'

    the request

        url: /data/table-name.json
        POST-data: {'index': [{'start': 0, 'step': 3}, 'name']}

    returns a response like

        {"name": "table-name",
         "index": [0, "name"],
         "datashape": "3 * string",
         "data": ["Alice", "Bob", "Charlie"]}
    """
    # NOTE(review): exact match rejects "application/json; charset=utf-8",
    # and a missing Content-Type header raises KeyError here -- confirm
    # clients always send the bare media type.
    if request.headers['content-type'] != 'application/json':
        return ("Expected JSON data", 404)
    try:
        data = json.loads(request.data)
    except ValueError:
        return ("Bad JSON. Got %s " % request.data, 404)

    try:
        dset = datasets[name]
    except KeyError:
        return ("Dataset %s not found" % name, 404)

    try:
        # Turn the JSON index description into a real indexing object.
        index = parse_index(data['index'])
    except ValueError:
        return ("Bad index", 404)

    try:
        # .py gives Python objects (rather than raw values) for JSON output.
        rv = dset.py[index]
    except RuntimeError:
        return ("Bad index: %s" % (str(index)), 404)

    # Materialize lazy results so they are JSON-serializable.
    if isinstance(rv, Iterator):
        rv = list(rv)

    return jsonify({'name': name,
                    'index': data['index'],
                    'datashape': str(dset.dshape.subshape[index]),
                    'data': rv})
|
motomizuki/Qlone | app/views/view.py | Python | mit | 6,101 | 0.00359 | # -*- coding: utf-8 -*-
__author__ = 'hmizumoto'
from flask import Blueprint, request, render_template, abort
from app.utils import jwt_decode
from app.views.auth import check_login, authorized_user, login
from app.models import DOMAIN
from app import app
from app.decoretor import login_required
from bson import ObjectId
import re
from logging import getLogger, StreamHandler, DEBUG
# Module-level logger with its own DEBUG-level stream handler.
logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)

# All views below hang off this blueprint, mounted under APPLICATION_ROOT.
module = Blueprint('view', __name__, url_prefix=app.config["APPLICATION_ROOT"])
@module.route("/")
def index():
"""
ログイン,アカウント作成
"""
user = check_login()
if not user:
return render_template("index.html", prefix=app.config["APPLICATION_ROOT"])
return render_template("home.html", user=user, prefix=app.config["APPLICATION_ROOT"])
@module.route("/drafts/")
@login_required
def drafts(oid=None):
"""
下書き一覧
"""
user = authorized_user()
return render_template("drafts.html", oid=oid, user=user, prefix=app.config["APPLICATION_ROOT"])
@module.route("/drafts/new")
@module.route("/drafts/<oid>/edit")
@login_required
def edit_drafts(oid=None):
"""
下書き作成
"""
user = authorized_user()
model = DOMAIN["items"]
draft = dict()
if oid:
draft = model.get_by_id(oid)
draft['markdown'] = draft['markdown'].replace('\\', '\\\\').replace('\n', '\\n')
return render_template("edit_drafts.html", oid=oid, user=user, draft=draft, prefix=app.config["APPLICATION_ROOT"])
@module.route("/<user_name>/items/<oid>")
@login_required
def item_page(user_name, oid):
"""
記事閲覧
"""
user = authorized_user()
author = DOMAIN['users'].get_by_identify(user_name)
model = DOMAIN['items']
query = {'status': 'published', 'user_id': author['_id']}
item = model.get_by_id(oid, query)
comments = DOMAIN["comments"].get_index({'item_id': oid}, sort=('created', 1))
stocks = DOMAIN["users"].get_index({'stocks': oid})
del author['password']
if item:
return render_template('item.html', user=user, item=item, author=author, comments=comments, stocks=stocks,
prefix=app.config["APPLICATION_ROOT"])
else:
abort(404)
@module.route("/home/<user_name>/")
@module.route("/home/<user_name>/<target>")
@login_required
def user_page(user_name, target=None):
"""
ユーザページ
"""
user = authorized_user()
author = DOMAIN['users'].get_by_identify(user_name, password=False)
if author:
model = DOMAIN['items']
query = {'status': 'published', 'user_id': author['_id']}
item = model.get_index(query, sort=("created", -1))
comments = DOMAIN["comments"].get_index({'user_id': author['_id']})
stock_ids = [ObjectId(x) for x in author['stocks']]
stocks = model.get_index({'_id': {'$in': stock_ids}})
followers = DOMAIN['users'].get_index({'following_users': author['user_name']}, password=False)
return render_template('users.html', user=user, item=item, author=author, stocks=stocks, comments=comments,
followers=followers, | target=target, prefix=app.config["APPLICATION_ROOT"])
else:
abort(404)
@module.route("/tags")
@login_required
def tags_index():
"""
タグ一覧
"""
user = authorized_user()
model = DOMAIN['items']
tags = model.get_all_tags()
return render_template('tags_index.html', user=user, tags=tags, prefix=app.config["APPLICATION_ROOT"])
@module.route("/tags/<tag_name>")
@login_required
def tags_page(tag_name):
"""
タグ詳細
"""
user | = authorized_user()
model = DOMAIN['items']
items = model.get_index({'tags': tag_name})
follower = DOMAIN['users'].get_index({'following_tags': tag_name})
return render_template('tags.html', user=user, items=items, tag_name=tag_name, follower_count=follower['count'],
prefix=app.config["APPLICATION_ROOT"])
@module.route("/settings")
@login_required
def setting():
"""
設定ページ
"""
user = authorized_user()
return render_template('settings.html', user=user, prefix=app.config["APPLICATION_ROOT"])
@module.route("/search")
@login_required
def search():
"""
検索
"""
user = authorized_user()
model = DOMAIN["items"]
q = request.args.get("query")
terms = q.split()
title = map(lambda x: {"title": re.compile(".*"+x+".*")}, terms)
tags = map(lambda x: {"tags": re.compile(".*"+x+".*")}, terms)
query = {
"$or": [{"$and": list(title)}, {"$and": list(tags)}]
}
result = model.get_index(query)
return render_template("search_result.html", user=user, items=result,
query=q, prefix=app.config["APPLICATION_ROOT"])
@module.route("/session/activate")
def activate_page():
"""
アカウントアクティベーション
"""
token = request.args.get("token")
if token:
model = DOMAIN["users"]
data = jwt_decode(token)
user = model.get_by_id(data["_id"])
if user["password"] == data["password"] and user["user_email"] == data["user_email"]:
# activate user account
model.patch(user["_id"], {"status": "active"})
# login
login(user["user_email"], user["password"])
return render_template("session.html", message="アカウントを認証しました。", user=user, prefix=app.config["APPLICATION_ROOT"])
else:
return render_template("session.html", message="不正なトークンです。", user=None, prefix=app.config["APPLICATION_ROOT"])
@module.route("/session/account_created")
def created():
return render_template("session.html", message="アカウントを作成しました。<br>メールに届いたURLをクリックし、<br>アカウントを認証してください。",
user=None, prefix=app.config["APPLICATION_ROOT"])
|
# Count how many decimal digits of n are equal to d.
d, n = list(map(int, input().split()))
answer = 0
# Bug fix: the original loop condition was `while n % 10`, which stops at
# the first zero digit and silently skips every digit to its left (and can
# never count d == 0).  Iterating while n != 0 visits every digit.
while n:
    if n % 10 == d:
        answer += 1
    n //= 10
print(answer)
|
lefnire/tensorforce | tensorforce/core/optimizers/subsampling_step.py | Python | apache-2.0 | 4,026 | 0.002235 | # Copyright 2017 reinforce.io. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import tensorflow as tf
from tensorforce import util, TensorForceError
from tensorforce.core.optimizers import MetaOptimizer
class SubsamplingStep(MetaOptimizer):
    """
    The subsampling-step meta optimizer randomly samples a subset of batch instances to calculate
    the optimization step of another optimizer.
    """

    def __init__(self, optimizer, fraction=0.1, scope='subsampling-step', summary_labels=()):
        """
        Creates a new subsampling-step meta optimizer instance.

        Args:
            optimizer: The optimizer which is modified by this meta optimizer.
            fraction: The fraction of instances of the batch to subsample.
        """
        assert isinstance(fraction, float) and fraction > 0.0
        self.fraction = fraction

        super(SubsamplingStep, self).__init__(optimizer=optimizer, scope=scope, summary_labels=summary_labels)

    def tf_step(
        self,
        time,
        variables,
        arguments,
        **kwargs
    ):
        """
        Creates the TensorFlow operations for performing an optimization step.

        Args:
            time: Time tensor.
            variables: List of variables to optimize.
            arguments: Dict of arguments for callables, like fn_loss.
            **kwargs: Additional arguments passed on to the internal optimizer.

        Returns:
            List of delta tensors corresponding to the updates for each optimized variable.
        """
        # Get some (batched) argument to determine batch size.
        arguments_iter = iter(arguments.values())
        some_argument = next(arguments_iter)

        try:
            while not isinstance(some_argument, tf.Tensor) or util.rank(some_argument) == 0:
                if isinstance(some_argument, dict):
                    if some_argument:
                        # Descend into the nested dict's values.
                        arguments_iter = iter(some_argument.values())
                    # Bug fix: always advance to the next candidate.  The
                    # original only advanced for a non-empty dict, so an
                    # empty nested dict made this loop spin forever.
                    some_argument = next(arguments_iter)
                elif isinstance(some_argument, list):
                    if some_argument:
                        arguments_iter = iter(some_argument)
                    # Same fix as above for an empty nested list.
                    some_argument = next(arguments_iter)
                elif some_argument is None or util.rank(some_argument) == 0:
                    # Non-batched argument: skip it.
                    some_argument = next(arguments_iter)
                else:
                    raise TensorForceError("Invalid argument type.")
        except StopIteration:
            # Ran out of arguments without finding a batched tensor.
            raise TensorForceError("Invalid argument type.")

        batch_size = tf.shape(input=some_argument)[0]
        num_samples = tf.cast(
            x=(self.fraction * tf.cast(x=batch_size, dtype=util.tf_dtype('float'))),
            dtype=util.tf_dtype('int')
        )
        # Always subsample at least one instance; sampling is with replacement.
        num_samples = tf.maximum(x=num_samples, y=1)
        indices = tf.random_uniform(shape=(num_samples,), maxval=batch_size, dtype=tf.int32)

        # Gather the sampled rows from every batched tensor; scalars pass through.
        subsampled_arguments = util.map_tensors(
            fn=(lambda arg: arg if util.rank(arg) == 0 else tf.gather(params=arg, indices=indices)),
            tensors=arguments
        )

        return self.optimizer.step(
            time=time,
            variables=variables,
            arguments=subsampled_arguments,
            **kwargs
        )
|
raamana/pyradigm | pyradigm/__init__.py | Python | mit | 792 | 0.001263 |
# Public names re-exported at package level.
__all__ = [ 'ClassificationDataset', 'RegressionDataset',
            'BaseDataset', 'check_compatibility',
            'pyradigm', 'MLDataset', 'cli_run']

from sys import version_info

# The package only supports Python 3; importing on Python 2 fails loudly
# instead of breaking later with obscure syntax errors.
if version_info.major >= 3:
    from pyradigm.base import BaseDataset
    from pyradigm.classify import ClassificationDataset
    from pyradigm.regress import RegressionDataset
    from pyradigm.pyradigm import MLDataset, cli_run
    from pyradigm.multiple import MultiDatasetClassify, MultiDatasetRegress
    from pyradigm.utils import check_compatibility
else:
    raise NotImplementedError('pyradigm supports only Python 3 or higher! '
                              'Upgrade to Python 3+ is recommended.')

# Versioneer-generated version string (see _version.py).
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
|
frzdian/jaksafe-engine | jaksafe/jaksafe/jakservice/post_processing/dala.py | Python | gpl-2.0 | 29,206 | 0.00137 | # coding = utf-8
__AUTHOR__ = 'Abdul Somat Budiaji'
import logging
import os
import sys
import pandas as pd
import numpy as np
from qgis.core import NULL
import asumsi
import config
from error import *
import hazard
import shape
logger = logging.getLogger('jakservice.post_processing.dala')
class Dala():
"""
Dala class
:param time_0: waktu awal query kejadian banjir
:param time_1: waktu akhir query kejadian banjir (waktu proses menghitung
DaLA dimulai)
"""
    def __init__(self, time_0, time_1, tipe='auto'):
        """
        Dala class initialization

        :param time_0: start of the flood-event query window
        :param time_1: end of the query window (when the DaLA run starts)
        :param tipe: run mode, 'auto' by default (also selects paths)
        """
        self.time_1 = time_1
        self.time_0 = time_0
        # Assumption tables (unit damage/loss values).
        self.asu = asumsi.Asumsi()
        # Subsector metadata: assets per subsector and DaLA method ids.
        self.sub = config.Subsektor()
        # Input/output directory layout for this run window.
        self.path = config.Path(time_0, time_1, tipe=tipe)
    def calculate(self, list_subs):
        """
        Compute the DaLA (Damage and Loss Assessment).

        :param list_subs: the subsector types for which DaLA is computed

        Dispatches each asset to the dala_* method matching its configured
        method id, then merges the per-asset CSVs into one dala_*.csv and
        spreads the totals per subsector.  Any known error aborts the
        whole run with exit code 1.
        """
        logger.info('Dala.calculate')
        daftar_aset = []
        # Collect every asset belonging to the requested subsectors.
        for s in list_subs:
            for a in self.sub.get_list_aset(s):
                daftar_aset.append(a)
        try:
            for aset in daftar_aset:
                logger.info(aset)
                # Each asset is computed by the DaLA method its subsector
                # configuration assigns to it (methods 1..6).
                if self.sub.get_dala(aset) == 1:
                    self.dala_satu(aset)
                if self.sub.get_dala(aset) == 2:
                    self.dala_dua(aset)
                if self.sub.get_dala(aset) == 3:
                    self.dala_tiga(aset)
                if self.sub.get_dala(aset) == 4:
                    self.dala_empat(aset)
                if self.sub.get_dala(aset) == 5:
                    self.dala_lima(aset)
                if self.sub.get_dala(aset) == 6:
                    self.dala_enam(aset)
        except NoImpactOsmError:
            logger.error('no impact osm error')
            sys.exit(1)
        except NoImpactAggError:
            logger.error('no impact agg error')
            sys.exit(1)
        except NoAsumsiKerusakanError:
            logger.error('no asumsi kerusakan error')
            sys.exit(1)
        except NoAsumsiKerugianError:
            logger.error('no asumsi kerugian error')
            sys.exit(1)
        except NoImpactBuildingError:
            logger.error('no impact bulding error')
            sys.exit(1)
        except NoImpactRoadError:
            logger.error('no impact road error')
            sys.exit(1)
        except NoHazardGeneratedFileError:
            logger.error('no hazard file error')
            sys.exit(1)
        except NoAsumsiAggregatFileError:
            logger.error('no aggregat file error')
            sys.exit(1)
        except KeyError, e:
            logger.exception(e)
            sys.exit(1)
        except Exception:
            logger.exception('Unhandled Exception')
            sys.exit(1)
        # Special DaLA computation for the insurance asset.
        try:
            self.dala_asuransi()
        except NoAsumsiPenetrasiFileError:
            logger.error('no asumsi penetrasi error')
            sys.exit(1)
        except NoAsumsiAsuransiFileError:
            logger.error('no asumsi asuransi error')
            sys.exit(1)
        except NoHazardGeneratedFileError:
            logger.error('no hazard file error')
            sys.exit(1)
        # Merge all per-asset result files into a single output CSV.
        dala_file = self.path.output_dir + 'dala_' + \
            self.time_0 + '_' + self.time_1 + '.csv'
        result = []
        for aset in daftar_aset:
            aset_file = self.path.output_dir + aset + '_' + \
                self.time_0 + '_' + self.time_1 + '.csv'
            df = pd.read_csv(aset_file)
            result.append(df)
            # Per-asset files are intermediate; remove them after merging.
            os.remove(aset_file)
        df_result = pd.concat(result)
        # Save the accumulated DaLA result, creating the directory on demand.
        try:
            fh = open(dala_file, 'w')
        except IOError, e:
            os.makedirs(self.path.output_dir)
            fh = open(dala_file, 'w')
        df_result.to_csv(fh, index=False)
        fh.close()
        # Spread the DaLA totals per subsector.
        self.dala_per(dala_file, list_subs)
def dala_nol(self, aset):
"""
Hasil perhitungan dala ketika tidak ada aset yang terkena maupun
terdampak banjir
"""
logger.info('Dala.dala_nol')
# output
# aset result
aset_file = self.path.output_dir + aset + '_' + \
self.time_0 + '_' + self.time_1 + '.csv'
df_impact = pd.DataFrame()
df_impact['PROVINSI'] = (['DKI JAKARTA', 'DKI JAKARTA', 'DKI JAKARTA',
'DKI JAKARTA', 'DKI JAKARTA'])
df_impact['KOTA'] = (['JAKARTA TIMUR', 'JAKARTA BARAT', 'JAKARTA UTARA',
'JAKARTA SELATAN', 'JAKARTA PUSAT'])
df_impact['KECAMATAN'] = np.nan
df_impact['KELURAHAN'] = np.nan
df_impact['SUBSEKTOR'] = self.sub.get_subsektor(aset=aset)
df_impact['ASET'] = aset
df_impact['KERUSAKAN'] = 0
df_impact['KERUGIAN'] = 0
# menyimpan hasil perhitungan dala
try:
fh = open(aset_file, 'w')
except IOError, e:
os.makedirs(self.path.output_dir)
fh = open(aset_file, 'w')
df_impact.to_csv(fh, index=False)
| fh.close()
    def dala_satu(self, aset):
        """
        DaLA computation per asset unit (method 1): damage and loss are
        unit assumption values scaled by the affected asset count and a
        coefficient, looked up by impact class.
        """
        logger.info('Dala.dala_satu')
        # Inputs: OSM or aggregate impact summary, plus the damage and
        # loss assumption matrices.
        impact_file = self.path.impact_dir + 'summary/osm_impact.csv'
        agg_file = self.path.impact_dir + 'summary/agg_impact.csv'
        rusak_file = self.path.assumption_dir + 'asumsi_kerusakan.csv'
        rugi_file = self.path.assumption_dir + 'asumsi_kerugian.csv'
        # Raise an error if any required input file is missing.
        if not os.path.isfile(impact_file):
            raise NoImpactOsmError
        if not os.path.isfile(agg_file):
            raise NoImpactAggError
        if not os.path.isfile(rusak_file):
            raise NoAsumsiKerusakanError
        if not os.path.isfile(rugi_file):
            raise NoAsumsiKerugianError
        # Output: the per-asset result CSV.
        aset_file = self.path.output_dir + aset + '_' + \
            self.time_0 + '_' + self.time_1 + '.csv'
        # Impact dataframe filtered down to this asset.
        # NOTE(review): if get_impact returns any other file name,
        # df_impact stays unbound and len(df_impact) below raises
        # NameError -- confirm the config only yields these two values.
        if self.sub.get_impact(aset) == 'osm_impact.csv':
            df_impact = pd.read_csv(impact_file)
            df_impact = df_impact[df_impact.ASET == aset]
        elif self.sub.get_impact(aset) == 'agg_impact.csv':
            df_impact = pd.read_csv(agg_file)
            df_impact = df_impact[df_impact.ASET == aset]
        if len(df_impact) == 0:
            self.dala_nol(aset)
            return
        # Damage and loss assumption tables, indexed by asset name.
        df_rusak = pd.read_csv(rusak_file, index_col=1)
        df_rugi = pd.read_csv(rugi_file, index_col=1)
        # Per-row lookup of unit values by (asset, impact class).
        df_impact['TEMP_D'] = df_rusak.lookup(df_impact.ASET, df_impact.KELAS_DAMPAK)
        df_impact['TEMP_L'] = df_rugi.lookup(df_impact.ASET, df_impact.KELAS_DAMPAK)
        # Compute damage and loss values.
        df_impact['KERUSAKAN'] = df_impact['JUMLAH_ASET'] * df_impact['TEMP_D'] * df_impact['KOEFISIEN']
        df_impact['KERUGIAN'] = df_impact['JUMLAH_ASET'] * df_impact['TEMP_L'] * df_impact['KOEFISIEN']
        # Drop the intermediate columns that are no longer needed.
        df_impact.drop(['JUMLAH_ASET', 'KELAS_DAMPAK', 'TEMP_D', 'TEMP_L'], inplace=True, axis=1)
        # Save the DaLA result, creating the directory on demand.
        try:
            fh = open(aset_file, 'w')
        except IOError, e:
            os.makedirs(self.path.output_dir)
            fh = open(aset_file, 'w')
        df_impact.to_csv(fh, index=False)
        fh.close()
def dala_dua(self, aset):
"""
Perhitungan dala berdasarkan luas aset (khusus aset tambak dan kebersihan)
"""
logger.info('Dala.dala_dua')
# input
# shapefile impact building
# matriks asumsi kerusakan
# matriks asumsi kerugian
impact_shp |
vecnet/vnetsource | transmission_simulator/views/IndexView.py | Python | mpl-2.0 | 1,472 | 0.003397 | # PEP 0263
# -*- coding: utf-8 -*-
########################################################################################################################
# VECNet CI - Prototype
# Date: 4/5/2013
# Institution: University of Notre Dame
# Primary Authors:
# Alexander Vyushkov <Alexander.Vyushkov@nd.edu>
########################################################################################################################
from django.views.generic import TemplateView
import transmission_simulator
class IndexView(TemplateView):
    """
    Main page for the Transmission Simulator.

    Renders the template named by ``template_name`` with a context that
    carries the simulator version string and the active nav-menu flag.
    """
    template_name = 'transmission_simulator/index.html'

    def get_context_data(self, **kwargs):
        """
        Build the template context: the inherited context plus the TS
        version (shown in the page title) and the navigation-menu flag.
        """
        context = super(IndexView, self).get_context_data()
        context.update(
            # TS version is displayed in the page title, e.g. "v1.2.3".
            version="v" + transmission_simulator.__version__,
            # Marks the "index" button as active in the navigation menu.
            nav_button='index',
        )
        return context
|
davebridges/Lab-Website | projects/models.py | Python | mit | 6,048 | 0.020668 | '''This file is the model configuration file for the :mod`projects` app.
There is one model in this app, :class:`~projects.models.Project`.
'''
from django.db import models
from django.template.defaultfilters import slugify
from personnel.models import Person
from papers.models import Publication
class Project(models.Model):
    '''This model covers :class:`~projects.models.Projects`.

    The only required field is the title.
    There are optional fields for the priority, a summary, the start date, and links to our and other Publications, current and previous Personnel
    '''
    title = models.CharField(max_length=150,
        help_text="Name of the Project.")
    # Generated from title on first save; used in the permalink.
    title_slug = models.SlugField(blank=True,
        null=True,
        max_length=150,
        editable=False,
        unique=True)
    current_personnel = models.ManyToManyField(Person,
        blank=True,
        null=True,
        help_text="Who is currently working on this project?",
        related_name = 'current_personnel')
    past_personnel = models.ManyToManyField(Person,
        blank=True,
        null=True,
        help_text="Who previously worked on this project?",
        related_name = 'previous_personnel')
    summary = models.TextField(blank=True, null=True)
    start_date = models.DateField(help_text="When did we start this project",
        blank=True,
        null=True)
    priority = models.IntegerField(blank=True,
        null=True,
        help_text="Priority Rank, 1 is high, 5 is low")
    publications = models.ManyToManyField(Publication,
        blank=True,
        null=True,
        help_text = "What papers have we written for this project?",
        related_name="publications")
    other_publications = models.ManyToManyField(Publication,
        blank=True,
        null=True,
        help_text = "What key papers have others written about this project?",
        related_name ="other_publications")
    #these fields are automatically generated.
    date_last_modified = models.DateField(auto_now=True)
    date_added = models.DateField(auto_now_add=True)

    def __unicode__(self):
        '''The unicode representation for a :class:`~projects.models.Project` is its title'''
        return self.title

    @models.permalink
    def get_absolute_url(self):
        '''the permalink for a project detail page is **/projects/<title_slug>**'''
        return ('project-details', [str(self.title_slug)])

    def save(self, *args, **kwargs):
        '''The title is slugified upon saving into title_slug.

        The slug is only set on the initial save (no primary key yet), so
        renaming a project later does not change its permalink.
        '''
        if not self.id:
            self.title_slug = slugify(self.title)
        super(Project, self).save(*args, **kwargs)

    class Meta:
        '''The meta options for the :class:`projects.models.Project` model is ordering set by priority then secondarily by the date_last_modified.'''
        ordering = ['priority','date_last_modified']
class Funding(models.Model):
'''This model covers sources of funding, including grants and fellowships.
The required field is the title'''
title = models.CharField(help_text="The title of the awarded grant",
max_length=200)
title_slug = models.SlugField(blank=True,
null=True,
editable=False,
max_length=150)
amount = models.IntegerField(help_text="The total value of the award",
blank=True,
null=True)
funding_agency = models.ForeignKey('FundingAgency',
help_ | text="What was the funding agency",
blank=True,
null=True)
start_date = models.DateField(help_text="The start date of this award",
blank=True,
null=True)
end_date = models.DateField(help_text="When this | award ends",
blank=True,
null=True)
summary = models.TextField(help_text="The abstract of the award",
blank=True,
null=True)
full_text = models.TextField(help_text="HTML Formatted full text",
blank=True,
null=True)
publications = models.ManyToManyField(Publication,
help_text="Which publications are associated with this award?",
blank=True,
null=True)
projects = models.ManyToManyField(Project,
help_text="Which projects are associated with this award?",
blank=True,
null=True)
active = models.BooleanField(help_text="Is this funding active?")
#these fields are automatically generated.
date_last_modified = models.DateField(auto_now=True)
date_added = models.DateField(auto_now_add=True)
def __unicode__(self):
'''The unicode representation for a :class:`~projects.models.Funding` is its title'''
return self.title
@models.permalink
def get_absolute_url(self):
'''the permalink for a funding detail page is **/funding/<title_slug>**'''
return ('funding-details', [str(self.title_slug)])
def save(self, *args, **kwargs):
'''The title is slugified upon saving into title_slug.'''
if not self.id:
self.title_slug = slugify(self.title)
super(Funding, self).save(*args, **kwargs)
class FundingAgency(models.Model):
'''This model describes the funding agency.
The required field for a funding agency is its name.'''
name = models.CharField(help_text="The name of the funding agency",
max_length=100)
short_name = models.CharField(help_text="A shortened name (ie NIH)",
max_length=10,
blank=True,
null=True)
website = models.URLField(help_text="The URL of the funding agency",
blank=True,
null=True)
logo = models.ImageField(upload_to='funding_agency/%Y/%m/%d',
help_text="A logo for this funding agency",
blank=True,
null=True)
def __unicode__(self):
'''The unicode representation for a :class:`~projects.models.FundingAgency` is its name'''
return self.name
|
EmadMokhtar/Django | tests/model_fields/test_textfield.py | Python | mit | 1,367 | 0.000735 | from unittest import skipIf
from django import forms
from django.db import connection, models
from django.test import TestCase
from .models import Post
class TextFieldTests(TestCase):
def test_max_length_passed | _to_formfield(self):
"""
TextField passes its max_length attribute to form fields created using
their formfield() method.
"""
tf1 = models.TextField()
tf2 = models.TextField(max_length=2345)
self.assertIsNone(tf1.formfield( | ).max_length)
self.assertEqual(2345, tf2.formfield().max_length)
def test_choices_generates_select_widget(self):
"""A TextField with choices uses a Select widget."""
f = models.TextField(choices=[('A', 'A'), ('B', 'B')])
self.assertIsInstance(f.formfield().widget, forms.Select)
def test_to_python(self):
"""TextField.to_python() should return a string."""
f = models.TextField()
self.assertEqual(f.to_python(1), '1')
def test_lookup_integer_in_textfield(self):
self.assertEqual(Post.objects.filter(body=24).count(), 0)
@skipIf(connection.vendor == 'mysql', 'Running on MySQL requires utf8mb4 encoding (#18392)')
def test_emoji(self):
p = Post.objects.create(title='Whatever', body='Smile 😀.')
p.refresh_from_db()
self.assertEqual(p.body, 'Smile 😀.')
|
rosix-ru/django-quickapi | quickapi/apps.py | Python | agpl-3.0 | 1,015 | 0 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Grigoriy Kramarenko <root@rosix.ru>
#
# This file is part of QuickAPI.
#
# QuickAPI is free software: you can redistribute it and/or
# modify it under the terms of the GNU Affero General | Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# QuickAPI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of t | he GNU Affero General Public
# License along with QuickAPI. If not, see
# <http://www.gnu.org/licenses/>.
#
from django.apps import AppConfig as BaseAppConfig
from django.utils.translation import ugettext_lazy as _
class AppConfig(BaseAppConfig):
name = 'quickapi'
verbose_name = _('Application Programming Interface')
|
certik/pyjamas | examples/libtest/I18N/__init__.py | Python | apache-2.0 | 680 | 0.008824 |
class I18N(object):
def example(self):
return "This is an example"
def another_example(self):
return "This is another example"
i18n = I18N()
locale = 'en'
domains = []
import domain
domains.append('domain')
import domain.subdomain
domains.append( | 'domain.subdomain')
def set_locale(loc):
global i18n
try:
path = "I18N.%s" % loc
c = __import__(path)
except ImportError, e:
print "Failed to import %s" % e
domains.sort()
for domain in domains:
try:
path = "I18N.%s.%s" % (domain, loc)
__import__(path)
except ImportError, e:
| print "Failed to import %s" % e
|
IOT-410c/IOT-DB410c-Course-3 | Modules/Module_6_Infrared_Sensors/Lesson_3_IR_Remote/IRRemote.py | Python | apache-2.0 | 3,447 | 0.015956 | from GPIOLibrary import GPIOProcessor
import time
GP = GPIOProcessor()
# GPIO Assignments
#Din = 27
#A1 = 34 Green
#A2 = 33 White
#A3 = 24 Black
#A4 = 26 Yellow
#PIR = 29
#Ind = 30
Din = GP.getPin27()
Din.input()
A1 = GP.getPin34()
A1.out()
A2 = GP.getPin33()
A2.out()
A3 = GP.getPin24()
A3.out()
A4 = GP.getPin26()
A4.out()
PIR = GP.getPin29()
PIR.out()
PIR.low()
Ind = GP.getPin30()
Ind.out()
Ind.low()
# Remote Average Pulse
M = 800
# Stepper Motor Delay
t = 0.001
# Stepper Motor Sequence (Forward / Reverse)
A = [[[0,1,0,1],[1,0,0,1],[1,0,1,0],[0,1,1,0]],
[[0,1,0,1],[0,1,1,0],[1,0,1,0],[1,0,0,1]]]
# Indicators
FR = 0
PIR_status = 0
# Number of clicks
n_PIR = 1
n_90 = 2
n_R90 = 3
n_180 = 4
try:
print 'Calibrate? [y/n]'
r = raw_input()
if r == 'y':
while True:
print 'Click button 5 times.'
counter = 0
time.sleep(0.2)
timeout = time.time() + 2.5
while True:
if Din.getValue() == 0:
counter += 1
if time.time() > timeout:
break;
M = counter/5
print M
print 'Retry? [y/n]'
r = raw_input()
if r == 'n':
break
while True:
read = 0
counter = 0
timeout = time.time() + 0.2
# Determine if read mode should be activated
while True:
if Din.getValue() == 0:
counter += 1
if counter > 0.2*M:
read = 1
if time.time() > timeout:
break;
# Enter read mode
if read == 1:
Ind.high()
x = 0
counter = 0
print 'Read:'
stop_time = time.time() + 2
while True:
if Din.getValue() == 0:
counter += 1
if time.time() > stop_time:
break
# Decide what was chosen
Ind.low()
time.sleep(0.5)
if counter < 0.5*M:
print 'No Input'
elif n_ | PIR*M - 0.5*M < counter < n_PIR*M + 0.5*M:
if PIR_status == 0:
PIR.high()
PIR_status = 1
print 'PIR on'
else:
| PIR.low()
PIR_status = 0
print 'PIR off'
elif n_90*M - 0.5*M < counter < n_90*M + 0.5*M:
FR = 0
x = int(90/1.8)
print '90'
elif n_R90*M - 0.5*M < counter < n_R90*M + 0.5*M:
FR = 1
x = int(90/1.8)
print '-90'
elif n_180*M - 0.5*M < counter < n_180*M + 0.5*M:
FR = 0
x = int(180/1.8)
print '180'
else:
clicks = counter/M
print counter
# Sequencing for Stepper Motor
for i in range(0,x):
A1.setValue(A[FR][i%4][0])
time.sleep(t)
A2.setValue(A[FR][i%4][1])
time.sleep(t)
A3.setValue(A[FR][i%4][2])
time.sleep(t)
A4.setValue(A[FR][i%4][3])
time.sleep(t)
finally:
GP.cleanup()
|
lmazuel/azure-sdk-for-python | azure-mgmt-machinelearningcompute/azure/mgmt/machinelearningcompute/models/__init__.py | Python | mit | 3,596 | 0.001112 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .resource import Resource
from .error_detail import ErrorDetail
from .error_response import ErrorResponse
from .error_response_wrapper import ErrorResponseWrapper, ErrorResponseWrapperException
from .storage_account_properties import StorageAccountProperties
from .container_registry_properties import ContainerRegistryProperties
from .service_principal_properties import ServicePrincipalProperti | es
from .kuber | netes_cluster_properties import KubernetesClusterProperties
from .system_service import SystemService
from .acs_cluster_properties import AcsClusterProperties
from .app_insights_properties import AppInsightsProperties
from .ssl_configuration import SslConfiguration
from .service_auth_configuration import ServiceAuthConfiguration
from .auto_scale_configuration import AutoScaleConfiguration
from .global_service_configuration import GlobalServiceConfiguration
from .operationalization_cluster import OperationalizationCluster
from .operationalization_cluster_update_parameters import OperationalizationClusterUpdateParameters
from .storage_account_credentials import StorageAccountCredentials
from .container_registry_credentials import ContainerRegistryCredentials
from .container_service_credentials import ContainerServiceCredentials
from .app_insights_credentials import AppInsightsCredentials
from .operationalization_cluster_credentials import OperationalizationClusterCredentials
from .check_system_services_updates_available_response import CheckSystemServicesUpdatesAvailableResponse
from .update_system_services_response import UpdateSystemServicesResponse
from .resource_operation_display import ResourceOperationDisplay
from .resource_operation import ResourceOperation
from .available_operations import AvailableOperations
from .operationalization_cluster_paged import OperationalizationClusterPaged
from .machine_learning_compute_management_client_enums import (
OperationStatus,
ClusterType,
OrchestratorType,
SystemServiceType,
AgentVMSizeTypes,
Status,
UpdatesAvailable,
)
__all__ = [
'Resource',
'ErrorDetail',
'ErrorResponse',
'ErrorResponseWrapper', 'ErrorResponseWrapperException',
'StorageAccountProperties',
'ContainerRegistryProperties',
'ServicePrincipalProperties',
'KubernetesClusterProperties',
'SystemService',
'AcsClusterProperties',
'AppInsightsProperties',
'SslConfiguration',
'ServiceAuthConfiguration',
'AutoScaleConfiguration',
'GlobalServiceConfiguration',
'OperationalizationCluster',
'OperationalizationClusterUpdateParameters',
'StorageAccountCredentials',
'ContainerRegistryCredentials',
'ContainerServiceCredentials',
'AppInsightsCredentials',
'OperationalizationClusterCredentials',
'CheckSystemServicesUpdatesAvailableResponse',
'UpdateSystemServicesResponse',
'ResourceOperationDisplay',
'ResourceOperation',
'AvailableOperations',
'OperationalizationClusterPaged',
'OperationStatus',
'ClusterType',
'OrchestratorType',
'SystemServiceType',
'AgentVMSizeTypes',
'Status',
'UpdatesAvailable',
]
|
parseendavid/Andela-Developer-Challenge---Shopping-List-V2.0 | tests/items_test.py | Python | mit | 2,665 | 0.002251 | """TESTS FOR SHOPPING LIST ITEMS"""
import unittest
from datetime import date
from app import shopping_lists_items
class TestCasesItems(unittest.TestCase):
"""TESTS FOR ITEMS CREATION AND BEHAVIOUR"""
def setUp(self):
self.item = shopping_lists_items.ShoppingListItems()
def tearDown(self):
del self.item
def test_sucessful_add_item(self):
"""CHECKS WHETHER AND ITEM CAN BE ADDED SUCESSFULLY"""
msg = self.item.add(
"Party", "Whisky", "dalton@yahoo.com")
self.assertEqual(
msg, [{'user': 'dalton@yahoo.com',
'list': 'Party',
'name': 'Whisky',
'number': 1,
'date':str(date.today())}])
def test_invalid_characters(self):
"""TESTS IF CODE ACCEPTS INVALID CHARACTERS"""
msg = self.item.add(
"Party", "Whisky!", "dalton@yahoo.com")
self.assertEqual(msg, "Invalid characters")
def test_sucess_edit_item(self):
""""CHECKS FOR SUCESSFUL ITEM EDITING"""
self.item.list_of_shopping_list_items = [{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Snacks'},
{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Booze'}]
msg = self.item.edit('Soda', 'Booze', 'Adventure', "dalton@yahoo.com")
self.assertEqual(msg, [{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Snacks'},
{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Soda'}])
def test_edit_existing_item(self):
"""Check if edit n | ame provided is similar to an existing item
"""
self.item.list_of_shopping_list_items = [{'user': 'dalton@yahoo.com',
'list': 'Adventure',
'name': 'Snacks'},
{'user': 'dalton@yahoo.com',
| 'list': 'Adventure',
'name': 'Booze'}]
msg = self.item.edit(
'Snacks', 'Booze', 'Adventure', "dalton@yahoo.com")
self.assertEqual(msg, "Name already used on another item")
if __name__ == '__main__':
unittest.main()
|
alexhayes/django-async-test | django_async_test/tests/testapp/settings.py | Python | mit | 695 | 0 | # -*- coding: utf-8 -*-
"""
module.name
~~~~~~~~~~~~~~~
Preamble...
"""
from __future__ import absolute_import, print_function, unicode_literals
import json
# TEST SETTINGS
import random
TEST_RUNNER = 'django.test. | runner.DiscoverRunner'
# Django replaces this, but it still wants it. *shrugs*
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
INSTALLED_APPS = (
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.admin',
'django.contrib.sessions',
'django_async_test.tests.testapp',
)
MIDDLEWARE_CLASSES = {}
SE | CRET_KEY = '53cr3773rc3553cr3773rc3553cr3773rc3553cr3773rc35'
|
cbertinato/pandas | pandas/tests/io/msgpack/test_extension.py | Python | bsd-3-clause | 2,167 | 0 | import array
import pandas.io.msgpack as msgpack
from pandas.io.msgpack import ExtType
from .common import frombytes, tobytes
def test_pack_ext_type():
def p(s):
packer = msgpack.Packer()
packer.pack_ext_type(0x42, s)
return packer.bytes()
assert p(b'A') == b'\xd4\x42A' # fixext 1
assert p(b'AB') == b'\xd5\x42AB' # fixext 2
assert p(b'ABCD') == b'\xd6\x42ABCD' # fixext 4
assert p(b'ABCDEFGH') == b'\xd7\x42ABCDEFGH' # fixext 8
assert p(b'A' * 16) == b'\xd8\x42' + b'A' * 16 # fixext 16
assert p(b'ABC') == b'\xc7\x03\x42ABC' # ext 8
assert p(b'A' * 0x0123) == b'\xc8\x01\x23\x42' + b'A' * 0x0123 # ext 16
assert (p(b'A' * 0x00012345) ==
b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345) # ext 32
def test_unpack_ext_type():
def check(b, expected):
assert msgpack.unpackb(b) == expected
check(b'\xd4\x42A', ExtType(0x42, b'A')) # fixext 1
check(b'\xd5\x42AB', ExtType(0x42, b'AB')) # fixext 2
check(b'\xd6\x42ABCD', ExtType(0x42, b'ABCD')) # fixext 4
check(b'\xd7\x42ABCDEFGH', ExtType(0x42, b'ABCDEFGH')) # fixext 8
check(b'\xd8\x42' + b'A' * 16, ExtType(0x42, b'A' * 16)) # fixext 16
check(b'\xc7\x03\x42ABC', ExtType(0x42, b'ABC')) # ext 8
check(b'\xc8\x01\x23\x42' + b'A' * 0x0123,
ExtType(0x42, b'A' * 0x0123)) # ext 16
check(b'\xc9\x00\x01\x23\x45\x42' + b'A' * 0x00012345,
ExtTy | pe(0x42, b'A' * 0x00012345)) # ext 32
def test_extension_type():
def default(obj):
print('default called', obj)
if isinstance(obj, array.array):
typecode = 123 # application specific typecode
data = tobytes(obj)
return ExtType(typecode, data)
raise TypeError("Unknown type object %r" % | (obj, ))
def ext_hook(code, data):
print('ext_hook called', code, data)
assert code == 123
obj = array.array('d')
frombytes(obj, data)
return obj
obj = [42, b'hello', array.array('d', [1.1, 2.2, 3.3])]
s = msgpack.packb(obj, default=default)
obj2 = msgpack.unpackb(s, ext_hook=ext_hook)
assert obj == obj2
|
Bytewerk/mpdsync | mpdsync.py | Python | gpl-3.0 | 2,455 | 0.029735 | from mpd import MPDClient
from time import sleep
import RPi.GPIO as GPIO
currentSong = None
bingoMPD = None
selfMPD = None
def setupGPIO():
GPIO.setmode(GPIO.BOARD)
GPIO.setup(7, GPIO.IN)
def fadeOver(newSong):
global selfMPD
global bingoMPD
oldVol = int(selfMPD.status()["volume"])
#fade out
for vol in range(oldVol, 1, -1):
selfMPD.setvol(vol)
sleep(0.05)
selfMPD.clear()
selfMPD.addid(newSong, 0)
selfMPD.play(0)
bingoMPDPlayTime = bingoMPD.status()["time"].split(":")[0]
selfMPD.seekcur(bingoMPDPlayTime)
#fade in
for vol in range(0, oldVol, 1):
selfMPD.setvol(vol)
sleep(0.05)
def connect():
print("Connecting...")
global bingoMPD
global selfMPD
bingoMPD = MPDClient()
bingoMPD.connect("music.bingo", 6600)
selfMPD = MPDClient()
selfMPD.connect("localhost", 6600)
print("Connected!")
def updateSong(newSong):
global bingoMPD
global selfMPD
try:
selfMPD.delete(0)
except:
pass
selfMPD.addid(newSong, 0)
selfMPD.play(0)
def close():
try:
bingoMPD.close()
bingoMPD.disconnect()
selfMPD.close()
selfMPD.disconnect()
except:
print("got an exception in close()")
pass
print("Setting up GPIOs")
setupGPIO()
connect()
print("Fetching remote song")
currentSong = bingoMPD.currentsong().get("file")
oldSong = currentSong
print("Clearing old playlist")
selfMPD.clear()
print("Playing remote song: " + currentSong)
if len(currentSong) > 0:
updateSong(currentSong)
print("Sync playtime to remote MPD")
bingoMPDPlayTime = bingoMPD.status()["time"].split(":")[0]
selfMPD.seekcur(bingoMPDPlayTime)
currentSwitchState = GPIO.input(7) # True == Sync
oldSwitchState = currentSwitchState
while True:
# If the song has changed, add it to position 0 and play it
if currentSong != oldSong and currentSwitchState == 1:
if | len(currentSong) > 0 | :
print("Remote song changed, syncing...")
updateSong(currentSong)
else:
print("Remote song is empty, skipping...")
oldSong = currentSong
# If the switch gets toggled from "custom play mode" to "sync mode" then fade over to the current playing song
if (currentSwitchState != oldSwitchState):
if (currentSwitchState == 1):
print("Switch changed to sync mode, going to fade now...")
fadeOver(currentSong)
oldSwitchState = currentSwitchState
else:
print("Switch changed to manual mode, good luck!")
oldSwitchState = currentSwitchState
currentSong = bingoMPD.currentsong().get("file")
currentSwitchState = GPIO.input(7)
close()
|
SubhasisDutta/subhasisdutta.com | src/controller/HomeContoller.py | Python | mit | 644 | 0.018634 | import webapp2
import os
from google.appengine.ext.webapp import template
from src.model.WorkModels import Work
class HomePage(webapp2.RequestHandler):
def get(self):
self.response.headers["Content-Type | "]="text/html"
publishedWork=Wor | k.gql("WHERE publish=True ORDER BY order ")
template_values = {
'pageTitle':"Subhasis Dutta - Profile",
'works':publishedWork
}
path=os.path.join(os.path.dirname(__file__),'../../template/index.html')
page=template.render(path,template_values)
self.response.out.write(page)
|
izapolsk/integration_tests | cfme/intelligence/reports/schedules.py | Python | gpl-2.0 | 12,942 | 0.001854 | """Module handling schedules"""
import attr
from navmazing import NavigateToAttribute
from navmazing import NavigateToSibling
from widgetastic.exceptions import NoSuchElementException
from widgetastic.widget import Checkbox
from widgetastic.widget import Text
from widgetastic.widget import TextInput
from widgetastic.widget import View
from widgetastic_patternfly import BootstrapSelect
from widgetastic_patternfly import Button
from widgetastic_patternfly import DatePicker
from widgetastic_patternfly import FlashMessages
from cfme.intelligence.reports import CloudIntelReportsView
from cfme.modeling.base import BaseCollection
from cfme.modeling.base import BaseEntity
from cfme.utils.appliance.implementations.ui import CFMENavigateStep
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.appliance.implementations.ui import navigator
from cfme.utils.log import logger
from cfme.utils.pretty import Pretty
from cfme.utils.update import Updateable
from widgetastic_manageiq import AlertEmail
from widgetastic_manageiq import PaginationPane
from widgetastic_manageiq import SummaryForm
from widgetastic_manageiq import Table
class VolatileBootstrapSelect(BootstrapSelect):
def fill(self, items):
try:
super(VolatileBootstrapSelect, self).fill(items)
except NoSuchElementException:
logger.warning(
"fill() operation was successful, but no options are left in BootstrapSelect to"
" display/select hence the widget has disappeared. Returning True."
)
return True
class SchedulesAllView(CloudIntelReportsView):
title = Text("#explorer_title_text")
schedules_table = Table(".//div[@id='records_div' or @id='main_div']//table")
paginator = PaginationPane()
@property
def is_displayed(self):
return self.in_intel_reports and self.title.text == "All Schedules"
# TODO debug the closing widget behavior
class BootstrapSelectRetry(BootstrapSelect):
"""Workaround for schedule filter widget that is closing itself
Retrys the open action
"""
def open(self):
super(BootstrapSelectRetry, self).open()
if not self.is_open:
super(BootstrapSelectRetry, self).open()
class SchedulesFormCommon(CloudIntelReportsView):
flash = FlashMessages('.//div[@id="flash_msg_div"]')
# Basic Information
title = Text("#explorer_title_text")
name = TextInput(name="name")
description = TextInput(name="description")
active = Checkbox("enabled")
# Buttons
cancel_button = Button("Cancel")
@View.nested
class timer(View): # noqa
run = BootstrapSelect("timer_typ")
# Adding timer for hour, day, week, and zone because there is no single element
# for the timer_interval.
timer_hour = BootstrapSelect("timer_hours")
timer_day = BootstrapSelect("timer_days")
timer_month = BootstrapSelect("timer_months")
timer_week = BootstrapSelect("timer_weeks")
time_zone = BootstrapSelect("time_zone")
starting_date = DatePicker("miq_date_1")
hour = BootstrapSelect("start_hour")
minute = BootstrapSelect("start_min")
@View.nested
class report_filter(View): # noqa
# Report Selection
filter_type = BootstrapSelectRetry("filter_typ")
subfilter_type = BootstrapSelectRetry("subfilter_typ")
report_type = BootstrapSelectRetry("repfilter_typ")
@View.nested
class email(View): # noqa
# Email
emails_send = Checkbox("send_email_cb")
from_email = TextInput(name="from")
to_emails = AlertEmail()
user_email = VolatileBootstrapSelect("user_email")
@View.nested
class email_options(View): # noqa
# Email Options
send_if_empty = Checkbox("send_if_empty")
send_txt = Checkbox("send_txt")
send_csv = Checkbox("send_csv")
send_pdf = Checkbox("send_pdf")
class NewScheduleView(SchedulesFormCommon):
add_button = Button("Add")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == "Adding a new Schedule" and
self.schedules.is_opened and
self.schedules.tree.currently_selected == ["All Schedules"]
)
class EditScheduleView(SchedulesFormCommon):
save_button = Button("Save")
reset_button = Button("Reset")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == 'Editing Schedule "{}"'.format(self.context["object"].name) and
self.s | chedules.is_opened and
self.s | chedules.tree.currently_selected == ["All Schedules", self.context["object"].name]
)
class ScheduleDetailsView(CloudIntelReportsView):
title = Text("#explorer_title_text")
schedule_info = SummaryForm("Schedule Info")
@property
def is_displayed(self):
return (
self.in_intel_reports and
self.title.text == 'Schedule "{}"'.format(self.context["object"].name) and
self.schedules.is_opened and
self.schedules.tree.currently_selected == ["All Schedules", self.context["object"].name]
)
@attr.s
class Schedule(Updateable, Pretty, BaseEntity):
"""Represents a schedule in Cloud Intel/Reports/Schedules.
Args:
name (str): Schedule name.
description (str): Schedule description.
report_filter (dict): Contains filter_type, subfilter_type and report_type.
active (bool): Whether is this schedule active.
timer (dict): Specifies how often this schedule runs. Contains
run, run_interval(timer_hour, timer_day, timer_week, timer_month),
starting_date, timezone, hour and minute
email (dict): Contains to_email and from_email(list). If specified, turns on e-mail sending
email_options (dict): Contains send_if_empty, send_csv, send_txt, send_pdf.
"""
pretty_attrs = ["name", "report_filter"]
def __str__(self):
return self.name
name = attr.ib()
description = attr.ib()
report_filter = attr.ib()
active = attr.ib(default=None)
timer = attr.ib(default=None)
email = attr.ib(default=None)
email_options = attr.ib(default=None)
def update(self, updates):
view = navigate_to(self, "Edit")
changed = view.fill(updates)
if changed:
view.save_button.click()
else:
view.cancel_button.click()
view.flash.assert_no_error()
# using `wait` kwarg to trigger is_displayed check for the required view
self.create_view(ScheduleDetailsView, override=updates, wait="10s")
def delete(self, cancel=False):
view = navigate_to(self, "Details")
view.configuration.item_select("Delete this Schedule", handle_alert=not cancel)
if cancel:
assert view.is_displayed
else:
view = self.create_view(SchedulesAllView)
assert view.is_displayed
view.flash.assert_no_error()
def queue(self):
"""Queue this schedule."""
view = navigate_to(self, "Details")
view.configuration.item_select("Queue up this Schedule to run now")
@property
def enabled(self):
view = navigate_to(self.parent, "All")
for item in view.schedules_table.read():
if item['Name'] == self.name:
return item['Active'] == 'True'
@property
def fill_dict(self):
return ({
"name": self.name,
"description": self.description,
"active": self.active,
"report_filter": self.report_filter,
"timer": self.timer,
"email": self.email,
"email_options": self.email_options,
})
@attr.s
class ScheduleCollection(BaseCollection):
ENTITY = Schedule
def create(
self,
name,
description,
report_filter,
active=True,
timer=None,
email=None,
email_options=None,
cancel=False,
):
if email:
|
rcosnita/fantastico | fantastico/rendering/tests/itest_component_reusage.py | Python | mit | 3,406 | 0.004698 | '''
Copyright 2013 Cosnita Radu Viorel
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
.. codeauthor:: Radu Viorel Cosnita <radu.cosnita@gmail.com>
.. py:module:: fantastico.rendering.tests.itest_component_reusage
'''
from fantastico.server.tests.itest_dev_server import DevServerIntegration
from urllib.request import Request
import urllib
class ComponentReusageIntegration(DevServerIntegration):
'''This class provides the integration tests for ensuring component reusage works as expected within the framework.'''
_response = None
def init(self):
'''This method is invoked automatically before each test case.'''
self._response = None
def test_component_html_rendering_ok(self):
'''This test case ensures that a url which reuses components internally the result is retrieved correctly.'''
endpoint = "/mvc/reuse-component"
def retrieve_menu_items(server):
request = Request(self._get_server_base_url(server, endpoint))
self._response = urllib.request.urlopen(request)
def assert_ok(server):
self.assertIsNotNone(self._response)
self.assertEqual(200, self._response.getcode())
self.assertEqual("text/html; charset=UTF-8", self._response.info()["Content-Type"])
body = self._response.read().decode()
self.assertTrue(body.find("'inner_message': {'message': 'inner_message'}") > -1)
self.assertTrue(body.find("'message': 'Hello world'") > -1)
self._run_test_against_dev_server(retrieve_menu_items, assert_ok)
def test_component_remote_model_local_view_rendering(self):
'''This test case covers the scenario where a remote model is plugged into a local view.'''
endpoint = "/simple-component/foreign-component-reusage"
def retrieve_menu_items(server):
request = Request(self._get_server_base_url(server, endpoint))
self._response = urllib.request.urlopen(req | uest)
def assert_ok(server):
self.assertIsNotNone(self._response)
self.assertEqual(200, self._response.getcode())
self.assertEqual("text/html; charset=UTF-8", self._response.info()["Content-Type"])
body = self._response.read().decode()
self.assertTrue(body.find("Hello inner_message") > -1)
self._run_test_against_dev_se | rver(retrieve_menu_items, assert_ok)
|
citrix-openstack-build/keystone | keystone/openstack/common/loopingcall.py | Python | apache-2.0 | 4,679 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from eventlet import event
from eventlet import greenthread
from keystone.openstack.common.gettextutils import _ # noqa
from keystone.openstack.common import log as logging
from keystone.openstack.common import timeutils
LOG = logging.getLogger(__name__)
class LoopingCallDone(Exception):
"""Exception to break out and stop a LoopingCall.
The poll-function passed to LoopingCall can raise this exception to
break out of the loop normally. This is somewhat analogous to
StopIteration.
An optional return-value can be included as the argument to the exception;
this return-value will be returned by LoopingCall.wait()
"""
def __init__(self, retvalue=True):
""":param retvalue: Value that LoopingCall.wait() should return."""
self.retvalue = retvalue
class LoopingCallBase(object):
def __init__(self, f=None, *args, **kw):
self.args = args
self.kw = kw
self.f = f
self._running = False
self.done = None
def stop(self):
self._running = False
def wait(self):
return self.done.wait()
class FixedIntervalLoopingCall(LoopingCallBase):
"""A fixed interval looping call."""
def start(self, interval, initial_delay=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
start = timeutils.utcnow()
self.f(*self.args, **self.kw)
end = timeutils.utcnow()
if not self._running:
break
delay = interval - timeutils.delta_seconds(start, end)
if delay <= 0:
LOG.warn(_('task run outlasted interval by %s sec') %
-delay)
greenthread.sleep(delay if delay > 0 else 0)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in fixed duration looping call'))
done.send_exception(*sys.exc_info())
return
else:
done.send(True)
self.done = done
greenthread.spawn_n(_inner)
return self.done
# TODO(mikal): this class name is deprecated in Havana and should be removed
# in the I release
LoopingCall = FixedIntervalLoopingCall
class DynamicLoopingCall(LoopingCallBase):
"""A looping call which sleeps until the next known event.
The function called should return how long to sleep for before being
called again.
"""
def start(self, initial_delay=None, periodic_interval_max=None):
self._running = True
done = event.Event()
def _inner():
if initial_delay:
greenthread.sleep(initial_delay)
try:
while self._running:
idle = self.f(*self.args, **self.kw)
if not self._running:
break
if periodic_interval_max is not None:
idle = min(idle, periodic_interval_max)
LOG.debug(_('Dynamic looping call sleeping for %.02f '
'seconds'), idle)
greenthread.sleep(idle)
except LoopingCallDone as e:
self.stop()
done.send(e.retvalue)
except Exception:
LOG.exception(_('in dynamic looping call'))
done.send_exception(*sys.exc_info())
| return
else:
| done.send(True)
self.done = done
greenthread.spawn(_inner)
return self.done
|
bluestemscott/librarygadget | librarygadget/librarybot/findlibraries/bing_finder.py | Python | mit | 1,859 | 0.003228 | import sys
import urllib
import json
import requests
class BingApiResult():
def __init__(self, json_result):
self.response = json.loads(json_result)['SearchResponse']['Web']
self.total = self.response['Total']
def find_libraries(self):
libraries = []
return self.response['Results']
def eof(self):
try:
self.find_libraries()
return False
except KeyError as e:
return True
def bing_search(search_terms, base_url, offset):
base_url = base_url + "&Query=" + urllib.quote_plus(search_terms)
url = base_url + "&Web.Count=50&Web.Offset=" + str(offset)
print str(offset) + " " + url
r = requests.get(url)
search_result = BingApiResult(r.content)
if search_result.eof():
print "EOF"
return []
libraries = search_result.find_libraries()
print 'Total results ' + str(search_result.total)
print 'Current results ' + str(len(libraries))
return libraries
def main():
# for example, to search Bing for Horizon libraries: "inanchor:ipac20 account"
search_terms = "inanchor:ipac20 -site:si.edu"
if len(sys.argv) > 1:
search_terms = sys.argv[1]
base_url = "http://api.bing.net/json.aspx?AppId=91650C54158D791BE8B89E229B2190C53C83ABE | 8&Sources=Web&Version=2.0&Market=en-us&Adult=Moderate&Web.Options=DisableQueryAlterations"
offset = 0
libraries = []
new_libraries = bing_search(search_terms, base_url, offset)
| while len(new_libraries) != 0:
libraries.extend(new_libraries)
offset += len(new_libraries)
new_libraries = bing_search(search_terms, base_url, offset)
for library in libraries:
print library['Title'] + ',' + library['Url']
if __name__ == '__main__':
main() |
edisonlz/fruit | web_project/base/site-packages/docutils/parsers/rst/__init__.py | Python | apache-2.0 | 14,192 | 0.000141 | # $Id: __init__.py 6141 2009-09-25 18:50:30Z milde $
# Author: David Goodger <goodger@python.org>
# Copyright: This module has been placed in the public domain.
"""
This is ``docutils.parsers.rst`` package. It exports a single class, `Parser`,
the reStructuredText parser.
Usage
=====
1. Create a parser::
parser = docutils.parsers.rst.Parser()
Several optional arguments may be passed to modify the parser's behavior.
Please see `Customizing the Parser`_ below for details.
2. Gather input (a multi-line string), by reading a file or the standard
input::
input = sys.stdin.read()
3. Create a new empty `docutils.nodes.document` tree::
document = docutils.utils.new_document(source, settings)
See `docutils.utils.new_document()` for parameter details.
4. Run the parser, populating the document tree::
parser.parse(input, document)
Parser Overview
===============
The reStructuredText parser is implemented as a state machine, examining its
input one line at a time. To understand how the parser works, please first
become familiar with the `docutils.statemachine` module, then see the
`states` module.
Customizing the Parser
----------------------
Anything that isn't already customizable is that way simply because that type
of customizability hasn't been implemented yet. Patches welcome!
When instantiating an object of the `Parser` class, two parameters may be
passed: ``rfc2822`` and ``inliner``. Pass ``rfc2822=1`` to enable an initial
RFC-2822 style header block, parsed as a "field_list" element (with "class"
attribute set to "rfc2822"). Currently this is the only body-level element
which is customizable without subclassing. (Tip: subclass `Parser` and change
its "state_classes" and "initial_state" attributes to refer to new classes.
Contact the author if you need more details.)
The ``inliner`` parameter takes an instance of `states.Inliner` or a subclass.
It handles inline markup recognition. A common extension is the addition of
further implicit hyperlinks, like "RFC 2822". This can be done by subclassing
`states.Inliner`, adding a new method for the implicit markup, and adding a
``(pattern, method)`` pair to the "implicit_dispatch" attribute of the
subclass. See `states.Inliner.implicit_inline()` for details. Explicit
inline markup can be customized in a `states.Inliner` subclass via the
``patterns.initial`` and ``dispatch`` attributes (and new methods as
appropriate).
"""
__docformat__ = 'reStructuredText'
import docutils.parsers
import docutils.statemachine
from docutils.parsers.rst import states
from docutils import frontend, nodes
class Parser(docutils.parsers.Parser):
"""The reStructuredText parser."""
supported = ('restructuredtext', 'rst', 'rest', 'restx', 'rtxt', 'rstx')
"""Aliases this parser supports."""
settings_spec = (
'reStructuredText Parser Options',
None,
(('Recognize and link to standalone PEP references (like "PEP 258").',
['--pep-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for PEP references '
'(default "http://www.python.org/dev/peps/").',
['--pep-base-url'],
{'metavar': '<URL>', 'default': 'http://www.python.org/dev/peps/',
'validator': frontend.validate_url_trailing_slash}),
('Template for PEP file part of URL. (default "pep-%04d")',
['--pep-file-url-template'],
{'metavar': '<URL>', 'default': 'pep-%04d'}),
('Recognize and link to standalone RFC references (like "RFC 822").',
['--rfc-references'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Base URL for RFC references (default "http://www.faqs.org/rfcs/").',
['--rfc-base-url'],
{'metavar': '<URL>', 'default': 'http://www.faqs.org/rfcs/',
'validator': frontend.validate_url_trailing_slash}),
('Set number of spaces for tab expansion (default 8).',
['--tab-width'],
{'metavar': '<width>', 'type': 'int', 'default': 8,
'validator': frontend.validate_nonnegative_int}),
('Remove spaces before footnote references.',
['--trim-footnote-reference-space'],
{'action': 'store_true', 'validator': frontend.validate_boolean}),
('Leave spaces before footnote references.',
['--leave-footnote-reference-space'],
{'action': 'store_false', 'dest': 'trim_footnote_reference_space'}),
('Disable directives that insert the contents of external file '
'("include" & "raw"); replaced with a "warning" system message.',
['--no-file-insertion'],
{'action': 'store_false', 'default': 1,
'dest': 'file_insertion_enabled',
'validator': frontend.validate_boolean}),
('Enable directives that insert the contents of external file '
'("include" & "raw"). Enabled by default.',
['--file-insertion-enabled'],
{'action': 'store_true'}),
('Disable the "raw" directives; replaced with a "warning" '
'system message.',
['--no-raw'],
{'action': 'store_false', 'default': 1, 'dest': 'raw_enabled',
'validator': frontend.validate_boolean}),
('Enable the "raw" directive. Enabled by default.',
['--raw-enabled'],
{'action': 'store_true'}),))
config_section = 'restructuredtext parser'
config_section_dependencies = ('parsers',)
def __init__(self, rfc2822=None, inliner=None):
if rfc2822:
self.initial_state = 'RFC2822Body'
else:
self.initial_state = 'Body'
self.state_classes = states.state_classes
self.inliner = inliner
def parse(self, inputstring, document):
"""Parse `inputstring` and populate `document`, a document tree."""
self.setup_parse(inputstring, document)
self.statemachine = states.RSTStateMachine(
state_classes=self.state_classes,
initial_state=self.initial_state,
debug=document.reporter.debug_flag)
inputlines = docutils.statemachine.string2lines(
inputstring, tab_width=document.settings.tab_width,
convert_whitespace=1)
self.statemachine.run(inputlines, document, inliner=self.inliner)
self.finish_parse()
class DirectiveError(Exception):
"""
Store a message and a system message level.
To be thrown from inside directive code.
Do not instantiate directly -- use `Directive.directive_error()`
instead!
"""
def __init__(self, level, message, source, line):
"""
Initialize with message `message`. `level` is a system message level.
"""
Exception.__init__(self)
self.level = level
self.msg = message
self.source = source
self.line = line
class Directive(object):
"""
Base class for reStructuredText directives.
The following attributes may be set by subclasses. They are
interpreted by the directive parser (which runs the directive
class):
- `required_arguments`: The number of required arguments (default:
0).
- `optional_arguments`: The number of optional arguments (default:
0).
- `final_argument_whitespace`: A boolean, indicating if the final
argument may contain whitespace (default: False).
- `option_spec`: A dictionary, mapping known option names to
conversion functions such as `int` or `float` (default: {}, no
options). Several conversion functions are defined in the
directives/__init__.py module.
Option conversion functions take | a single parameter, the option
argument (a string or ``None``), validate it and/or convert it
to | the appropriate form. Conversion functions may raise
`ValueError` and `TypeError` exceptions.
- `has_content`: A boolean; True if content is allowed. Client
code must handle the case where content is required but not
supplied (an empty content list will be supplied).
Argume |
calben/mcgillbas | mcgillbas/cmdline.py | Python | mit | 1,148 | 0.003484 | #!/usr/bin/env python
# Copyright (c) 2014 Calem J Bendell <calemjbendell@gmail.com>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyrigh | t notice and this permission notice shall be included in all
# copies or substantia | l portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
|
brabemi/sw_config_backup | sw_config_backup.py | Python | gpl-2.0 | 5,888 | 0.031929 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# vim:set sw=4 ts=4 et:
import sys
import pexpect
import threading
import os
import shutil
import logging
import time
import configparser
import ast
import subprocess
def backup(switch, server):
if switch['type'].lower() == '3com':
return backup_3com(switch, server)
elif switch['type'].lower() == 'hp':
return backup_hp(switch, server)
else:
logging.error("Unsupported type of switch (type: %s)" % (switch['type']))
return 4
def backup_3com(switch, server):
try:
ssh=pexpect.spawn('ssh -o StrictHostKeyChecking=no %s@%s' % (switch['username'], switch['ip']))
logging.debug('%s: connecting to ip: %s' % (switch['name'], switch['ip']))
ssh.expect('password')
except:
logging.error("Connection failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 1
try:
ssh.sendline('%s' % switch['password'])
logging.debug('%s: authenticating username: %s' % (switch['name'], switch['username']))
ssh.expect('login')
except:
logging.error("Authorization failed(%s)\n \tusername: %s" % (switch['name'], switch['username']))
return 2
try:
ssh.sendline("backup fabric current-configuration to %s %s.cfg" % (server, switch['name']))
logging.debug('%s: backuping to server: %s' % (switch['name'], server))
ssh.expect('finished!\s+<.*>',timeout=30)
ssh.sendline('quit')
except:
logging.error("Backup failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 3
logging.info("Configuration from %s uploaded to tftp server %s" % (switch['name'], server))
return 0
def backup_hp(switch, server):
try:
ssh=pexpect.spawn('ssh -o StrictHostKeyChecking=no %s@%s' % (switch['username'], switch['ip']))
logging.debug('%s: connecting to ip: %s' % (switch['name'], switch['ip']))
ssh.expect('password')
except:
logging.error(" | Connection failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 1
try:
ssh.sendline('%s' % switch['password'])
logging.debug('%s: authenticating username: %s' % (switch['name'], switch['username']))
ssh.expect('>')
except:
logging.error("Authorization failed(%s)\n \tusername: %s" % (switch['name'], switch['username']))
return 2
try:
ssh.sendline("backup startu | p-configuration to %s %s.cfg" % (server, switch['name']))
logging.debug('%s: backuping to server: %s' % (switch['name'], server))
ssh.expect('finished!\s+<.*>',timeout=30)
ssh.sendline('quit')
except:
logging.error("Backup failed(%s)\n \t%s" % (switch['name'], ssh.before))
return 3
logging.info("Configuration from %s uploaded to tftp server %s" % (switch['name'], server))
return 0
def sws_cfg_check(sws_cfg):
keys = {'username', 'password', 'name', 'ip', 'units', 'type'}
for section in sws_cfg:
for key in keys:
if not key in sws_cfg[section]:
raise Exception("Key \'%s\' in switches configuration in section \'%s\' is missing" % (key, section))
def load_switches_cfg():
sws_cfg = configparser.ConfigParser()
sws_cfg.read("%s/conf/switches.cfg" % (sys.path[0]))
retval = dict()
for section in sws_cfg.sections():
retval[section] = dict(sws_cfg.items(section))
sws_cfg_check(retval)
return retval
def app_cfg_check(app_cfg):
keys = {'backup_dir_path', 'backup_server', 'file_expiration_timeout', 'tftp_dir_path', 'log_file', 'git_autocommit'}
for key in keys:
if not key in app_cfg:
raise Exception("Key \'%s\' in application configuration file is missing" % (key))
def load_app_cfg():
app_cfg = configparser.ConfigParser()
app_cfg.read("%s/conf/app.cfg" % (sys.path[0]))
retval = dict(app_cfg.items('APP'))
app_cfg_check(retval)
retval['git_autocommit'] = retval['git_autocommit'].lower() in ['true', '1', 'yes', 'y']
return retval
def git_autocommit(app_cfg):
command = "cd %s; git add -A; git commit -a -m 'autocommit on change'" % (app_cfg['backup_dir_path'])
subprocess.Popen(command,stdout=subprocess.PIPE, shell=True)
def main():
app_cfg = load_app_cfg()
logging.basicConfig(filename=app_cfg['log_file'], level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')
switches_cfg = load_switches_cfg()
threads = []
for switch in switches_cfg:
t = threading.Thread(target = backup, args = (switches_cfg[switch], app_cfg['backup_server']))
t.start()
threads.append(t)
for t in threads:
t.join()
end_time = time.time()
file_expiration_timeout = int(app_cfg['file_expiration_timeout'])
for section in switches_cfg:
switch = switches_cfg[section]
if switch['type'].lower() == '3com':
units = ast.literal_eval(switch['units'])
for unit in units:
tmp_file_path = "%s/%s_%d.cfg" % (app_cfg['tftp_dir_path'],switch['name'],unit)
if not os.access(tmp_file_path, os.R_OK):
logging.warning("Fail to read %s unit %d, expected file %s" % (switch['name'],unit,tmp_file_path))
elif (end_time - os.stat(tmp_file_path).st_mtime) > file_expiration_timeout:
logging.error("Configuration of %s unit %d, file %s is older than %d s, file will be ignored" % (switch['name'],unit,tmp_file_path, file_expiration_timeout))
else:
shutil.copy2(tmp_file_path, app_cfg['backup_dir_path'])
logging.info("Saved %s unit %d configuration" % (switch['name'],unit))
elif switch['type'].lower() == 'hp':
tmp_file_path = "%s/%s.cfg" % (app_cfg['tftp_dir_path'],switch['name'])
if not os.access(tmp_file_path, os.R_OK):
logging.warning("Fail to read %s, expected file %s" % (switch['name'],tmp_file_path))
elif (end_time - os.stat(tmp_file_path).st_mtime) > file_expiration_timeout:
logging.error("Configuration of %s, file %s is older than %d s, file will be ignored" % (switch['name'],tmp_file_path, file_expiration_timeout))
else:
shutil.copy2(tmp_file_path, app_cfg['backup_dir_path'])
logging.info("Saved %s configuration" % (switch['name']))
if app_cfg['git_autocommit'] is True:
git_autocommit(app_cfg)
return 0
if __name__ == '__main__':
main()
|
pford68/nupic.research | sensorimotor/sensorimotor/sensorimotor_experiment_runner.py | Python | gpl-3.0 | 9,403 | 0.006594 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2014, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import time
import numpy
from nupic.bindings.math import GetNTAReal
from nupic.research.monitor_mixin.monitor_mixin_base import MonitorMixinBase
from nupic.research.monitor_mixin.temporal_memory_monitor_mixin import (
TemporalMemoryMonitorMixin)
from sensorimotor.fast_general_temporal_memory import (
FastGeneralTemporalMemory as GeneralTemporalMemory)
# Uncomment the line below to use GeneralTemporalMemory
# from sensorimotor.general_temporal_memory import GeneralTemporalMemory
from sensorimotor.temporal_pooler import TemporalPooler
# Uncomment the line below to use SpatialTemporalPooler
# from sensorimotor.spatial_temporal_pooler import SpatialTemporalPooler as TemporalPooler
from sensorimotor.temporal_pooler_monitor_mixin import (
TemporalPoolerMonitorM | ixin)
class MonitoredGeneralTemporalMemory(TemporalMemoryMonitorMixin,
GeneralTemporalMemory): pass
class MonitoredTemporalPooler(TemporalPoolerMonitorMixin, TemporalPooler): pas | s
"""
Experiment runner class for running networks with layer 4 and layer 3. The
client is responsible for setting up universes, agents, and worlds. This
class just sets up and runs the HTM learning algorithms.
"""
realDType = GetNTAReal()
class SensorimotorExperimentRunner(object):
DEFAULT_TM_PARAMS = {
# These should be decent for most experiments, shouldn't need to override
# these too often. Might want to increase cellsPerColumn for capacity
# experiments.
"cellsPerColumn": 8,
"initialPermanence": 0.5,
"connectedPermanence": 0.6,
"permanenceIncrement": 0.1,
"permanenceDecrement": 0.02,
# We will force client to override these
"columnDimensions": "Sorry",
"minThreshold": "Sorry",
"maxNewSynapseCount": "Sorry",
"activationThreshold": "Sorry",
}
DEFAULT_TP_PARAMS = {
# Need to check these parameters and find stable values that will be
# consistent across most experiments.
"synPermInactiveDec": 0, # TODO: Check we can use class default here.
"synPermActiveInc": 0.001, # TODO: Check we can use class default here.
"synPredictedInc": 0.5, # TODO: Why so high??
"potentialPct": 0.9, # TODO: need to check impact of this for pooling
"initConnectedPct": 0.5, # TODO: need to check impact of this for pooling
"poolingThreshUnpredicted": 0.0,
# We will force client to override these
"numActiveColumnsPerInhArea": "Sorry",
}
def __init__(self, tmOverrides=None, tpOverrides=None, seed=42):
# Initialize Layer 4 temporal memory
params = dict(self.DEFAULT_TM_PARAMS)
params.update(tmOverrides or {})
params["seed"] = seed
self._checkParams(params)
self.tm = MonitoredGeneralTemporalMemory(mmName="TM", **params)
# Initialize Layer 3 temporal pooler
params = dict(self.DEFAULT_TP_PARAMS)
params["inputDimensions"] = [self.tm.numberOfCells()]
params["potentialRadius"] = self.tm.numberOfCells()
params["seed"] = seed
params.update(tpOverrides or {})
self._checkParams(params)
self.tp = MonitoredTemporalPooler(mmName="TP", **params)
def _checkParams(self, params):
for k,v in params.iteritems():
if v == "Sorry":
raise RuntimeError("Param "+k+" must be specified")
def feedTransition(self, sensorPattern, motorPattern, sensorimotorPattern,
tmLearn=True, tpLearn=None, sequenceLabel=None):
if sensorPattern is None:
self.tm.reset()
self.tp.reset()
else:
# Feed the TM
self.tm.compute(sensorPattern,
activeExternalCells=motorPattern,
formInternalConnections=True,
learn=tmLearn,
sequenceLabel=sequenceLabel)
# If requested, feed the TP
if tpLearn is not None:
tpInputVector, burstingColumns, correctlyPredictedCells = (
self.formatInputForTP())
activeArray = numpy.zeros(self.tp.getNumColumns())
self.tp.compute(tpInputVector,
tpLearn,
activeArray,
burstingColumns,
correctlyPredictedCells,
sequenceLabel=sequenceLabel)
def feedLayers(self, sequences, tmLearn=True, tpLearn=None, verbosity=0,
showProgressInterval=None):
"""
Feed the given sequences to the HTM algorithms.
@param tmLearn: (bool) Either False, or True
@param tpLearn: (None,bool) Either None, False, or True. If None,
temporal pooler will be skipped.
@param showProgressInterval: (int) Prints progress every N iterations,
where N is the value of this param
"""
(sensorSequence,
motorSequence,
sensorimotorSequence,
sequenceLabels) = sequences
currentTime = time.time()
for i in xrange(len(sensorSequence)):
sensorPattern = sensorSequence[i]
motorPattern = motorSequence[i]
sensorimotorPattern = sensorimotorSequence[i]
sequenceLabel = sequenceLabels[i]
self.feedTransition(sensorPattern, motorPattern, sensorimotorPattern,
tmLearn=tmLearn, tpLearn=tpLearn,
sequenceLabel=sequenceLabel)
if (showProgressInterval is not None and
i > 0 and
i % showProgressInterval == 0):
print ("Fed {0} / {1} elements of the sequence "
"in {2:0.2f} seconds.".format(
i, len(sensorSequence), time.time() - currentTime))
currentTime = time.time()
if verbosity >= 2:
# Print default TM traces
traces = self.tm.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tm.mmGetTraceResets())
if tpLearn is not None:
# Print default TP traces
traces = self.tp.mmGetDefaultTraces(verbosity=verbosity)
print MonitorMixinBase.mmPrettyPrintTraces(traces,
breakOnResets=
self.tp.mmGetTraceResets())
print
@staticmethod
def generateSequences(length, agents, numSequences=1, verbosity=0):
"""
@param length (int) Length of each sequence to generate, one for
each agent
@param agents (AbstractAgent) Agents acting in their worlds
@return (tuple) (sensor sequence, motor sequence, sensorimotor sequence,
sequence labels)
"""
sensorSequence = []
motorSequence = []
sensorimotorSequence = []
sequenceLabels = []
for _ in xrange(numSequences):
for agent in agents:
s,m,sm = agent.generateSensorimotorSequence(length, verbosity=verbosity)
sensorSequence += s
motorSequence += m
sensorimotorSequence += sm
sequenceLabels += [agent.world.toString()] * length
sensorSequence.append(None)
motorSequence.append(None)
sensorimotorSequence.append(None)
sequenceLabels. |
jackromo/RandTerrainPy | randterrainpy/terraindisplay.py | Python | mit | 3,444 | 0.001452 | """Module for displaying Terrain, both in 2D and 3D.
(Not accessible outside of package; use display methods of Terrain instead.)
"""
from Tkinter import Tk, Canvas, Frame, BOTH
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
import numpy as np
class Terrain2D(Frame):
"""2D graphical representation of a Terrain object.
Consists of a 2D top-down image of terrain as a grid of greyscale squares.
Each square corresponds to a height value, being on a scale from white if 1 to black if 0.
"""
SQUARE_SIDE = 3
"""Length of one side of colored square."""
@classmethod
def display_terrain(cls, terrain):
"""Display a Terrain in 2D.
| Args:
terrain (Terrain): Terrain to display.
"""
root = Tk()
dim | ensions = "{0}x{1}".format(terrain.width * Terrain2D.SQUARE_SIDE,
terrain.length * Terrain2D.SQUARE_SIDE)
root.geometry(dimensions)
app = Terrain2D(root, terrain)
root.mainloop()
def __init__(self, parent, terrain):
"""Make self child of a TK parent, then initialize own UI.
Args:
parent (TK): Parent to attach self to.
terrain (Terrain): Terrain to display.
"""
Frame.__init__(self, parent)
self.terrain = terrain
self.parent = parent
self.init_ui()
def init_ui(self):
"""Initialize UI of window."""
self.parent.title("Terrain (top-down)")
self.pack(fill=BOTH, expand=1)
self.draw_heights()
def draw_heights(self):
"""Draw grid of height values on window.
Heights are shown as squares, with greyscale colors becoming brighter for greater heights.
"""
canvas = Canvas(self)
for x in range(self.terrain.width):
for y in range(self.terrain.length):
x_pos = x * Terrain2D.SQUARE_SIDE
y_pos = y * Terrain2D.SQUARE_SIDE
color = int(self.terrain[x, y] * 15)
hex_color = "#" + "0123456789abcdef"[color] * 3
canvas.create_rectangle(x_pos, y_pos,
x_pos + Terrain2D.SQUARE_SIDE,
y_pos + Terrain2D.SQUARE_SIDE,
outline=hex_color, fill=hex_color)
canvas.pack(fill=BOTH, expand=1)
class Terrain3D(object):
"""A 3D representation of a Terrain.
Consists of a 3D surface mesh, shown at an angle. Can be seen at different angles.
Uses matplotlib.mplot3d to display rudimentary 3D version of terrain.
Notes:
Is somewhat guaranteed to be slow. Not intended for use other than visualizing terrain during development.
"""
def __init__(self, terrain):
self.terrain = terrain
self.x_grid, self.y_grid = np.meshgrid(range(self.terrain.width),
range(self.terrain.length))
z_vals = np.array([self.terrain[x, y] for x, y in zip(np.ravel(self.x_grid), np.ravel(self.y_grid))])
self.z_grid = z_vals.reshape(self.x_grid.shape)
def display_terrain(self):
"""Display 3D surface of terrain."""
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(self.x_grid, self.y_grid, self.z_grid)
ax.set_zlim(0.0, 1.0)
plt.show()
|
zgchizi/oppia-uc | extensions/interactions/GraphInput/GraphInput.py | Python | apache-2.0 | 3,179 | 0 | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, s | oftwar
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from extensions.interact | ions import base
class GraphInput(base.BaseInteraction):
"""Interaction for evaluating graphs."""
name = '几何图形'
description = '允许创建多种几何图形'
display_mode = base.DISPLAY_MODE_SUPPLEMENTAL
is_trainable = False
_dependency_ids = []
answer_type = 'Graph'
instructions = '创建几何图形'
narrow_instructions = 'View graph'
needs_summary = True
_customization_arg_specs = [{
'name': 'graph',
'description': '初始图形',
'schema': {
'type': 'custom',
'obj_type': 'Graph',
},
'default_value': {
'vertices': [{
'x': 150.0,
'y': 50.0,
'label': '',
}, {
'x': 200.0,
'y': 50.0,
'label': '',
}, {
'x': 150.0,
'y': 100.0,
'label': '',
}],
'edges': [{
'src': 0,
'dst': 1,
'weight': 1,
}, {
'src': 1,
'dst': 2,
'weight': 1,
}],
'isLabeled': False,
'isDirected': False,
'isWeighted': False,
}
}, {
'name': 'canAddVertex',
'description': '允许添加点',
'schema': {
'type': 'bool',
},
'default_value': False
}, {
'name': 'canDeleteVertex',
'description': '允许删除点',
'schema': {
'type': 'bool',
},
'default_value': False
}, {
'name': 'canMoveVertex',
'description': '允许移动点',
'schema': {
'type': 'bool',
},
'default_value': True
}, {
'name': 'canEditVertexLabel',
'description': '允许编辑节点标签',
'schema': {
'type': 'bool',
},
'default_value': False
}, {
'name': 'canAddEdge',
'description': '允许添加线',
'schema': {
'type': 'bool',
},
'default_value': True
}, {
'name': 'canDeleteEdge',
'description': '允许删除线',
'schema': {
'type': 'bool',
},
'default_value': True
}, {
'name': 'canEditEdgeWeight',
'description': '允许编辑线权重',
'schema': {
'type': 'bool',
},
'default_value': False
}]
|
NickDaly/GemRB-MultipleConfigs | gemrb/GUIScripts/bg2/LoadScreen.py | Python | gpl-2.0 | 1,632 | 0.026961 | # -*-python-*-
# GemRB - Infinity Engine Emulator
# Copyright (C) 2003-2004 The GemRB Project
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# LoadScreen.py - display Loading screen
###################################################
import GemRB
from GUIDefines import *
LoadScreen = None
def StartLoadScreen ():
global LoadScreen
GemRB.LoadWindowPack ("guils", 640, 480)
LoadScreen = GemRB.LoadWindow (0)
LoadScreen.SetFrame ()
Middle = LoadScreen.GetControl (3)
LoadPic = GemRB.GetGameString (STR_LOADMOS)
if LoadPic == "":
LoadPic = "GTRSK00"+str(GemRB.Roll(1,5,1) )
Middle.SetMOS (LoadPic)
Progress = 0
GemRB.SetVar ("Progress", Progress)
Table = | GemRB.LoadTable ("loadhint")
tmp = Table.GetRowCount ()
tmp = GemRB.Roll (1,tmp,0)
HintStr = Table.GetValue (tmp, 0)
TextArea = LoadScreen.GetControl (2)
TextArea.SetText (HintStr)
Bar = LoadScreen.GetControl (0)
Bar.SetVarAssoc ("Progre | ss", Progress)
LoadScreen.SetVisible (WINDOW_VISIBLE)
|
nngroup/django-pagebits | pagebits/migrations/0002_page_include_in_search.py | Python | bsd-3-clause | 401 | 0 | # -*- coding: utf-8 -*-
from | __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('pagebits', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='page',
name='include_in_search',
| field=models.BooleanField(default=True),
),
]
|
Manolaru/Python_Mantis | Working version/test/test_add_project.py | Python | apache-2.0 | 614 | 0.011401 | from model.project import Project
def test_add_project(app):
    """Create a project and verify the project list grows by exactly one.

    If a project with the same name already exists it is deleted first, so
    the creation below is guaranteed to add a new entry.
    """
    project = Project(name="students_project", description="about Project")
    try:
        # .index() raises ValueError when the project is absent.
        app.project.get_project_list().index(project)
        app.project.delete_named_project(project)
    except ValueError:
        pass
    old_projects = app.project.get_project_list()
    app.project.create(project)
    new_projects = app.project.get_project_list()
    assert len(old_projects) + 1 == len(new_projects)
    # The old list plus the created project must equal the new list.
    old_projects.append(project)
    assert sorted(old_projects, key=Project.id_or_max) == sorted(new_projects, key=Project.id_or_max)
|
phvu/nervanagpu | benchmarks/minibatch_layout_diff.py | Python | apache-2.0 | 3,364 | 0.01308 | #!/usr/bin/python
# Copyright 2014 Nervana Systems Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Using just cublas compare N as the contiguous dimension verses the non-contiguous dimension.
import numpy as np
import pycuda.driver as drv
from nervanagpu import NervanaGPU
from pycuda.autoinit import context
from scikits.cuda import cublas

print(context.get_device().name())

# Shared benchmark state: GPU backend, cuBLAS handle and CUDA timing events.
ng = NervanaGPU(stochastic_round=False, bench=True)
handle = cublas.cublasCreate()
start, end = (drv.Event(), drv.Event())
def cublas_dot(op, A, B, C, repeat=1, warmup=False):
    """Time ``repeat`` cuBLAS SGEMM calls computing C = op(A) * op(B).

    op:     two-char string of transpose flags, e.g. "nn", "tn", "nt".
    A, B:   GPU input matrices (must expose .shape and .gpudata).
    C:      GPU output matrix.
    repeat: number of timed GEMM launches.
    warmup: if True, run an extra untimed pass of ``repeat`` launches first.
    Returns the average milliseconds per GEMM and prints msecs/gflops.
    """
    # cuBLAS is column-major (Fortran order); leading dims are the row counts.
    lda = A.shape[0]
    ldb = B.shape[0]
    ldc = C.shape[0]
    m = C.shape[0]
    n = C.shape[1]
    # Inner dimension depends on whether A is transposed.
    k = A.shape[1] if op[0] == 'n' else A.shape[0]
    if warmup:
        for r in range(repeat):
            cublas.cublasSgemm(handle, op[0], op[1], m, n, k, 1.0, A.gpudata, lda, B.gpudata, ldb, 0.0, C.gpudata, ldc)
    # Time with CUDA events so only device time is measured.
    start.record()
    # Swap A and B to map from C order to Fortran
    for r in range(repeat):
        cublas.cublasSgemm(handle, op[0], op[1], m, n, k, 1.0, A.gpudata, lda, B.gpudata, ldb, 0.0, C.gpudata, ldc)
    end.record()
    end.synchronize()
    msecs = end.time_since(start) / repeat
    gflops = (m * n * k * 2.0) / (msecs * 1000000.0)
    print("%7.3f msecs %4.0f gflops (%s: %d,%d,%d)" % (msecs,gflops,op,m,n,k))
    return msecs
# N non-contiguous:
# fprop(nn): KC x CN = KN
# bprop(tn): KC^T x KN = CN
# updat(nt): KN x CN^T = KC
# N contiguous:
# fprop(nt): NC x KC^T = NK
# bprop(nn): NK x KC = NC
# updat(tn): NK^T x NC = KC
repeat = 2000
for K, C, N in ((3072,3072,32),):
    # Layout 1: N (minibatch) is the NON-contiguous dimension.
    total = 0
    for op, dimA, dimB, dimC in (
        ("nn", (K,C), (C,N), (K,N) ), # fprop
        ("tn", (K,C), (K,N), (C,N) ), # bprop
        ("nt", (K,N), (C,N), (K,C) ),): # update
        devA = ng.empty(dimA, dtype=np.float32)
        devB = ng.empty(dimB, dtype=np.float32)
        devC = ng.empty(dimC, dtype=np.float32)
        # fill with uniform randoms from -1 to 1
        devA[:] = 2 * (.5 - ng.rand())
        devB[:] = 2 * (.5 - ng.rand())
        total += cublas_dot(op, devA, devB, devC, repeat=repeat, warmup=True)
    print("N2 Total: ", total)
    # Layout 2: N (minibatch) is the CONTIGUOUS dimension.
    total = 0
    for op, dimA, dimB, dimC in (
        ("nt", (N,C), (K,C), (N,K) ), # fprop
        ("nn", (N,K), (K,C), (N,C) ), # bprop
        ("tn", (N,K), (N,C), (K,C) ),): # update
        devA = ng.empty(dimA, dtype=np.float32)
        devB = ng.empty(dimB, dtype=np.float32)
        devC = ng.empty(dimC, dtype=np.float32)
        # fill with uniform randoms from -1 to 1
        devA[:] = 2 * (.5 - ng.rand())
        devB[:] = 2 * (.5 - ng.rand())
        total += cublas_dot(op, devA, devB, devC, repeat=repeat)
    print("N1 Total: ", total)
print("--------------------------------------------------------------------------------")
|
ttgc/TtgcBot | src/utils/converters.py | Python | gpl-3.0 | 4,827 | 0.016988 | #!usr/bin/env python3.7
#-*-coding:utf-8-*-
## TtgcBot - a bot for discord
## Copyright (C) 2017 Thomas PIOT
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>
import re
import discord
from discord.ext import commands
from src.tools.Character import *
from src.tools.CharacterUtils import *
from src.utils.checks import GenericCommandParameters
# from src.tools.mapmanager import *
from src.tools.parsingdice import DiceType
class CharacterConverter(commands.Converter):
async def convert(self,ctx,arg):
data = GenericCommandParameters(ctx)
return data.jdr.get_character(arg)
class RaceConverter(commands.Converter):
async def convert(self,ctx,arg):
return retrieveRaceID(arg.replace("_"," "))
class SymbiontConverter(commands.Converter):
async def convert(self,ctx,arg):
return retrieveSymbiontID(arg.replace("_"," "))
class SkillConverter(commands.Converter):
async def convert(self,ctx,arg):
if arg.isdecimal():
return [Skill(int(arg))]
return Skill.skillsearch(arg.replace("_"," "))
class OperatorConverter(commands.Converter):
    """Argument converter that accepts only the '+' and '-' operators."""

    async def convert(self, ctx, arg):
        # Pass valid operators through unchanged; anything else is rejected.
        if arg in ("+", "-"):
            return arg
        raise commands.BadArgument("Operator conversion error ! Not a valid operator")
class MapTokenConverter(commands.Converter):
async def convert(self,ctx,arg):
data = GenericCommandParameters(ctx)
try: tk = Token.load(arg,data.jdr.server,data.jdr.channel)
except:
await ctx.message.channel.send(data.lang["token_notexist"].format(arg))
return None
return tk
class ShapeConverter(commands.Converter):
    """Argument converter mapping a shape keyword (case-insensitive) to a Shape member."""

    async def convert(self, ctx, arg):
        # Table-driven lookup instead of an if-chain; same accepted keywords.
        shapes = {
            "circle": Shape.CIRCLE,
            "sphere": Shape.SPHERE,
            "line": Shape.LINE,
            "rect": Shape.RECT,
            "cube": Shape.CUBE,
            "conic": Shape.CONIC,
        }
        key = arg.lower()
        if key in shapes:
            return shapes[key]
        raise commands.BadArgument("Shape conversion error ! Not a valid shape")
class MapEffectParameterConverter(commands.Converter):
    """Parses a space-separated list of ``tag=value`` map-effect parameters.

    Recognized tags:
      * ``lengths`` — dash-separated odd integers (even values rejected);
      * ``orientation`` — a direction keyword (right/left/up/down, any case)
        or one of the angles 0, 90, 180, 270;
      * anything else — parsed as a plain integer.
    Returns a dict mapping each tag to its parsed value.
    """

    async def convert(self, ctx, arg):
        args = arg.split(" ")
        while "" in args: args.remove("")
        data = {}
        for item in args:
            tag, value = item.split("=")
            if tag.lower() == "lengths":
                value = value.split("-")
                # Renamed inner index (was 'i', shadowing the outer loop variable).
                for j in range(len(value)):
                    value[j] = int(value[j])
                    if value[j] % 2 == 0:
                        raise commands.BadArgument("Shape parameter lengths invalid ! Should not be divisible by 2")
            elif tag.lower() == "orientation":
                directions = {"right":0,"left":180,"up":90,"down":270}
                if value.lower() in directions:
                    # BUGFIX: index with the lowercased key; the original used the
                    # raw value, so mixed-case input (e.g. "Right") raised KeyError.
                    value = directions[value.lower()]
                else:
                    value = int(value)
                    if value not in [0, 90, 180, 270]:
                        raise commands.BadArgument("Shape parameter orientation invalid ! Should be 0, 90, 180 or 270")
            else:
                value = int(value)
            data[tag] = value
        return data
class AffiliationConverter(commands.Converter):
async def convert(self,ctx,arg):
if arg.lower() == "none": return None
if not organizationExists(arg):
raise commands.BadArgument("Unexisting organization provided")
return arg
class BattleEntityConverter(commands.Converter):
async def convert(self,ctx,arg):
if re.match(r"\w+:\d\d?", arg):
tag, value = arg.split(":")
return tag, int(value)
raise commands.BadArgument("Invalid Battle Entity provided, cannot convert")
class DiceConverter(commands.Converter):
    """Extracts the first integer from the argument and converts it to a DiceType."""

    async def convert(self, ctx, arg):
        match = re.search(r"\d+", arg)
        if match is not None:
            # Narrowed from a bare 'except:' which also swallowed
            # KeyboardInterrupt/SystemExit; DiceType raises ValueError
            # for values that are not members of the enum.
            try:
                return DiceType(int(match.group(0)))
            except ValueError:
                raise commands.BadArgument("Unable to convert value {} into a dice type".format(arg))
        raise commands.BadArgument("Invalid Dice Type provided, cannot convert")
|
proyectosdeley/proyectos_de_ley | proyectos_de_ley/stats/migrations/0002_dispensed.py | Python | mit | 1,175 | 0.005106 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    """Create the ``Dispensed`` model holding aggregate counts of
    approved/dispensed law projects."""

    dependencies = [
        ('stats', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Dispensed',
            fields=[
                ('id', models.AutoField(primary_key=True, auto_created=True, serialize=False, verbose_name='ID')),
                ('total_approved', models.IntegerField(help_text='Number of projects approved in any instance.')),
                ('total_dispensed', models.IntegerField(help_text='Number of projects that did not go to 2nd round of votes.')),
                ('dispensed_by_plenary', models.IntegerField(help_text='Those projects dispensed due to `acuerdo del pleno`.')),
                ('dispensed_by_spokesmen', models.IntegerField(help_text='Those projects dispensed due to `junta de portavoces`.')),
                ('dispensed_others', models.IntegerField(help_text='All other projects dispensed, and those with no specific reason.')),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
|
leojohnthomas/ahkab | mosq.py | Python | gpl-2.0 | 18,308 | 0.032936 | # -*- coding: iso-8859-1 -*-
# mosq.py
# Implementation of the square-law MOS transistor model
# Copyright 2012 Giuseppe Venturini
#
# This file is part of the ahkab simulator.
#
# Ahkab is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 2 of the License.
#
# Ahkab is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License v2
# along with ahkab. If not, see <http://www.gnu.org/licenses/>.
"""
This module defines two classes:
mosq_device
mosq_model
This MOS Model follows the Square Law Mos Model:
[Vds > 0 in the following, transistor type: N]
1. No subthreshold conduction.
Vgs < Vt
Id = 0
2. Ohmic region of operation
Vgs > Vt
Vgd > Vt
Id = k w/l ((vgs-vt)vds - vds^2/2)
3. Saturation region of operation
Vgs > Vt
Vgs < Vt
Id = 1/2 k w/l (vgs-vt)^2 * (1 + lambd*(vds-vgs+vt))
"""
import constants, options, utilities, printing
import math
# DEFAULT VALUES FOR 500n CH LENGTH
COX_DEFAULT = .7e-3
VTO_DEFAULT = .5
GAMMA_DEFAULT = 1
PHI_DEFAULT = .7
KP_DEFAULT = 50e-6
LAMBDA_DEFAULT = .5
AVT_DEFAULT = 7.1e-3*1e-6
AKP_DEFAULT = 1.8e-2*1e-6
TCV_DEFAULT = 1e-3
BEX_DEFAULT = -1.5
ISMALL_GUESS_MIN = 1e-10
class mosq_device:
def __init__(self, nd, ng, ns, nb, W, L, model, M=1, N=1):
"""Quadratic Law MOSFET device
Parameters:
nd: drain node
ng: gate node
ns: source node
nb: bulk node
L: element width [m]
W: element length [m]
M: multiplier (n. of shunt devices)
N: series mult. (n. of series devices)
model: pass an instance of mosq_mos_model
Selected methods:
- get_output_ports() -> (nd, ns)
- get_drive_ports() -> (nd, nb), (ng, nb), (ns, nb)
"""
self.ng = ng
self.nb = nb
self.n1 = nd
self.n2 = ns
self.ports = ((self.n1, self.n2), (self.ng, self.n2), (self.nb, self.n2))
class dev_class: pass # empty class to hold device parameters
self.device = dev_class()
self.device.L = float(L) #channel length -
self.device.W = float(W) #channel width -
self.device.M = int(M) #parallel multiple device number
self.device.N = int(N) #series multiple device number
self.device.mckey = None
self.mosq_model = model
self.mc_enabled = False
self.opdict = {}
self.opdict.update({'state':(float('nan'), float('nan'), float('nan'))})
self.letter_id = 'M'
self.is_nonlinear = True
self.is_symbolic = True
self.dc_guess = [self.mosq_model.VTO*(0.4)*self.mosq_model.NPMOS, self.mosq_model.VTO*(1.1)*self.mosq_model.NPMOS, 0]
devcheck, reason = self.mosq_model._device_check(self.device)
if not devcheck:
raise Exception, reason + " out of boundaries."
def get_drive_ports(self, op):
"""Returns a tuple of tuples of ports nodes, as:
(port0, port1, port2...)
Where each port is in the form:
port0 = (nplus, nminus)
"""
return self.ports #d,g,b
def get_output_ports(self):
return ((self.n1, self.n2),)
def __str__(self):
mos_type = self._get_mos_type()
rep = " " + self.mosq_model.name + " w="+ str(self.device.W) + " l=" + \
str(self.device.L) + " M="+ str(self.device.M) + " N=" + \
str(self.device.N)
return rep
def _get_mos_type(self):
"""Returns N or P (capitalized)
"""
mtype = 'N' if self.mosq_model.NPMOS == 1 else 'P'
return mtype
def i(self, op_index, ports_v, time=0):
"""Returns the current flowing in the element with the voltages
applied as specified in the ports_v vector.
ports_v: [voltage_across_port0, voltage_across_port1, ...]
time: the simulation time at which the evaluation is performed.
It has no effect here. Set it to None during DC analysis.
"""
#print ports_v
ret = self.mosq_model.get_ids(self.device, ports_v, self.opdict)
return ret
def update_status_dictionary(self, ports_v):
if self.opdict is None:
self.opdict = {}
if not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gmd')) or \
not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gm')) or \
not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('gmb')) or \
not (self.opdict['state'] == ports_v[0] and self.opdict.has_key('Ids')):
self.opdict['state'] == ports_v[0]
self.opdict['gmd'] = self.g(0, ports_v[0], 0)
self.opdict['gm'] = self.g(0, ports_v[0], 1)
self.opdict['gmb'] = self.g(0, ports_v[0], 2)
self.opdict['Ids'] = self.i(0, ports_v[0])
def print_op_info(self, ports_v):
arr = self.get_op_info(ports_v)
print arr,
def get_op_info(self, ports_v):
"""Operating point info, for design/verification. """
mos_type = self._get_mos_type()
self.update_status_dictionary(ports_v)
sat_status = "SATURATION" if self.opdict['SAT'] else "LINEAR"
if not self.opdict["ON"]:
status = "OFF"
else:
status = "ON"
arr = [["M"+self.descr, mos_type.upper()+" ch", status, "", "", sat_status, "", "", "", "", "",""],]
arr.append(["beta", "[A/V^2]:", self.opdict['beta'], "Weff", "[m]:", str(self.opdict['W'])+" ("+str(self.device.W)+")", "L", "[m]:", str(self.opdict['L'])+ " ("+str(self.device.L)+")", "M/N:", "", str(self.device.M)+"/"+str(self.device.N)])
arr.append(["Vds", "[V]:", float(ports_v[0][0]), "Vgs", "[V]:", float(ports_v[0][1]), "Vbs", "[V]:", float(ports_v[0][2]), "", "", ""])
arr.append([ "VTH", "[V]:", self.opdict['VTH'], "VOD", "[V]:", self.opdict['VOD'], "", "","", "VA", "[V]:", str(self.opdict['Ids']/self.opdict['gmd'])])
arr.append(["Ids", "[A]:", self.opdict['Ids'], "", "", "", "", "", "", "", "", ''])
arr.append(["gm", "[S]:", self.opdict['gm'], "gmb", "[S]:", self.opdict['gmb'], "ro", "[Ohm]:", 1/self.opdict['gmd'], "", "", ""])
#arr.append([ "", "", "", "", "", ""])
return printing.table_setup(arr)
def g(self, op_index, ports_v, port_index, time=0):
"""Returns the differential (trans)conductance rs the port specified by port_index
when the element has the voltages specified in ports_v across its ports,
at (simulation) time.
ports_v: a list in the form: [voltage_across_port0, voltage_across_port1, ...]
port_index: an integer, 0 <= port_index < len(se | lf.get_ports())
time: the simulation time at which the evaluation is performed. Set it to
None during DC analysis.
"""
assert op_index == 0
assert port_index < 3
if port_index == 0:
g = self.mosq_model.get_gmd(self.device, ports_v, self.opdict)
elif port_index == 1:
g = self.mosq_model.get_gm(self.device, ports_v, self.opdict)
if port_index == 2:
g = self.mosq_model.get_gmb(s | elf.device, ports_v, self.opdict)
if op_index == 0 and g == 0:
if port_index == 2:
sign = -1
else:
sign = +1
g = sign*options.gmin*2
#print type(g), g
if op_index == 0 and port_index == 0:
self.opdict.update({'gmd':g})
elif op_index == 0 and port_index == 1:
self.opdict.update({'gm':g})
elif op_index == 0 and port_index == 2:
self.opdict.update({'gmb':g})
return g
def get_value_function(self, identifier):
def get_value(self):
return self.opdict[identifier]
return get_value
def get_mc_requirements(self):
return True, 2
def setup_mc(self, status, mckey):
self.mc_enabled = status
if self.mc_enabled:
self.device.mckey = mckey
else:
self.device.mckey = None
class scaling_holder: pass # will hold the scaling factors
class mosq_mos_model:
def __init__(self, name=None, TYPE='n', TNOM=None, COX=None, \
GAMMA=None, NSUB=None, PHI=None, VTO=None, KP=None, \
LAMBDA=None, AKP=None, AVT=None,\
TOX=None, VFB=None, U0=None, TCV=None, BEX=None):
self.scaling = scaling_holder()
self.name = "model_mosq0" if name is None else name
Vth = constants.Vth()
self.TNOM = float(TNOM) if TNOM is not None else constants.Tref
#print "TYPE IS:" + TYPE
self.NPMOS = 1 if TYPE == 'n' else -1
# optional parameters (no defaults)
self.TOX = float(TOX) if TOX is not None else None
self.NSUB = float(NSUB) if NSUB is not N |
skuschel/postpic | postpic/datahandling.py | Python | gpl-3.0 | 89,742 | 0.002518 | #
# This file is part of postpic.
#
# postpic is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# postpic is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with postpic. If not, see <http://www.gnu.org/licenses/>.
#
# Stephan Kuschel, 2014-2018
# Alexander Blinne, 2017
"""
The Core module for final data handling.
This module provides classes for dealing with axes, grid as well as the Field
class -- the final output of the postpic postprocessor.
Terminology
-----------
A data field with N numeric points has N 'grid' points,
but N+1 'grid_nodes' as depicted here:
.. code-block:: none
+---+---+---+---+---+
| | | | | |
+---+---+---+---+---+
| | | | | |
+---+---+---+---+---+
| | | | | |
+---+---+---+---+---+
o o o o o grid (coordinates where data is sampled at)
o o o o o o grid_node (coordinates of grid cell boundaries)
| | extent
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import sys
try:
from collections.abc import Iterable, Mapping
except ImportError:
from collections import Iterable, Mapping
import copy
import warnings
import os
import numbers
import numpy as np
import scipy.ndimage as spnd
import scipy.interpolate as spinterp
import scipy.integrate
import scipy.signal as sps
import numexpr as ne
from ._compat import tukey, meshgrid, broadcast_to, NDArrayOperatorsMixin
from . import helper
from . import io
from .helper_fft import fft
if sys.version[0] == '2':
import functools32 as functools
else:
import functools
if sys.version[0] == '2':
from itertools import izip_longest as zip_longest
else:
from itertools import zip_longest
try:
with warnings.catch_warnings():
# skimage produces a DeprecationWarning by importing `imp`. We will silence this warning
# as we have nothing to do with it
warnings.simplefilter("ignore", DeprecationWarning)
from skimage.restoration import unwrap_phase
except ImportError:
unwrap_phase = None
__all__ = ['KeepDim', 'Field', 'Axis']
class KeepDim(object):
    """Lightweight wrapper marking a value whose dimension should be kept.

    NOTE(review): presumably consumed by Field/Axis slicing code later in this
    module to distinguish "keep this axis" from a plain index — confirm usage.
    """
    def __init__(self, value):
        # value: the wrapped index/value.
        self.value = value
class Axis(object):
'''
Axis handling for a single Axis.
Create an Axis object from scratch.
The least required arguments are any of:
* grid
* grid_node
* extent _and_ n
The remaining fields will be deduced from the givens.
More arguments may be supplied, as long as they are compatible.
'''
def __init__(self, name='', unit='', **kwargs):
self.name = name
self.unit = unit
self._grid_node = kwargs.pop('grid_node', None)
if self._grid_node is not None:
self._grid_node = np.array(self._grid_node)
if self._grid_node.ndim != 1:
raise ValueError("Passed array grid_node has ndim != 1.")
if helper.monotonicity(self._grid_node) == 0:
if np.isclose(self._grid_node[0], self._grid_node[-1], atol=0):
s = 'Grid_node spacing is zero on axis "{}" at value {}.'
raise ValueError(s.format(self.name, self._grid_node[0]))
raise ValueError("Passed array grid_node is not monotonous.")
self._grid = kwargs.pop('grid', None)
if self._grid is not None:
self._grid = np.array(self._grid)
if self._grid.ndim != 1:
raise ValueError("Passed array grid has ndim != 1.")
if helper.monotonicity(self._grid) == 0:
if np.isclose(self._grid[0], self._grid[-1], atol=0):
s = 'Grid spacing is zero on axis "{}" at value {}.'
raise ValueError(s.format(self.name, self._grid[0]))
raise ValueError("Passed array grid is not monotonous.")
self._extent = kwargs.pop('extent', None)
if self._extent is not None:
if not isinstance(self._extent, Iterable) or len(self._extent) != 2:
raise ValueError("Passed extent is not an iterable of length 2")
| self._n = kwargs.pop('n', None)
# kwargs must be exhausted now
if len(kwargs) > 0:
raise TypeError('got an unexpcted keyword argument "{}"'.format(kwargs))
if self._grid_node is None:
if self._grid is None:
if self._extent is None or self._n is None:
# If we are here really nothing has been passed, like with the old version
# of this class
raise ValueErro | r("Missing required arguments for Axis construction.")
# only extent and n have been passed, use that to create a linear grid_node
self._grid_node = np.linspace(self._extent[0], self._extent[-1], self._n+1,
endpoint=True)
else:
# grid has been passed, create grid_node from grid.
if len(self._grid) > 3:
grid_spline = scipy.interpolate.UnivariateSpline(np.arange(len(self._grid)),
self._grid, s=0)
gn_inner = grid_spline(np.arange(0.5, len(self._grid)-1))
gn = np.pad(gn_inner, 1, 'constant')
del grid_spline
else:
gn = np.convolve(self._grid, np.ones(2) / 2.0, mode='full')
if self._extent is not None:
# extent has been passed, use this for the end points of grid_node
if self._extent[0] >= self._grid[0] or self._extent[-1] <= self._grid[-1]:
raise ValueError("Passed invalid extent.")
gn[0] = self._extent[0]
gn[-1] = self._extent[-1]
else:
# estimate end points of grid_node as in the old grid.setter
if len(self._grid) > 1:
gn[0] = self._grid[0] + (self._grid[0] - gn[1])
gn[-1] = self._grid[-1] + (self._grid[-1] - gn[-2])
else:
gn[0] = self._grid[0] - 0.5
gn[-1] = self._grid[0] + 0.5
self._grid_node = gn
# now we are garantueed to have a grid_node
if self._grid is None:
# create grid from grid_node like in the old grid.getter
if len(self._grid_node) > 3:
node_spline = scipy.interpolate.UnivariateSpline(np.arange(-0.5,
len(self._grid_node)-1),
self._grid_node, s=0)
self._grid = node_spline(np.arange(len(self._grid_node)-1))
del node_spline
else:
self._grid = np.convolve(self._grid_node, np.ones(2) / 2.0, mode='valid')
else:
# check if grid and grid_node are compatible
if not np.all(self._grid > self._grid_node[:-1]) and \
np.all(self._grid < self._grid_node[1:]):
raise ValueError("Points of passed grid are not within corresponding "
"grid_nodes.")
# set extent if not given or check if compatible with grid_node
if self._extent is None:
self._extent = [self._grid_node[0], self._grid_node[-1]]
elif self._extent[0] != self._grid_node[0] or self._extent[-1] != self._grid_node[-1]:
raise ValueError("Passed invalid extent.")
# make sure grid and grid_node is immutable
self. |
mudream4869/imaginary-city | admin/app.py | Python | apache-2.0 | 3,380 | 0 | import tornado.ioloop
import tornado.web
import os
import json
import datetime
from blogpost import BlogpostHandler
from image import ImageHandler
from setting import SettingHandler
class DateTimeEncoder(json.JSONEncoder):
    """JSON encoder that serializes datetime/time objects as ISO-8601 strings."""

    def default(self, o):  # pylint: disable=E0202
        # Temporal types become ISO strings; everything else is delegated
        # to the base implementation (which raises TypeError).
        if isinstance(o, (datetime.datetime, datetime.time)):
            return o.isoformat()
        return super(DateTimeEncoder, self).default(o)
class RequestHandler(tornado.web.RequestHandler):
pass
class BlogServer(RequestHandler):
    """Admin UI for blog posts.

    GET renders either the post list (empty path) or a single-post editor.
    POST dispatches on the ``method`` form argument:
    getPost / pushPage / updatePost / createPost / deletePost.
    """
    def get(self, filepath):
        # Empty path -> post list page; otherwise the editor for one post.
        if not filepath:
            self.render("blogpost_list.html",
                        blogpost_list=BlogpostHandler.inst.list())
        else:
            self.render("blogpost.html",
                        filepath=filepath)
    def post(self, filepath):
        method = self.get_argument("method")
        if method == "getPost":
            # Datetimes in the post dict are serialized via DateTimeEncoder.
            self.finish(json.dumps(BlogpostHandler.inst.get(
                filepath), cls=DateTimeEncoder))
        elif method == "pushPage":
            BlogpostHandler.inst.pushPage()
        elif method == "updatePost":
            # Both fields are optional; only the supplied ones are updated.
            md = self.get_argument("md", default=None)
            title = self.get_argument("title", default=None)
            if md is not None:
                BlogpostHandler.inst.updatePostMD(filepath, md)
            if title is not None:
                BlogpostHandler.inst.updatePostTitle(filepath, title)
        elif method == "createPost":
            # On failure 'err' is truthy and returned instead of the post JSON.
            err, blogpost = BlogpostHandler.inst.createPost(filepath)
            self.finish(err or json.dumps(blogpost, cls=DateTimeEncoder))
        elif method == "deletePost":
            BlogpostHandler.inst.deletePost(filepath)
class SettingServer(RequestHandler):
    """Renders the settings page (GET) and stores a single setting (POST)."""
    def get(self, name):
        self.render("setting.html",
                    setting=SettingHandler.inst.setting)
    def post(self, name):
        # The URL path component is the setting key; 'val' is its new value.
        val = self.get_argument("val")
        SettingHandler.inst.set(name, val)
class ImageServer(RequestHandler):
    """Image management endpoint: list, upload or delete images in a directory.

    The action is selected by the ``method`` form argument.
    """

    def post(self, filepath):
        method = self.get_argument("method")
        print(method, filepath)
        if method == "listImage":
            image_list = ImageHandler.inst.listImage(filepath)
            self.finish(json.dumps(image_list))
        elif method == "uploadImage":
            # Single-file upload: body comes from the 'file' multipart field.
            upload_file = self.request.files['file'][0]
            filename = self.get_argument("filename")
            ImageHandler.inst.uploadImage(
                filepath, filename, upload_file['body'])
        elif method == "deleteImage":
            filename = self.get_argument("filename")
            ImageHandler.inst.deleteImage(filepath, filename)
_settings = {
"static_path": os.path.join(os.path.dirname(__file__), "static"),
"template_path": os.path.join(os.path.dirname(__file__), "templ"),
"autoreload": True,
"debug": True
}
application = tornado.web.Application([
(r"/blog/(.*)", BlogServer),
(r"/setting/(.*)", SettingServer),
(r"/image/(.*)", ImageServer),
(r'/static/(.*)', tornado.web.StaticFileHandler, {'path': "./static/"}),
(r'/blogdb/(.*)', tornado.web.StaticFileHandler,
{'path': "../frontend/blog/"}),
], **_settings)
if __name__ == "__main__":
application.listen(8888)
BlogpostHandler()
ImageHandler()
SettingHandler()
tornado.ioloop.IOLoop.current().start()
|
catlee/hashsync | upload.py | Python | bsd-3-clause | 2,595 | 0.003854 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from hashsync.connection import connect
from hashsync.transfer import upload_directory
import logging
log = logging.getLogger(__name__)
def main():
    """Command-line entry point: upload a directory to S3 and write a manifest."""
    import argparse
    import sys
    import gzip
    parser = argparse.ArgumentParser()
    # TODO: These aren't required if no-upload is set
    parser.add_argument("-r", "--region", dest="region", required=True)
    parser.add_argument("-b", "--bucket", dest="bucket_name", required=True)
    parser.add_argument("-q", "--quiet", dest="loglevel", action="store_const", const=logging.WARN, default=logging.INFO)
    parser.add_argument("-v", "--verbose", dest="loglevel", action="store_const", const=logging.DEBUG)
    parser.add_argument("-j", "--jobs", dest="jobs", type=int, help="how many simultaneous uploads to do", default=8)
    # Fixed typo in help text: "manifet" -> "manifest".
    parser.add_argument("-o", "--output", dest="output", help="where to output manifest, use '-' for stdout", default="manifest.gz")
    parser.add_argument("-z", "--compress-manifest", dest="compress_manifest",
                        help="compress manifest output (default if outputting to a file)",
                        action="store_true", default=None)
    parser.add_argument("--no-compress-manifest", dest="compress_manifest",
                        help="don't compress manifest output (default if outputting to stdout)",
                        action="store_false")
    parser.add_argument("--no-upload", dest="dryrun", action="store_true", default=False)
    parser.add_argument("--report-dupes", dest="report_dupes", action="store_true", default=False, help="report on duplicate files")
    parser.add_argument("dirname", help="directory to upload")

    args = parser.parse_args()

    logging.basicConfig(level=args.loglevel, format="%(asctime)s - %(message)s")
    # Make boto shut up
    # TODO: Add -v -v support to set this to DEBUG?
    logging.getLogger('boto').setLevel(logging.INFO)

    if not args.dryrun:
        connect(args.region, args.bucket_name)

    manifest = upload_directory(args.dirname, args.jobs, dryrun=args.dryrun)

    if args.output == '-':
        output_file = sys.stdout
    else:
        output_file = open(args.output, 'wb')
        # Enable compression by default if we're writing out to a file
        if args.compress_manifest is None:
            args.compress_manifest = True

    if args.compress_manifest:
        output_file = gzip.GzipFile(fileobj=output_file, mode='wb')

    # NOTE(review): the GzipFile is never explicitly closed; manifest.save
    # presumably flushes it — confirm, otherwise the gzip trailer may be lost.
    manifest.save(output_file)

    if args.report_dupes:
        manifest.report_dupes()
if __name__ == '__main__':
main()
|
PanDAWMS/autopyfactory | autopyfactory/plugins/queue/sched/StatusOffline.py | Python | apache-2.0 | 2,304 | 0.011285 | #! /usr/bin/env python
#
from autopyfactory.interfaces import SchedInterface
import logging
class StatusOffline(SchedInterface):
    """Sched plugin: when the WMS site status is 'offline', submit a fixed
    number of pilots (sched.statusoffline.pilots); otherwise pass the input
    value through unchanged."""
    id = 'statusoffline'

    def __init__(self, apfqueue, config, section):
        try:
            self.apfqueue = apfqueue
            self.log = logging.getLogger('autopyfactory.sched.%s' %apfqueue.apfqname)
            # Number of pilots to submit while the site is offline (default 0).
            self.pilots_in_offline_mode = self.apfqueue.qcl.generic_get(self.apfqueue.apfqname,
                                                                        'sched.statusoffline.pilots',
                                                                        'getint',
                                                                        default_value=0)
            self.log.debug("SchedPlugin: Object initialized.")
        except Exception as ex:
            self.log.error("SchedPlugin object initialization failed. Raising exception")
            raise ex

    def calcSubmitNum(self, n=0):
        """Return (number_of_pilots, diagnostic_message) for this cycle."""
        self.log.debug('Starting.')

        self.wmsqueueinfo = self.apfqueue.wmsstatus_plugin.getInfo(
            queue=self.apfqueue.wmsqueue)
        self.siteinfo = self.apfqueue.wmsstatus_plugin.getSiteInfo(
            site=self.apfqueue.wmsqueue)
        self.batchinfo = self.apfqueue.batchstatus_plugin.getInfo(
            queue=self.apfqueue.apfqname)

        if self.wmsqueueinfo is None or self.siteinfo is None or self.batchinfo is None:
            # Without full status information we submit nothing.
            self.log.warning("wmsinfo, siteinfo, or batchinfo is None!")
            out = 0
            msg = "StatusOffline:comment=no wms/site/batchinfo,in=%s,ret=0" %n
        else:
            sitestatus = self.siteinfo.status
            self.log.debug('site status is %s' %sitestatus)
            out = n
            msg = None
            # choosing algorithm
            if sitestatus == 'offline':
                self.log.info('Return=%s' %self.pilots_in_offline_mode)
                out = self.pilots_in_offline_mode
                msg = "StatusOffline:comment=offline,in=%s,ret=%s" %(n, self.pilots_in_offline_mode)
            else:
                msg = "StatusOffline:comment=not offline,in=%s,ret=%s" %(n, out)

        self.log.info(msg)
        return (out, msg)
|
Logan213/is210-week-04-warmup | task_05.py | Python | mpl-2.0 | 452 | 0 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Blood Pressure Reading"""
MYINPUT = raw_input('What is your blood pressure? ')
MYINPUT = int(MYINPUT)
if MYINPUT <= 89:
BP_STATUS = 'Low'
elif 89 < MYINPUT <= 119:
BP_STATUS = 'Ideal'
elif 119 < MYINPUT <= 139:
BP_STATUS = 'Warning'
elif 139 < MYINPUT <= 159:
BP_STATUS = 'High'
else:
BP_STATUS = 'Emergency'
OUTPUT = 'Your status is currently: {}'.format(BP_STATUS)
print OUTPU | T
| |
michalkurka/h2o-3 | h2o-py/tests/testdir_algos/gam/pyunit_PUBDEV_7367_random_gridsearch_generic.py | Python | apache-2.0 | 3,832 | 0.010177 | from __future__ import division
from __future__ import print_function
from past.utils import old_div
import sys
sys.path.insert(1, "../../../")
import h2o
from tests import pyunit_utils
from h2o.estimators.gam import H2OGeneralizedAdditiveEstimator
from h2o.grid.grid_search import H2OGridSearch
# In this test, we check to make sure that a random discrete grid search on a GAM functions correctly.
# The test searches over 3 parameters, lambda, scale, and gam_columns.
# The test then compares the results of the grid search models with the models we created
# by manually searching over the hyperspace.
# If the coefficients do not match or an incorrect number of models is generated, the test throws an assertion error.
class test_random_gam_gridsearch_generic:
    """Random-discrete grid search over GAM hyperparameters.

    Trains a grid over scale/gam_columns/lambda, trains the same hyperspace
    manually, then matches each manual model to a grid model and compares
    coefficients. Fails if coefficients differ or the model count is wrong.
    """
    h2o_data = []
    myX = []
    myY = []
    h2o_model = []
    search_criteria = {'strategy': 'RandomDiscrete', "max_models": 8, "seed": 1}
    hyper_parameters = {'scale': [[1, 1], [2, 2]], 'gam_columns': [["C11", "C12"], ["C12", "C13"]], 'lambda': [0, 0.01]}
    # NOTE(review): manual_gam_models is a mutable class attribute mutated via
    # append in setup_data — shared across instances; fine for single use.
    manual_gam_models = []
    num_grid_models = 0
    num_expected_models = 8

    def __init__(self):
        self.setup_data()

    def setup_data(self):
        """
        This function performs all initializations necessary:
        load the data sets and set the training set indices and response column index
        """
        self.h2o_data = h2o.import_file(
            path = pyunit_utils.locate("smalldata/glm_test/gaussian_20cols_10000Rows.csv"))
        self.h2o_data["C1"] = self.h2o_data["C1"].asfactor()
        self.h2o_data["C2"] = self.h2o_data["C2"].asfactor()
        self.myX = ["C1", "C2"]
        self.myY = "C21"
        # One manual model per point of the full hyperparameter grid.
        for scale in self.hyper_parameters['scale']:
            for gam_columns in self.hyper_parameters['gam_columns']:
                for lambda_ in self.hyper_parameters['lambda']:
                    self.manual_gam_models.append(H2OGeneralizedAdditiveEstimator(
                        family="gaussian", gam_columns=gam_columns,
                        keep_gam_cols=True, scale=scale, lambda_=lambda_,
                    ))

    def train_models(self):
        """Train the random grid search and all manually-built models."""
        self.h2o_model = H2OGridSearch(
            H2OGeneralizedAdditiveEstimator(family="gaussian", keep_gam_cols=True),
            hyper_params=self.hyper_parameters, search_criteria=self.search_criteria)
        self.h2o_model.train(x = self.myX, y = self.myY, training_frame = self.h2o_data)
        for model in self.manual_gam_models:
            model.train(x = self.myX, y = self.myY, training_frame = self.h2o_data)
        print("done")

    def match_models(self):
        """Pair each manual model with its grid counterpart and compare coefficients."""
        for model in self.manual_gam_models:
            scale = model.actual_params['scale']
            gam_columns = model.actual_params['gam_columns']
            lambda_ = model.actual_params['lambda']
            for grid_search_model in self.h2o_model.models:
                if grid_search_model.actual_params['gam_columns'] == gam_columns \
                        and grid_search_model.actual_params['scale'] == scale \
                        and grid_search_model.actual_params['lambda'] == lambda_:
                    self.num_grid_models += 1
                    assert grid_search_model.coef() == model.coef(), "coefficients should be equal"
                    break
        assert self.num_grid_models == self.num_expected_models, "Grid search model parameters incorrect or incorrect number of models generated"
def test_gridsearch():
test_gam_grid = test_random_gam_gridsearch_generic()
test_gam_grid.train_models()
test_gam_grid.match_models()
if __name__ == "__main__":
pyunit_utils.standalone_test(test_gridsearch)
else:
test_gridsearch()
|
RedbackThomson/LoLAlerter | lolalerter/twitchalerts/DonateTracker.py | Python | mit | 2,854 | 0.0459 | '''
LoLAlerter: Every LoL partner's pal
Copyright (C) 2015 Redback
This file is part of LoLAlerter
'''
import json
import threading
import time
import urllib.error
from alerterredis import AlerterRedis
from logger import Logger
import constants as Constants
class DonateTracker(object):
'''
The DonateTracker is in charge of detecting new donations through the
TwitchAlerts API. It will send the donations through to the users' chat
when a new donation arrives.
'''
def __init__(self, active_user):
'''
Constructor
:param user: The associated database user model associated
'''
self.active_user = active_user
self.user = active_user.summoner.user
def start(self):
'''
Begins tracking donation changes
'''
self.alive = True
# Create a new thread for each sub tracker
self.run_thread = threading.Thread(target=self.run_method)
self.run_thread.start()
def stop(self):
'''
Stops tracking donation changes
'''
self.alive = False
def run_method(self):
'''
The method on which the thread will run
'''
first = True
while(self.alive):
# Get the latest few donations
redis = AlerterRedis()
latest = Donation.fetch(5, self.user.twitchalertskey)
for new in latest:
donate_id = new['id']
# Store the overnight donations in the redis server
if first:
redis.new_donation(self.user, donate_id)
continue
if (not redis.has_donation(self.user, donate_id)):
redis.new_donation(self.user, donate_id)
if(self.user.minimumdonation != None and
float(new['amount']) < self.user.minimumdonation):
continue
self.active_user.new_donation(new['donator']['name'],
new['amount'], new['message'])
first = False
# Minimum cache time by TwitchAlerts
time.sleep(20)
class Donation(object):
'''
A donation is a representation of a donation towards a particular
Twitch channel
'''
@staticmethod
def fetch(count, key):
'''
| Fetches a list of the latest donations
:param count: The amount of donations to fetch
:param key: The auth key for the TwitchAlerts API
'''
url = str(Constants.TWITCHALERTS_URI).format(token=key)
try:
opener = urllib.request.build_opener()
opener.addheaders = [('User-agent', 'LoLAlerter')]
open = opener.open(url)
response = open.read()
return Donation.parse_donation | s(response.decode('utf8'))[:count]
except urllib.error.HTTPError as e:
if(e.code != 404):
Logger().get().exception(e)
return []
except urllib.error.URLError as e:
Logger().get().exception(e)
return []
@staticmethod
def parse_donations(response):
'''
Parses the TwitchAlerts API JSON into donation objects
:param response: The JSON data from the TwitchAlerts API
'''
subs = json.loads(response)
return subs['donations'] |
Mega-DatA-Lab/mxnet | tests/python/unittest/test_multi_device_exec.py | Python | apache-2.0 | 3,000 | 0.013 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import numpy as np
import mxnet as mx
def test_ctx_group():
with mx.AttrScope(ctx_group='stage1'):
data = mx.symbol.Variable('data')
fc1 = mx.symbol.FullyConnected(data = data, name='fc1', num_hidden=128)
act1 = mx.symbol.Activation(data = fc1, name='relu1', act_type="relu")
set_stage1 = set(act1.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
fc2 = mx.symbol.FullyConnected(data = act1, name = 'fc2', num_hidden = 64)
act2 = mx.symbol.Activation(data = fc2, name='relu2', act_type="relu")
fc3 = mx.symbol.FullyConnected(data = act2, name='fc3', num_hidden=10)
fc3 = mx.symbol.BatchNorm(fc3)
mlp = mx.symbol.SoftmaxOutput(data = fc3, name = 'softmax')
set_stage2 = set(mlp.list_arguments()) - set_stage1
group2ctx = {
'stage1' : mx.cpu(1),
'stage2' : mx.cpu(2)
}
texec = mlp.simple_bind(mx.cpu(0),
group2ctx=group2ctx,
data=(1,200))
for arr, name in zip(texec.arg_arrays, mlp.list_arguments()):
if name in set_stage1:
assert arr.context == group2ctx['stage1']
else:
assert arr.context == group2ctx['stage2']
def test_ctx_group_sparse():
with mx.AttrScope(ctx_group='stage1'):
lhs = mx.symbol.Variable('lhs', stype='csr')
rhs = mx.symbol.Variable('rhs', stype='row_sparse')
dot = mx.symbol.dot(lhs, rhs, name='dot')
set_stage1 = set(dot.list_arguments())
with mx.AttrScope(ctx_group='stage2'):
softmax = mx.s | ymbol.SoftmaxOutput(data = dot, name = 'softmax')
set_stage2 = set(softmax.list_arguments()) - set_stage1
group2ctx = {
'stage1' : mx.cpu(1),
'stage2' : mx.cpu(2)
}
texec = softmax.simple_bind(mx.cpu(0), group2ctx=group2ctx,
lhs=(32,200), rhs=(200, 5))
for arr, name | in zip(texec.arg_arrays, softmax.list_arguments()):
if name in set_stage1:
assert arr.context == group2ctx['stage1']
else:
assert arr.context == group2ctx['stage2']
if __name__ == '__main__':
test_ctx_group()
test_ctx_group_sparse()
|
erdc/proteus | scripts/pyadhRunSSO.py | Python | mit | 19,696 | 0.013201 | from __future__ import print_function
## Automatically adapted for numpy.oldnumeric Apr 14, 2008 by -c
#! /usr/bin/env python
from future import standard_library
standard_library.install_aliases()
from builtins import str
from builtins import input
from builtins import zip
from builtins import range
import os
## need to insert more comments
# \ingroup scripts
# \file proteusRunSSO.py
#
# @{
# \brief driver for multi-model simulations
#
import sys
import pickle
import numpy as numpy
from proteus import *
from warnings import *
def proteusRun(runRoutines):
import optparse
if sys.version_info[1] >= 5:
import cProfile as profiler
else:
import profile as profiler
#
import pstats
usage = "usage: %prog [options] pFile.py [nFile.py]"
parser = optparse.OptionParser(usage=usage)
parser.add_option("-i", "--interactive",
help="Read input from stdin",
action="store_true",
dest="interactive",
default='')
parser.add_option("-M", "--masterModel",
help="Set the model that controls the time step",
action="store",
type="int",
dest="masterModel",
default=0)
parser.add_option("-V", "--viewer",
help="Set the application to use for viewing results. Can be gnuplot or matlab",
action="store",
type="string",
dest="viewer",
default=False)
parser.add_option("-b", "--batchFile",
help="Read input from a file",
action="store",
type="string",
dest="batchFileName",
default="")
parser.add_option("-p", "--profile",
help="Generate a profile of the run",
action="store_true",
dest="profile",
default=False)
parser.add_option("-m", "--memory",
help="Track memory usage of the run",
action="callback",
callback=Profiling.memProfOn_callback)
parser.add_option("-l", "--log",
help="Store information about what the code is doing,0=none,10=everything",
action="store",
type="int",
dest="logLevel",
default=1)
parser.add_option("-v", "--verbose",
help="Print logging information to standard out",
action="callback",
callback=Profiling.verboseOn_callback)
parser.add_option("-E", "--ensight",
help="write data in ensight format",
action="store_true",
dest="ensight",
default=False)
parser.add_option("-w", "--wait",
help="stop after each time step",
action="store_true",
dest="wait",
default=False)
parser.add_option('--probDir',
default='.',
help="""where to find problem descriptions""")
(opts,args) = parser.parse_args()
#modify path to be able to load proteus test problems
#for now always insert
probDir = str(opts.probDir)
if probDir not in sys.path:
sys.path.insert(0,probDir)
#end if
if len(args) < 1:
raise RuntimeError("No input file specified")
if len(args) > 1:
raise RuntimeError("Must specify so-file containing p and n filenames")
pList = []
nList = []
pNameList = []
soFile = open(args[0],'r')
lines = soFile.readlines()
for mLine in lines:
filenames = mLine.split | ()
if len(filenames) == 2:
pList.append(__import__(filenames[0][:-3]))
pNameList.append(filenames[0][:-3]+filenames[1][:-3])
nList.append(__import__(filenames[1][:-3]))
if opts.batchFileName != "":
inputStream = open(opts.batchFileName,'r')
else:
inputS | tream = sys.stdin
pNameAll = ''
for pName in pNameList:
pNameAll+=pName
if opts.logLevel > 0:
Profiling.openLog(pNameAll+".log",opts.logLevel)
if opts.viewer:
Viewers.viewerOn(pNameAll,opts.viewer)
running = True
while running:
if opts.interactive:
userInput = True
sys.stdout.write("Enter python commands or (s)tart/(q)quit\n>>>")
elif opts.batchFileName != "":
userInput = True
else:
userInput = False
run = True
running = False
while (userInput):
line = inputStream.readline()
if line:
if (line.split()[0] == 's' or
line.split()[0] == 'start'):
userInput = False
run = True
elif (line.split()[0] == 'q' or
line.split()[0] == 'quit'):
userInput = False
run = False
running = False
else:
userInput = True
exec(line)
sys.stdout.write(">>>")
else:
userInput = False
run = False
running = False
if run:
if opts.profile:
profiler.runctx('runRoutines(pNameAll,pNameList,pList,nList,opts)',{'runRoutines':runRoutines},{'pNameAll':pNameAll,'pNameList':pNameList,'pList':pList,'nList':nList,'opts':opts},pNameAll+'_prof')
stats = pstats.Stats(pNameAll+'_prof')
stats.strip_dirs()
stats.dump_stats(pNameAll+'_prof_c')
stats.sort_stats('cumulative')
if Profiling.verbose:
stats.print_stats(30)
stats.sort_stats('time')
if Profiling.verbose:
stats.print_stats(30)
else:
runRoutines(pNameAll,pNameList,pList,nList,opts)
os.chdir('../../')
if opts.viewer:
input('\nPress return to close windows and exit... \n')
if Viewers.viewerType == 'matlab':
Viewers.viewerPipe.write("quit \n")
#matlab
def runProblems(pNameAll,pNameList,pList,nList,opts):
Profiling.memory()
memBase = Profiling.memLast
elementQuadratureList=[]
elementBoundaryQuadratureList=[]
pM = pList[opts.masterModel]
nM = nList[opts.masterModel]
for p,n in zip(pList,nList):
elementQuadratureDict={}
for I in p.coefficients.elementIntegralKeys:
elementQuadratureDict[I] = n.elementQuadrature
if n.subgridError != None:
for I in p.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',)+I[1:]] = n.elementQuadrature
if n.shockCapturing != None:
for ci in n.shockCapturing.components:
elementQuadratureDict[('numDiff',ci,ci)] = n.elementQuadrature
if n.massLumping:
for ci in list(p.coefficients.mass.keys()):
elementQuadratureDict[('m',ci)] = Quadrature.SimplexLobattoQuadrature(p.nd,1)
for I in p.coefficients.elementIntegralKeys:
elementQuadratureDict[('stab',)+I[1:]] = Quadrature.SimplexLobattoQuadrature(p.nd,1)
elementBoundaryQuadratureDict={}
for I in p.coefficients.elementBoundaryIntegralKeys:
elementBoundaryQuadratureDict[I] = n.elementBoundaryQuadrature
elementQuadratureList.append(elementQuadratureDict)
elementBoundaryQuadratureList.append(elementBoundaryQuadratureDict)
Profiling.logEvent("Setting up MultilevelMesh",level=1)
mlMesh = None
mlMeshFileName = pNameAll+"_mesh%dD.%d" % (pM.nd,nM.nLevels)
# try:
# mlMeshFile = open(mlMeshFileName,'rb')
# Profiling.logEvent("Reading mesh",level=2)
# mlMesh = cPickle.load(mlMeshFile)
# except |
jeremybanks/stack-suggestions-bot | src/stackexchange/stackexchange.py | Python | mit | 2,984 | 0 | import json
import requests
from .site import Site
from .errors import APIError
class StackExchange(object):
"""
A simple wrapper for the Stack Exchange API V2.2.
This doesn't consider rate limiting or any important things like
that. Careless use could result in being blocked.
This doesn't support the use of filters which remove API fields
that are present by default.
"""
API_ROOT = 'http://api.stackexchange.com/2.2/'
def __init__(self, key=None):
self._key = key
self._init_sites_list()
def _init_sites_list(self):
sites_data = self._request('sites', pagesize=99999)
sites = {}
for site_data in sites_data:
site = Site(self | , site_data)
sites[site.api_site_parameter] = site
# maps from site API identifiers to Site objects
self.sites = sites
def _request(self, path, site=None, object_hook=None, **kwargs):
url = self.API_ROOT + path
params = dict(kwargs)
# The "unsafe" mode returns data correctly encoded, instead of
# protectively over-encoded. That's the main reason we're using
# it, although also includes many (all?) fields tha | t aren't
# included by default. Ideally, we may want to create a custom
# filter returning what we actually know how to use.
params['filter'] = 'unsafe'
if site:
params['site'] = site
if self._key:
params['key'] = self._key
response = requests.get(url, params=params, stream=True)
response_data = json.loads(response.text, object_hook=object_hook)
if 'error_id' in response_data:
raise APIError.from_response_data(response_data)
else:
return APIItems.from_response_data(response_data)
def get_site(self, identifier):
"""
Returns a Site object given a site's domain, name, slug, or ID.
"""
# if given the api identifier, we can get it instantly
if identifier in self.sites:
return self.sites[identifier]
# otherwise we need to search for it
for site in self.sites.values():
if identifier == site.name:
return site
if identifier == site.site_url:
return site
if identifier in site.aliases:
return site
raise ValueError("no site found matching %r" % (identifier,))
class APIItems(list):
"""
A list of items from an API response, with API metadata attached.
"""
@staticmethod
def from_response_data(response_data):
self = APIItems(response_data['items'])
self._response_data = response_data
self.has_more = response_data['has_more']
self.backoff = response_data.get('backoff', 0)
self.quota_max = response_data['quota_max']
self.quota_remaining = response_data['quota_remaining']
return self
|
hzlf/openbroadcast.org | website/base/templatetags/daterange_tags.py | Python | gpl-3.0 | 201 | 0 | import datetime
from django import template
register = template.Library()
@register.filter
def | xxxx_to_now(value):
value = int(value)
return range(va | lue, datetime.datetime.now().year + 2)
|
SaschaMester/delicium | build/get_landmines.py | Python | bsd-3-clause | 3,458 | 0.010989 | #!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This file emits the list of reasons why a particular build needs to be clobbered
(or a list of 'landmines').
"""
import sys
import landmine_utils
builder = landmine_utils.builder
distributor = landmine_utils.distributor
gyp_defines = landmine_utils.gyp_defines
gyp_msvs_version = landmine_utils.gyp_msvs_version
platform = landmine_utils.platform
def print_landmines():
"""
ALL LANDMINES ARE EMITTED FROM HERE.
"""
# DO NOT add landmines as part of a regular CL. Landmines are a last-effort
# bandaid fix if a CL that got landed has a build dependency bug and all bots
# need to be cleaned up. If you're writing a new CL that causes build
# dependency problems, fix the dependency problems instead of a | dding a
# landmine.
if (distributor() == 'goma' and platform() == 'win32' and
builder() == 'ninja'):
print 'Need to clobber winja goma due to backend cwd cache fix.'
if platform() == 'android':
print 'Clobber: to handle new way of suppressing findbugs failures.'
print 'Clobber to fix gyp not rename package name (crbug.com/457038)'
if platform() == 'win' and builder() == 'ninja':
print 'Compile on cc_unittests fails due to symbols removed in r185063.'
if platform() == 'lin | ux' and builder() == 'ninja':
print 'Builders switching from make to ninja will clobber on this.'
if platform() == 'mac':
print 'Switching from bundle to unbundled dylib (issue 14743002).'
if platform() in ('win', 'mac'):
print ('Improper dependency for create_nmf.py broke in r240802, '
'fixed in r240860.')
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version() == '2012' and
gyp_defines().get('target_arch') == 'x64' and
gyp_defines().get('dcheck_always_on') == '1'):
print "Switched win x64 trybots from VS2010 to VS2012."
if (platform() == 'win' and builder() == 'ninja' and
gyp_msvs_version().startswith('2013')):
print "Switched win from VS2010 to VS2013."
print "Update to VS2013 Update 2."
print "Update to VS2013 Update 4."
if (platform() == 'win' and gyp_msvs_version().startswith('2015')):
print 'Switch to VS2015'
print 'Need to clobber everything due to an IDL change in r154579 (blink)'
print 'Need to clobber everything due to gen file moves in r175513 (Blink)'
if (platform() != 'ios'):
print 'Clobber to get rid of obselete test plugin after r248358'
print 'Clobber to rebuild GN files for V8'
print 'Clobber to get rid of stale generated mojom.h files'
print 'Need to clobber everything due to build_nexe change in nacl r13424'
print '[chromium-dev] PSA: clobber build needed for IDR_INSPECTOR_* compil...'
print 'blink_resources.grd changed: crbug.com/400860'
print 'ninja dependency cycle: crbug.com/408192'
print 'Clobber to fix missing NaCl gyp dependencies (crbug.com/427427).'
print 'Another clobber for missing NaCl gyp deps (crbug.com/427427).'
print 'Clobber to fix GN not picking up increased ID range (crbug.com/444902)'
print 'Remove NaCl toolchains from the output dir (crbug.com/456902)'
if platform() == 'ios':
print 'Clobber iOS to workaround Xcode deps bug (crbug.com/485435)'
def main():
print_landmines()
return 0
if __name__ == '__main__':
sys.exit(main())
|
Dima73/pli-openmultibootmanager | src/ubi_reader/ubi_io/__init__.py | Python | gpl-2.0 | 5,452 | 0.00055 | #!/usr/bin/env python
#############################################################
# ubi_reader/ubi_io
# (c) 2013 Jason Pruitt (jrspruitt@gmail.com)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#############################################################
from ubi.block import sort
class ubi_file(object):
"""UBI image file object
Arguments:
Str:path -- Path to file to parse
Int:block_size -- Erase block size of NAND in bytes.
Int:start_offset -- (optional) Where to start looking in the file for
UBI data.
Int:end_offset -- (optional) Where to stop looking in the file.
Methods:
seek -- Put file head to specified byte offset.
Int:offset
read -- Read specified bytes from file handle.
Int:size
tell -- Returns byte offset of current file location.
read_block -- Returns complete PEB data of provided block
description.
Obj:block
read_block_data -- Returns LEB data only from provided block.
Obj:block
reader -- Generator that returns data from file.
reset -- Reset file position to start_offset
Handles all the actual file interactions, read, seek,
extract blocks, etc.
"""
def __init__(self, path, block_size, start_offset=0, end_offset=None):
self._fhandle = open(path, 'rb')
self._start_offset = start_offset
if end_offset:
self._end_offset = end_offset
else:
self._fhandle.seek(0, 2)
self._end_offset = self.tell()
self._block_size = block_size
if start_offset >= self._end_offset:
raise Exception('Start offset larger than file size!')
self._fhandle.seek(self._start_offset)
def _set_start(self, i):
self._start_offset = i
def _get_start(self):
return self._start_offset
start_offset = property(_get_start, _set_start)
def _get_end(self):
return self._end_offset
end_offset = property(_get_end)
def _get_block_size(self):
return self._block_size
block_size = property(_get_block_size)
def seek(self, offset):
self._fhandle.seek(offset)
def read(self, size):
return self._fhandle.read(size)
def tell(self):
return self._fhandle.tell()
def reset(self):
self._fhandle.seek(self.start_offset)
def reader(self):
self.reset()
while True:
cur_loc = self._fhandle.tell()
if self.end_offset and cur_loc > self.end_offset:
break
elif self.end_offset and self.end_offset - cur_loc < self.block_size:
chunk_size = self.end_offset - cur_loc
else:
chunk_size = self.block_size
buf = self.read(chunk_size)
if not buf:
break
yield buf
def read_block(self, block):
"""Read complete PEB data from file.
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset)
return self._fhandle.read(block.size)
def read_block_data(self, block):
"""Read LEB data from file
Argument:
Obj:block -- Block data is desired for.
"""
self.seek(block.file_offset + block.ec_hdr.data_offset)
buf = self._fhandle.read(block.size - block.ec_hdr.data_offset - block.vid_hdr.data_pa | d)
return buf
class leb_virtual_file():
def __init__(self, ubi, volume):
self._ubi = ubi
self._volume = volume
self._blocks = sort.by_leb( | self._volume.get_blocks(self._ubi.blocks))
self._seek = 0
self.leb_data_size = len(self._blocks) * self._ubi.leb_size
self._last_leb = -1
self._last_buf = ''
def read(self, i):
buf = ''
leb = int(self.tell() / self._ubi.leb_size)
offset = self.tell() % self._ubi.leb_size
if leb == self._last_leb:
self.seek(self.tell() + i)
return self._last_buf[offset:offset + i]
else:
buf = self._ubi.file.read_block_data(self._ubi.blocks[self._blocks[leb]])
self._last_buf = buf
self._last_leb = leb
self.seek(self.tell() + i)
return buf[offset:offset + i]
def reset(self):
self.seek(0)
def seek(self, offset):
self._seek = offset
def tell(self):
return self._seek
def reader(self):
last_leb = 0
for block in self._blocks:
while 0 != (self._ubi.blocks[block].leb_num - last_leb):
last_leb += 1
yield '\xff' * self._ubi.leb_size
last_leb += 1
yield self._ubi.file.read_block_data(self._ubi.blocks[block])
|
timothycrosley/pies | pies2overrides/html/parser.py | Python | mit | 65 | 0 | from | __future__ import absolute_import
from HTMLParser imp | ort *
|
TouchBack/leap-gl-test | gl.py | Python | gpl-3.0 | 3,061 | 0.031036 | from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
import Leap
from Leap import *
from time import sleep
window = 0 | # glut window number
width, height = 800, 600 | # window size
x,y = 0,0
leap = None
r = 1.0
g = 0.0
b = 0.0
def draw_rect(x, y, width, height):
glBegin(GL_QUADS) # start drawing a rectangle
glVertex2f(x, y) # bottom left point
glVertex2f(x + width, y) # bottom right point
glVertex2f(x + width, y + height) # top right point
glVertex2f(x, y + height) # top left point
glEnd()
def refresh2d(width, height):
glViewport(0, 0, width, height)
glMatrixMode(GL_PROJECTION)
glLoadIdentity()
glOrtho(0.0, width, 0.0, height, 0.0, 1.0)
glMatrixMode (GL_MODELVIEW)
glLoadIdentity()
def map_range(num, min1, max1, min2, max2, clamp=True):
percent = (num-min1)/(max1-min1)
if clamp:
percent = 0 if percent < 0 else percent
percent = 1 if percent > 1 else percent
return min2 + (max2-min2)*percent
def draw(): # ondraw is called all the time
global leap, width, height, r, g, b
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # clear the screen
glLoadIdentity() # reset position
refresh2d(width, height) # set mode to 2d
frame = leap.frame()
# if len(frame.fingers) > 0:
# x = frame.fingers[0].tip_position.x
# else:
# x = 0
#print "Getting gestures"
for gesture in frame.gestures():
#print "GESTURE"
if gesture.type == Leap.Gesture.TYPE_SWIPE:
swipe = SwipeGesture(gesture)
if swipe.state == Leap.Gesture.STATE_STOP:
old_r = r
old_g = g
old_b = b
r = old_b
g = old_r
b = old_g
#print "Red: %f -> %f" % (old_r, r)
for finger in frame.fingers:
f_x = map_range(finger.tip_position.x, -255,255, 0, width, False)
f_y = map_range(finger.tip_position.y, 0,512, 0, height, False)
z_mult = map_range(finger.tip_position.z, -255, 255, 1.0, 0.0)
glColor3f(r*z_mult,g*z_mult,b*z_mult) # set color
draw_rect(f_x, f_y, 10, 10) # draw rect
glutSwapBuffers()
def gl_init():
global leap
# init leap first!!!
leap = Leap.Controller()
leap.enable_gesture(Leap.Gesture.TYPE_SWIPE);
# initialization
glutInit() # initialize glut
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(width, height) # set window size
glutInitWindowPosition(0, 0) # set window position
window = glutCreateWindow("noobtuts.com") # create window with title
glutDisplayFunc(draw) # set draw function callback
glutIdleFunc(draw) # draw all the time
glutMainLoop() # start everything
gl_init() |
abhattad4/Digi-Menu | tests/model_fields/tests.py | Python | bsd-3-clause | 35,232 | 0.000539 | from __future__ import unicode_literals
import datetime
import unittest
from decimal import Decimal
from django import forms, test
from django.core import checks, validators
from django.core.exceptions import ValidationError
from django.db import IntegrityError, connection, models, transaction
from django.db.models.fields import (
NOT_PROVIDED, AutoField, BigIntegerField, BinaryField, BooleanField,
CharField, CommaSeparatedIntegerField, DateField, DateTimeField,
DecimalField, EmailField, FilePathField, FloatField, GenericIPAddressField,
IntegerField, IPAddressField, NullBooleanField, PositiveIntegerField,
PositiveSmallIntegerField, SlugField, SmallIntegerField, TextField,
TimeField, URLField,
)
from django.db.models.fields.files import FileField, ImageField
from django.utils import six
from django.utils.functional import lazy
from .models import (
Bar, BigD, BigIntegerModel, BigS, BooleanModel, DataModel, DateTimeModel,
Document, FksToBooleans, FkToChar, FloatModel, Foo, GenericIPAddress,
IntegerModel, NullBooleanModel, PositiveIntegerModel,
PositiveSmallIntegerModel, Post, PrimaryKeyCharModel, RenamedField,
SmallIntegerModel, VerboseNameField, Whiz, WhizIter, WhizIterEmpty,
)
class BasicFieldTests(test.TestCase):
def test_show_hidden_initial(self):
"""
Regression test for #12913. Make sure fields with choices respect
show_hidden_initial as a kwarg to models.Field.formfield()
"""
choices = [(0, 0), (1, 1)]
model_field = models.Field(choices=choices)
form_field = model_field.formfield(show_hidden_initial=True)
self.assertTrue(form_field.show_hidden_initial)
form_field = model_field.formfield(show_hidden_initial=Fa | lse)
self.assertFalse(form_field.show_hidden_initial)
def test_nullbooleanfield_blank(self):
"""
Regression test for #13071: NullBooleanField should not throw
a validation error when given a value of None.
"""
nullboolean = NullBoolea | nModel(nbfield=None)
try:
nullboolean.full_clean()
except ValidationError as e:
self.fail("NullBooleanField failed validation with value of None: %s" % e.messages)
def test_field_repr(self):
"""
Regression test for #5931: __repr__ of a field also displays its name
"""
f = Foo._meta.get_field('a')
self.assertEqual(repr(f), '<django.db.models.fields.CharField: a>')
f = models.fields.CharField()
self.assertEqual(repr(f), '<django.db.models.fields.CharField>')
def test_field_name(self):
"""
Regression test for #14695: explicitly defined field name overwritten
by model's attribute name.
"""
instance = RenamedField()
self.assertTrue(hasattr(instance, 'get_fieldname_display'))
self.assertFalse(hasattr(instance, 'get_modelname_display'))
def test_field_verbose_name(self):
m = VerboseNameField
for i in range(1, 25):
self.assertEqual(m._meta.get_field('field%d' % i).verbose_name,
'verbose field%d' % i)
self.assertEqual(m._meta.get_field('id').verbose_name, 'verbose pk')
def test_float_validates_object(self):
instance = FloatModel(size=2.5)
# Try setting float field to unsaved object
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Set value to valid and save
instance.size = 2.5
instance.save()
self.assertTrue(instance.id)
# Set field to object on saved instance
instance.size = instance
with transaction.atomic():
with self.assertRaises(TypeError):
instance.save()
# Try setting field to object on retrieved object
obj = FloatModel.objects.get(pk=instance.id)
obj.size = obj
with self.assertRaises(TypeError):
obj.save()
def test_choices_form_class(self):
"""Can supply a custom choices form class. Regression for #20999."""
choices = [('a', 'a')]
field = models.CharField(choices=choices)
klass = forms.TypedMultipleChoiceField
self.assertIsInstance(field.formfield(choices_form_class=klass), klass)
def test_field_str(self):
from django.utils.encoding import force_str
f = Foo._meta.get_field('a')
self.assertEqual(force_str(f), "model_fields.Foo.a")
class DecimalFieldTests(test.TestCase):
def test_to_python(self):
f = models.DecimalField(max_digits=4, decimal_places=2)
self.assertEqual(f.to_python(3), Decimal("3"))
self.assertEqual(f.to_python("3.14"), Decimal("3.14"))
self.assertRaises(ValidationError, f.to_python, "abc")
def test_default(self):
f = models.DecimalField(default=Decimal("0.00"))
self.assertEqual(f.get_default(), Decimal("0.00"))
def test_format(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f._format(f.to_python(2)), '2.0')
self.assertEqual(f._format(f.to_python('2.6')), '2.6')
self.assertEqual(f._format(None), None)
def test_get_db_prep_lookup(self):
f = models.DecimalField(max_digits=5, decimal_places=1)
self.assertEqual(f.get_db_prep_lookup('exact', None, connection=connection), [None])
def test_filter_with_strings(self):
"""
We should be able to filter decimal fields using strings (#8023)
"""
Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
self.assertEqual(list(Foo.objects.filter(d='1.23')), [])
def test_save_without_float_conversion(self):
"""
Ensure decimals don't go through a corrupting float conversion during
save (#5079).
"""
bd = BigD(d="12.9")
bd.save()
bd = BigD.objects.get(pk=bd.pk)
self.assertEqual(bd.d, Decimal("12.9"))
def test_lookup_really_big_value(self):
"""
Ensure that really big values can be used in a filter statement, even
with older Python versions.
"""
# This should not crash. That counts as a win for our purposes.
Foo.objects.filter(d__gte=100000000000)
class ForeignKeyTests(test.TestCase):
def test_callable_default(self):
"""Test the use of a lazy callable for ForeignKey.default"""
a = Foo.objects.create(id=1, a='abc', d=Decimal("12.34"))
b = Bar.objects.create(b="bcd")
self.assertEqual(b.a, a)
@test.skipIfDBFeature('interprets_empty_strings_as_nulls')
def test_empty_string_fk(self):
"""
Test that foreign key values to empty strings don't get converted
to None (#19299)
"""
char_model_empty = PrimaryKeyCharModel.objects.create(string='')
fk_model_empty = FkToChar.objects.create(out=char_model_empty)
fk_model_empty = FkToChar.objects.select_related('out').get(id=fk_model_empty.pk)
self.assertEqual(fk_model_empty.out, char_model_empty)
def test_warning_when_unique_true_on_fk(self):
class FKUniqueTrue(models.Model):
fk_field = models.ForeignKey(Foo, unique=True)
model = FKUniqueTrue()
expected_warnings = [
checks.Warning(
'Setting unique=True on a ForeignKey has the same effect as using a OneToOneField.',
hint='ForeignKey(unique=True) is usually better served by a OneToOneField.',
obj=FKUniqueTrue.fk_field.field,
id='fields.W342',
)
]
warnings = model.check()
self.assertEqual(warnings, expected_warnings)
def test_related_name_converted_to_text(self):
rel_name = Bar._meta.get_field('a').rel.related_name
self.assertIsInstance(rel_name, six.text_type)
class DateTimeFieldTests(test.TestCase):
def test_datetimefield_to_python_usecs(self):
"""DateTimeField.to_python should support usecs"""
f = models.DateTimeField()
self.asser |
ldjebran/robottelo | robottelo/cli/module_stream.py | Python | gpl-3.0 | 494 | 0 | # -*- encoding: utf-8 -*-
"""
Usage::
hamm | er module-stream [OPTIONS] SUBCOMMAND [ARG] ...
Parameters::
SUBCOMMAND subcommand
[ARG] ... subcommand arguments
Subcommands::
info Show a module-stream
list List module-streams
"""
from robottelo.cli.base import Base
class ModuleStream(Base):
"""
Manipulates module-stream command.
"""
command_base = 'modu | le-stream'
|
archangd/leetcode | 1.two_sum.py | Python | lgpl-3.0 | 350 | 0 | class Solution(object):
def twoSum(self, nums, target):
"""
:type nums: List[int]
:type target: int
:rtype: List[int]
"""
temp = {}
for i, n in enumerate(nums):
if (target - n) in temp:
return [temp[target | - n], i]
else:
temp[n] = i
| |
Endika/hr | hr_report_payroll_attendance_summary/wizard/__init__.py | Python | agpl-3.0 | 850 | 0 | # -*- coding:utf-8 -*-
#
#
# Copyright (C) 2013 Michael Telahun Makonnen <mmakonnen@gmail.com>.
# All Rights Reserved.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either ver | sion 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# |
#
from . import attendance_summary
|
City-of-Helsinki/devheldev | users/models.py | Python | agpl-3.0 | 107 | 0 | from django.db import models
from helusers.m | odels import AbstractUser
|
class User(AbstractUser):
pass
|
LxMLS/lxmls-toolkit | lxmls/sequences/bak/basic_feature.py | Python | mit | 2,225 | 0 | import id_feature as idf
class BasicFeatures(idf.IDFeatures):
def __init__(self, dataset):
idf.IDFeatures.__init__(self, dataset)
# def add_next_word_context_feature(self,next_word,tag,idx):
# feat = "next_word:%s::%s"%(next_word,tag)
# nr_feat = self.add_feature(feat)
# | idx.append(nr_feat)
# return idx
# def add_prev_word_context_feature(self,prev_word,tag,idx):
# feat = "prev_word:%s::%s"%(prev_word,tag)
# nr_feat = self.add_feature(feat)
# idx.append(nr_feat)
# | return idx
def add_node_feature(self, seq, pos, y, idx):
x = seq.x[pos]
word = self.dataset.int_to_word[x]
if self.dataset.word_counts[x] > 5:
y_name = self.dataset.int_to_pos[y]
word = self.dataset.int_to_word[x]
feat = "id:%s::%s" % (word, y_name)
nr_feat = self.add_feature(feat)
idx.append(nr_feat)
else:
# Check for upercase
if not unicode.islower(word):
feat = "upercased::%s" % y
nr_feat = self.add_feature(feat)
idx.append(nr_feat)
# Check for number
if not unicode.isalpha(word):
feat = "number::%s" % y
nr_feat = self.add_feature(feat)
idx.append(nr_feat)
# Check for number
if unicode.find(word, "-") != -1:
feat = "hyphen::%s" % y
nr_feat = self.add_feature(feat)
idx.append(nr_feat)
# Suffixes
max_suffix = 4
for i in xrange(max_suffix):
if len(word) > i+1:
suffix = word[-(i+1):]
feat = "suffix:%s::%s" % (suffix, y)
nr_feat = self.add_feature(feat)
idx.append(nr_feat)
# Prefixes
max_prefix = 4
for i in xrange(max_prefix):
if len(word) > i+1:
prefix = word[:i+1]
feat = "prefix:%s::%s" % (prefix, y)
nr_feat = self.add_feature(feat)
idx.append(nr_feat)
return idx
|
rohitranjan1991/home-assistant | homeassistant/components/homekit_controller/lock.py | Python | mit | 4,317 | 0.000695 | """Support for HomeKit Controller locks."""
from __future__ import annotations
from typing import Any
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import Service, ServicesTypes
from homeassistant.components.lock import STATE_JAMMED, LockEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
STATE_LOCKED,
STATE_UNKNOWN,
STATE_UNLOCKED,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import KNOWN_DEVICES, HomeKitEntity
CURRENT_STATE_MAP = {
0: STATE_UNLOCKED,
1: STATE_LOCKED,
2: STATE_JAMMED,
3: STATE_UNKNOWN,
}
TARGET_STATE_MAP = {STATE_UNLOCKED: 0, STATE_LOCKED: 1}
REVERSED_TARGET_STATE_MAP = {v: k for k, v in TARGET_STATE_MAP.items()}
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: AddEntitiesCallback,
) -> None:
"""Set up Homekit lock."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(service: Service) -> bool:
if service.type != ServicesTypes.LOCK_MECHANISM:
return False
info = {"aid": service.accessory.aid, "iid": service.iid}
async_add_entities([HomeKitLock(conn, info)], True)
return True
conn.add_listener(async_add_service)
class HomeKitLock(HomeKitEntity, LockEntity):
"""Representation of a HomeKit Controller Lock."""
def get_characteristic_types(self) -> list[str]:
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE,
CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE,
CharacteristicsTypes.BATTERY_LEVEL,
]
@property
def is_locked(self) -> bool | None:
"""Return true if device is locked."""
value = self.service.value(CharacteristicsTypes.LOCK_MECHANIS | M_CURRENT_STATE)
if CURRENT_STATE_MAP | [value] == STATE_UNKNOWN:
return None
return CURRENT_STATE_MAP[value] == STATE_LOCKED
@property
def is_locking(self) -> bool:
"""Return true if device is locking."""
current_value = self.service.value(
CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE
)
target_value = self.service.value(
CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE
)
return (
CURRENT_STATE_MAP[current_value] == STATE_UNLOCKED
and REVERSED_TARGET_STATE_MAP.get(target_value) == STATE_LOCKED
)
@property
def is_unlocking(self) -> bool:
"""Return true if device is unlocking."""
current_value = self.service.value(
CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE
)
target_value = self.service.value(
CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE
)
return (
CURRENT_STATE_MAP[current_value] == STATE_LOCKED
and REVERSED_TARGET_STATE_MAP.get(target_value) == STATE_UNLOCKED
)
@property
def is_jammed(self) -> bool:
"""Return true if device is jammed."""
value = self.service.value(CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE)
return CURRENT_STATE_MAP[value] == STATE_JAMMED
async def async_lock(self, **kwargs: Any) -> None:
"""Lock the device."""
await self._set_lock_state(STATE_LOCKED)
async def async_unlock(self, **kwargs: Any) -> None:
"""Unlock the device."""
await self._set_lock_state(STATE_UNLOCKED)
async def _set_lock_state(self, state: str) -> None:
"""Send state command."""
await self.async_put_characteristics(
{CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: TARGET_STATE_MAP[state]}
)
@property
def extra_state_attributes(self) -> dict[str, Any]:
"""Return the optional state attributes."""
attributes = {}
battery_level = self.service.value(CharacteristicsTypes.BATTERY_LEVEL)
if battery_level:
attributes[ATTR_BATTERY_LEVEL] = battery_level
return attributes
|
vv-p/jira-reports | filters/filters.py | Python | mit | 977 | 0.009212 | import re
import os
def g | et_emoji_content(filename):
full_filename = os.path.join(os.path.dirname(__file__), 'emojis', filename)
with open(full_filename, 'r') as fp:
return fp.read()
def fix_emoji(value):
"""
Replace some text emojis with pictures
"""
emojis = {
'(+)': get_emoji_content('plus.html'),
'(-)': get_emoji_content('minus.html'),
'(?)': get_emoji_content('quest | ion.html'),
'(!)': get_emoji_content('alarm.html'),
'(/)': get_emoji_content('check.html'),
}
for e in emojis:
value = value.replace(e, emojis[e])
return value
def cleanup(value):
"""
Remove {code}...{/code} and {noformat}...{noformat} fragments from worklog comment
:param value: worklog comment text
:return: cleaned worklog comment text
"""
value = re.sub('\{code.*?\}.*?\{.*?code\}', ' ', value, 0, re.S)
return re.sub('\{noformat.*?\}.*?\{noformat\}', ' ', value, 0, re.S)
|
kpbochenek/empireofcode | common_words.py | Python | apache-2.0 | 640 | 0.009375 | #kpbochenek@gmail.com
def common_words(first, second):
dd = set( | )
for s in first.split(","): dd.add(s)
return ",".join(sorted([w for w in second.split(",") if w in dd]))
if __name__ == '__main__':
# These "asserts" using only for self-checking and not necessary for auto-testing
assert common_words("hello,world", "hello,earth") == "hello", "Hello"
assert common_words("one,two,three", "four,five,six") == "", "Too different"
assert common_words("one,two,three", "four,five,one,two,six,three") == "one,three,two", "1 2 | 3"
print("Coding complete? Click 'Check' to review your tests and earn cool rewards!")
|
jrg365/gpytorch | gpytorch/likelihoods/likelihood_list.py | Python | mit | 1,999 | 0.004002 | #! /usr/bin/env python3
from torch.nn import ModuleList
from gpytorch.likelihoods import Likelihood
def _get_tuple_args_(*args):
for arg in args:
if isinstance(arg, tuple):
yield arg
else:
yield (arg,)
class LikelihoodList(Likelihood):
def __init__(self, *likelihoods):
super().__init__()
self.likelihoods = ModuleList(likelihoods)
def expected_log_prob(self, *args, **kwargs):
return [
likelihood.expected_log_prob(*args_, **kwargs)
for likelihood, args_ in zip(self.likelihoods, _get_tuple_args_(*args))
]
def forward(self, *args, **kwargs):
if "noise" in kwargs:
noise = kwargs.pop("noise")
# if noise kwarg is passed, assume it's an iterable of noise tensors
return [
likelihood.forward(*args_, {**kwargs, "noise": noise_})
for likelihood, | args_, noise_ in zip(self.li | kelihoods, _get_tuple_args_(*args), noise)
]
else:
return [
likelihood.forward(*args_, **kwargs)
for likelihood, args_ in zip(self.likelihoods, _get_tuple_args_(*args))
]
def pyro_sample_output(self, *args, **kwargs):
return [
likelihood.pyro_sample_output(*args_, **kwargs)
for likelihood, args_ in zip(self.likelihoods, _get_tuple_args_(*args))
]
def __call__(self, *args, **kwargs):
if "noise" in kwargs:
noise = kwargs.pop("noise")
# if noise kwarg is passed, assume it's an iterable of noise tensors
return [
likelihood(*args_, {**kwargs, "noise": noise_})
for likelihood, args_, noise_ in zip(self.likelihoods, _get_tuple_args_(*args), noise)
]
else:
return [
likelihood(*args_, **kwargs) for likelihood, args_ in zip(self.likelihoods, _get_tuple_args_(*args))
]
|
unomena/django-saml2-sp | saml2sp/xml_render.py | Python | bsd-3-clause | 899 | 0.002225 | """
Functions for creating XML output.
"""
import logging
import string
from xml_signing import get_signature_xml
from xml_templates import AUTHN_REQUEST
def _get_authnrequest_xml(template, parameters, signed=False):
# Reset signature.
params = {}
params.update(parameters)
params['AUTHN_REQUEST_SIGNATURE'] = ''
template = string.Template(template)
unsigned = template.substitute(params)
logging.debug('Unsigned:')
logging.debug(unsigned)
if not signed:
return unsigned
# Sign it.
signature_xml = get_signature_xml(unsigned, params['AUTHN_REQUEST_ID'])
params['AUTHN_REQUEST_SIGNATURE'] = signature_xml
signed = template.substitute(params)
logging.debug('Signed:')
logging.debug(signed)
return signed
def get_authnrequest_xml(parameters, signed=False):
return _get_authnrequest | _xm | l(AUTHN_REQUEST, parameters, signed)
|
Distrotech/bzr | bzrlib/tests/test_selftest.py | Python | gpl-2.0 | 153,857 | 0.002145 | # Copyright (C) 2005-2011 Canonical Ltd
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""Tests for the test framework."""
from cStringIO import StringIO
import gc
import doctest
import os
import signal
import sys
import threading
import time
import unittest
import warnings
from testtools import (
ExtendedToOriginalDecorator,
MultiTestResult,
)
from testtools.content import Content
from testtools.content_type import ContentType
from testtools.matchers import (
DocTestMatches,
Equals,
)
import testtools.testresult.doubles
import bzrlib
from bzrlib import (
branchbuilder,
bzrdir,
controldir,
errors,
hooks,
lockdir,
memorytree,
osutils,
remote,
repository,
symbol_versioning,
| tests,
transport,
workingt | ree,
workingtree_3,
workingtree_4,
)
from bzrlib.repofmt import (
groupcompress_repo,
)
from bzrlib.symbol_versioning import (
deprecated_function,
deprecated_in,
deprecated_method,
)
from bzrlib.tests import (
features,
test_lsprof,
test_server,
TestUtil,
)
from bzrlib.trace import note, mutter
from bzrlib.transport import memory
def _test_ids(test_suite):
"""Get the ids for the tests in a test suite."""
return [t.id() for t in tests.iter_suite_tests(test_suite)]
class MetaTestLog(tests.TestCase):
def test_logging(self):
"""Test logs are captured when a test fails."""
self.log('a test message')
details = self.getDetails()
log = details['log']
self.assertThat(log.content_type, Equals(ContentType(
"text", "plain", {"charset": "utf8"})))
self.assertThat(u"".join(log.iter_text()), Equals(self.get_log()))
self.assertThat(self.get_log(),
DocTestMatches(u"...a test message\n", doctest.ELLIPSIS))
class TestTreeShape(tests.TestCaseInTempDir):
def test_unicode_paths(self):
self.requireFeature(features.UnicodeFilenameFeature)
filename = u'hell\u00d8'
self.build_tree_contents([(filename, 'contents of hello')])
self.assertPathExists(filename)
class TestClassesAvailable(tests.TestCase):
"""As a convenience we expose Test* classes from bzrlib.tests"""
def test_test_case(self):
from bzrlib.tests import TestCase
def test_test_loader(self):
from bzrlib.tests import TestLoader
def test_test_suite(self):
from bzrlib.tests import TestSuite
class TestTransportScenarios(tests.TestCase):
"""A group of tests that test the transport implementation adaption core.
This is a meta test that the tests are applied to all available
transports.
This will be generalised in the future which is why it is in this
test file even though it is specific to transport tests at the moment.
"""
def test_get_transport_permutations(self):
# this checks that get_test_permutations defined by the module is
# called by the get_transport_test_permutations function.
class MockModule(object):
def get_test_permutations(self):
return sample_permutation
sample_permutation = [(1,2), (3,4)]
from bzrlib.tests.per_transport import get_transport_test_permutations
self.assertEqual(sample_permutation,
get_transport_test_permutations(MockModule()))
def test_scenarios_include_all_modules(self):
# this checks that the scenario generator returns as many permutations
# as there are in all the registered transport modules - we assume if
# this matches its probably doing the right thing especially in
# combination with the tests for setting the right classes below.
from bzrlib.tests.per_transport import transport_test_permutations
from bzrlib.transport import _get_transport_modules
modules = _get_transport_modules()
permutation_count = 0
for module in modules:
try:
permutation_count += len(reduce(getattr,
(module + ".get_test_permutations").split('.')[1:],
__import__(module))())
except errors.DependencyNotPresent:
pass
scenarios = transport_test_permutations()
self.assertEqual(permutation_count, len(scenarios))
def test_scenarios_include_transport_class(self):
# This test used to know about all the possible transports and the
# order they were returned but that seems overly brittle (mbp
# 20060307)
from bzrlib.tests.per_transport import transport_test_permutations
scenarios = transport_test_permutations()
# there are at least that many builtin transports
self.assertTrue(len(scenarios) > 6)
one_scenario = scenarios[0]
self.assertIsInstance(one_scenario[0], str)
self.assertTrue(issubclass(one_scenario[1]["transport_class"],
bzrlib.transport.Transport))
self.assertTrue(issubclass(one_scenario[1]["transport_server"],
bzrlib.transport.Server))
class TestBranchScenarios(tests.TestCase):
def test_scenarios(self):
# check that constructor parameters are passed through to the adapted
# test.
from bzrlib.tests.per_branch import make_scenarios
server1 = "a"
server2 = "b"
formats = [("c", "C"), ("d", "D")]
scenarios = make_scenarios(server1, server2, formats)
self.assertEqual(2, len(scenarios))
self.assertEqual([
('str',
{'branch_format': 'c',
'bzrdir_format': 'C',
'transport_readonly_server': 'b',
'transport_server': 'a'}),
('str',
{'branch_format': 'd',
'bzrdir_format': 'D',
'transport_readonly_server': 'b',
'transport_server': 'a'})],
scenarios)
class TestBzrDirScenarios(tests.TestCase):
def test_scenarios(self):
# check that constructor parameters are passed through to the adapted
# test.
from bzrlib.tests.per_controldir import make_scenarios
vfs_factory = "v"
server1 = "a"
server2 = "b"
formats = ["c", "d"]
scenarios = make_scenarios(vfs_factory, server1, server2, formats)
self.assertEqual([
('str',
{'bzrdir_format': 'c',
'transport_readonly_server': 'b',
'transport_server': 'a',
'vfs_transport_factory': 'v'}),
('str',
{'bzrdir_format': 'd',
'transport_readonly_server': 'b',
'transport_server': 'a',
'vfs_transport_factory': 'v'})],
scenarios)
class TestRepositoryScenarios(tests.TestCase):
def test_formats_to_scenarios(self):
from bzrlib.tests.per_repository import formats_to_scenarios
formats = [("(c)", remote.RemoteRepositoryFormat()),
("(d)", repository.format_registry.get(
'Bazaar repository format 2a (needs bzr 1.16 or later)\n'))]
no_vfs_scenarios = formats_to_scenarios(formats, "server", "readonly",
None)
vfs_scenarios = formats_to_scenarios(formats, "server", "readonly",
vfs_transport_factory="vfs")
# no_vfs generate scenarios without vfs_transport_factory
expected = [
('RemoteRepositoryFormat( |
sixuanwang/SAMSaaS | wirecloud-develop/src/wirecloud/platform/wiring/utils.py | Python | gpl-2.0 | 2,913 | 0.003434 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2014 CoNWeT Lab., Universidad Politécnica de Madrid
# This file is part of Wirecloud.
# Wirecloud is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Wirecloud is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
# You should have received a copy of the GNU Affero General Public License
# along with Wirecloud. If not, see <http://www.gnu.org/licenses/>.
from django.template import loader, Context
from wirecloud.commons.utils.http import get_absolute_static_url
from wirecloud.platform.plugins import get_operator_api_extensions
def remove_related_iwidget_connections(wiring, iwidget):
connections_to_remove = []
for index, connection in enumerate(wiring['connections']):
if (connection['source']['type'] == 'iwidget' and connection['source']['id'] == iwidget.id) or (connection['target']['type'] == 'iwidget' and connection['target']['id'] == iwidget.id):
connection['index'] = index
connections_to_remove.append(connection)
view_available = 'views' in wiring and len(wiring['views']) > 0
if | view_available and ('iwidgets' in wiring['views'][0]) and (iwidget.id in wiring['views'][0]['iwidgets']):
del wiring['views'][0]['iwidgets'][iwidget.id]
connection_view_available = view_available and 'connections' in wiring['views'][0]
for connection in connections_to_remove:
wiring['connections'].remove(connection)
if connection_view_available and len(wiring[ | 'views'][0]['connections']) > connection['index']:
del wiring['views'][0]['connections'][connection['index']]
def get_operator_cache_key(operator, domain, mode):
return '_operator_xhtml/%s/%s/%s?mode=%s' % (operator.cache_version, domain, operator.id, mode)
def generate_xhtml_operator_code(js_files, base_url, request, requirements, mode):
api_url = get_absolute_static_url('js/WirecloudAPI/WirecloudOperatorAPI.js', request=request)
api_common_url = get_absolute_static_url('js/WirecloudAPI/WirecloudAPICommon.js', request=request)
api_closure_url = get_absolute_static_url('js/WirecloudAPI/WirecloudAPIClosure.js', request=request)
api_js_files = [get_absolute_static_url(url, request=request) for url in get_operator_api_extensions(mode, requirements)]
api_js = [api_url, api_common_url] + api_js_files + [api_closure_url]
t = loader.get_template('wirecloud/operator_xhtml.html')
c = Context({'base_url': base_url, 'js_files': api_js + js_files})
xhtml = t.render(c)
return xhtml
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.