| repo_name (string, 5-100 chars) | path (string, 4-231 chars) | language (string, 1 class) | license (string, 15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) | prefix (string, 0-8.16k chars) | middle (string, 3-512 chars) | suffix (string, 0-8.17k chars) |
|---|---|---|---|---|---|---|---|---|
| aabilio/PyDownTV | Servers/tve.py | Python | gpl-3.0 | 6,285 | 0.006712 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of PyDownTV.
#
# PyDownTV is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PyDownTV is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PyDownTV. If not, see <http://www.gnu.org/licenses/>.
# Module to download all the videos from the rtve.es website ("A la carta" or not)
# This used to be the tvalacarta.py module, modified to support all videos
__author__="aabilio"
__date__ ="$31-mar-2011 11:35:37$"
# You can import Descargar and use its descargar method, which takes a
# url and returns a stream with the downloaded content:
# example:
# D = Descargar(url)
# stream = D.descargar()
import sys
from Descargar import Descargar
from utiles import salir, formatearNombre, printt
class TVE(object): # Channel identifier
'''
Class to handle RTVE videos (all of them).
'''
def __init__(self, url=""):
'''
Just note that it receives a valid Televisión Española URL as a parameter
'''
self._URL_recibida = url
def getURL(self):
'''
Get the Televisión Española URL
'''
return self._URL_recibida
def setURL(self, url):
'''
Set the valid TVE URL that is passed to the class
'''
self._URL_recibida = url
url = property(getURL, setURL)
# Private helper functions for procesarDescarga(self):
def __descHTML(self, url2down):
''' Method that uses the Descargar class to download the HTML '''
D = Descargar(url2down)
return D.descargar()
def __descXML(self, url2down):
''' Method that uses the Descargar class to download the XML '''
D = Descargar(url2down)
return D.descargar()
def procesarDescarga(self):
'''
Does whatever is needed to obtain the final url of the video to download, and
returns it together with the name the downloaded file should get, as follows:
return [url_path, name]
If no name is wanted for the resulting file on disk, or no procedure is known
to obtain one automatically, the following is used instead:
return [url_path, None]
and the download method of Descargar will use the default name derived from the url.
'''
# First: extract the id, depending on whether the user entered the url with a
# trailing slash (/) or not, and whether it has an extension (not "a la carta")
videoID = self._URL_recibida.split('/')[-1]
if videoID == "":
videoID = self._URL_recibida.split('/')[-2]
elif videoID.find(".shtml") != -1 or videoID.find(".html") != -1 or \
videoID.find(".html") != -1:
videoID = videoID.split('.')[0]
printt(u"[INFO] ID del Vídeo :", videoID)
xmlURL = "www.rtve.es/swf/data/es/videos/video/" + videoID[-1] \
+ "/" + videoID[-2] + "/" + videoID[-3] \
+ "/" + videoID[-4] + "/" + videoID + ".xml"
printt(u"[INFO] Url de xml :", xmlURL)
#print "[+] Procesando Descarga"
sourceXML = self.__descXML(xmlURL)
if sourceXML == -1: # Check whether it exists (it is not tve "a la carta")
sourceHTML = self.__descHTML(self._URL_recibida)
if sourceHTML.find("<div id=\"video") != -1:
id = sourceHTML.split("<div id=\"video")[1].split("\"")[0]
elif sourceHTML.find("<div id=\"vid") != -1:
id = sourceHTML.split("<div id=\"vid")[1].split("\"")[0]
else:
salir(u"[!] ERROR al generear el nuevo id")
xmlURL = "www.rtve.es/swf/data/es/videos/video/" + id[-1] \
+ "/" + id[-2] + "/" + id[-3] \
+ "/" + id[-4] + "/" + id + ".xml"
sourceXML = self.__descXML(xmlURL)
printt(u"[INFO] Nuevo vídeo ID:", id)
printt(u"[INFO] Nuevo url de xml:", xmlURL)
# Now the final video url may sit between the <file></file> tags,
# or we may have to take a detour
if sourceXML.find("<file>") != -1 and sourceXML.find("</file>") != -1: # Contains the URL
urlVideo = sourceXML.split("<file>")[1].split("</file>")[0]
elif sourceXML.find("assetDataId::") != -1: # Dar el rodeo
idAsset = sourceXML.split("assetDataId::")[1].split("\"/>")[0]
|
urlXMLasset = "www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/" + idAsset[-1] \
+ "/" + idAsset[-2] + "/" + idAsset[-3] \
+ "/" + idAsset[-4] + "/ASSET_DATA_VIDEO-" + idAsset + ".xml"
sourceAssetXML = self.__descXML(urlXMLasset)
urlInSourceAssetXML = sourceAssetXML.split("defaultLocation=\"")[1].split("\"")[0]
#print "urllInSourceAssetXML =", urlInSourceAssetXML
# Is it flv or mp4?
if urlInSourceAssetXML.find("/flv/") != -1:
urlVideo = "http://www.rtve.es/resources/TE_NGVA/flv/" \
+ urlInSourceAssetXML.split("/flv/")[1]
elif urlInSourceAssetXML.find("/mp4/") != -1:
urlVideo = "http://www.rtve.es/resources/TE_NGVA/mp4/" \
+ urlInSourceAssetXML.split("/mp4/")[1]
else:
salir(u"Vídeo no encontrado")
else:
salir(u"No se encuentra la URL del vídeo")
# Name under which the download will be saved:
extension = '.' + urlVideo.split('.')[-1]
name = sourceXML.split("<name>")[1].split("</name")[0] + extension
name = formatearNombre(name)
return [urlVideo, name]
| dhluong90/PokemonGo-Bot | pokemongo_bot/cell_workers/move_to_fort.py | Python | mit | 5,477 | 0.003104 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from pokemongo_bot import inventory
from pokemongo_bot.constants import Constants
from pokemongo_bot.walkers.walker_factory import walker_factory
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.base_task import BaseTask
from utils import distance, format_dist, fort_details
class MoveToFort(BaseTask):
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.lure_distance = 0
self.lure_attraction = self.config.get("lure_attraction", True)
self.lure_max_distance = self.config.get("lure_max_distance", 2000)
self.ignore_item_count = self.config.get("ignore_item_count", False)
self.walker = self.config.get('walker', 'StepWalker')
def should_run(self):
has_space_for_loot = inventory.Items.has_space_for_loot()
if not has_space_for_loot and not self.ignore_item_count:
self.emit_event(
'inventory_full',
formatted="Inventory is full. You might want to
|
change your config to recycle more items if this message appears consistently."
)
return has_space_for_loot or self.ignore_item_count or self.bot.softban
def is_attracted(self):
return (self.lure_distance > 0)
def work(self):
if not self.should_run():
return WorkerResult.SUCCESS
nearest_fort = self.get_nearest_fort()
if nearest_fort is None:
return WorkerResult.SUCCESS
lat = nearest_fort['latitude']
lng = nearest_fort['longitude']
fortID = nearest_fort['id']
details = fort_details(self.bot, fortID, lat, lng)
fort_name = details.get('name', 'Unknown')
unit = self.bot.config.distance_unit # Unit to use when printing formatted distance
dist = distance(
self.bot.position[0],
self.bot.position[1],
lat,
lng
)
noised_dist = distance(
self.bot.noised_position[0],
self.bot.noised_position[1],
lat,
lng
)
moving = noised_dist > Constants.MAX_DISTANCE_FORT_IS_REACHABLE if self.bot.config.replicate_gps_xy_noise else dist > Constants.MAX_DISTANCE_FORT_IS_REACHABLE
if moving:
fort_event_data = {
'fort_name': u"{}".format(fort_name),
'distance': format_dist(dist, unit),
}
if self.is_attracted():
fort_event_data.update(lure_distance=format_dist(self.lure_distance, unit))
self.emit_event(
'moving_to_lured_fort',
formatted="Moving towards pokestop {fort_name} - {distance} (attraction of lure {lure_distance})",
data=fort_event_data
)
else:
self.emit_event(
'moving_to_fort',
formatted="Moving towards pokestop {fort_name} - {distance}",
data=fort_event_data
)
step_walker = walker_factory(self.walker,
self.bot,
lat,
lng
)
if not step_walker.step():
return WorkerResult.RUNNING
self.emit_event(
'arrived_at_fort',
formatted='Arrived at fort.'
)
return WorkerResult.SUCCESS
def _get_nearest_fort_on_lure_way(self, forts):
if not self.lure_attraction:
return None, 0
lures = [x for x in forts if x.get('lure_info', None) is not None]
if (len(lures)):
dist_lure_me = distance(self.bot.position[0], self.bot.position[1],
lures[0]['latitude'],lures[0]['longitude'])
else:
dist_lure_me = 0
if dist_lure_me > 0 and dist_lure_me < self.lure_max_distance:
self.lure_distance = dist_lure_me
for fort in forts:
dist_lure_fort = distance(
fort['latitude'],
fort['longitude'],
lures[0]['latitude'],
lures[0]['longitude'])
dist_fort_me = distance(
fort['latitude'],
fort['longitude'],
self.bot.position[0],
self.bot.position[1])
if dist_lure_fort < dist_lure_me and dist_lure_me > dist_fort_me:
return fort, dist_lure_me
if dist_fort_me > dist_lure_me:
break
return lures[0], dist_lure_me
else:
return None, 0
def get_nearest_fort(self):
forts = self.bot.get_forts(order_by_distance=True)
# Remove stops that are still on timeout
forts = filter(lambda x: x["id"] not in self.bot.fort_timeouts, forts)
next_attracted_pts, lure_distance = self._get_nearest_fort_on_lure_way(forts)
# Remove all forts which were spun in the last ticks to avoid circles if set
if self.bot.config.forts_avoid_circles:
forts = filter(lambda x: x["id"] not in self.bot.recent_forts, forts)
self.lure_distance = lure_distance
if (lure_distance > 0):
return next_attracted_pts
if len(forts) > 0:
return forts[0]
else:
return None
| f3b/blender_f3b_exporter | src/f3b_export.py | Python | gpl-3.0 | 48,666 | 0.01426 |
# This file is part of blender_io_f3b. blender_io_f3b is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright David Bernard, Riccardo Balbo
# <pep8 compliant>
import mathutils
import bpy_extras
import math
import f3b
import f3b.datas_pb2
import f3b.custom_params_pb2
import f3b.animations_kf_pb2
import f3b.physics_pb2
from . import helpers
from .utils import *
from .exporter_utils import *
import re,os
import subprocess
from concurrent.futures import ThreadPoolExecutor
DDS_SUPPORT=False
DDS_WRITER_PATH=os.path.dirname(__file__)+"/bin/DDSWriter."
if os.name=="nt":
DDS_WRITER_PATH+="win64.exe"
DDS_SUPPORT=True
else:
DDS_WRITER_PATH+="linux64"
DDS_SUPPORT=True
class ExportCfg:
def __init__(self, is_preview=False, assets_path="/tmp",option_export_selection=False,textures_to_dds=False,export_tangents=False,remove_doubles=False):
self.is_preview = is_preview
self.assets_path = bpy.path.abspath(assets_path)
self._modified = {}
self._ids = {}
self.option_export_selection=option_export_selection
self.textures_to_dds=textures_to_dds
self.export_tangents=export_tangents
self.remove_doubles=remove_doubles
def _k_of(self, v):
# hash(v) or id(v) ?
return str(hash(v))
def id_of(self, v):
k = self._k_of(v)
if k in self._ids:
out = self._ids[k]
else:
# out = str(uuid.uuid4().clock_seq)
out = str(hash(v))
self._ids[k] = out
return out
def need_update(self, v, modified=False):
k = self._k_of(v)
old = (k not in self._modified) or self._modified[k]
self._modified[k] = modified
return old
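# Note added for clarity (not in the original source): the first call for a
# given value returns True (it still needs exporting) and records it as clean;
# later calls return False until need_update(v, modified=True) records it as
# modified again, after which the next call returns True once more.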
def info(self, txt):
print("INFO: " + txt)
def warning(self, txt):
print("WARNING: " + txt)
def error(self, txt):
print("ERROR: " + txt)
def export_all_forcefields(scene,data,cfg):
if scene.use_gravity:
force_field=data.cr_forcefields.add()
force_field.id="sceneGravity"
cnv_vec3(cnv_toVec3ZupToYup(scene.gravity), force_field.gravity.strength)
def export_all_collisionplane(scene,data,cfg):
for obj in scene.objects:
if obj.type != 'MESH' or obj.hide_render or (cfg.option_export_selection and not obj.select):
#print("Skip ",obj,"not selected/render disabled")
continue
for m in obj.modifiers:
if m.type == "COLLISION":
cfg.need_update(obj)
cfg.need_update(obj.data)
cplane=data.cr_collisionplanes.add()
cplane.id = cfg.id_of(obj)
cplane.name = obj.name
wm = obj.matrix_world
cnv_vec3(cnv_toVec3ZupToYup(wm.to_translation()), cplane.point)
rot=wm.to_quaternion()
normal=None
ps = obj.data.polygons
for p in ps:
normal=(p.normal)
break
cnv_vec3(cnv_toVec3ZupToYup(rot*normal), cplane.normal)
cnv_vec3(cnv_toVec3ZupToYup(obj.dimensions), cplane.extents)
cplane.damping=m.settings.damping_factor
cplane.damping_randomness=m.settings.damping_random
cplane.friction=m.settings.friction_factor
cplane.friction_randomness=m.settings.friction_random
cplane.stickiness=m.settings.stickiness
cplane.permeability=m.settings.permeability
cplane.kill_particles=m.settings.use_particle_kill
def export(scene, data, cfg):
if hasattr(f3b.datas_pb2.Data,"cr_collisionplanes") :exp
|
ort_all_collisionplane(scene, data, cfg)
export_all_tobjects(scene, data, cfg)
if hasattr(f3b.datas_pb2.Data,"cr_emitters"): export_all_emitters(scene,data,cfg)
export_all_speakers(scene, data, cfg)
export_all_geometries(scene, data, cfg)
export_all_materials(scene, data, cfg)
export_all_lights(scene, data, cfg)
export_all_skeletons(scene, data, cfg)
export_all_actions(scene, data, cfg)
export_all_physics(scene, data, cfg)
if hasattr(f3b.datas_pb2.Data,"cr_forcefields"): export_all_forcefields(scene,data,cfg)
def export_all_tobjects(scene, data, cfg):
for obj in scene.objects:
if obj.hide_render or (cfg.option_export_selection and not obj.select):
# print("Skip ",obj,"not selected/render disabled")
continue
if cfg.need_update(obj):
tobject = data.tobjects.add()
tobject.id = cfg.id_of(obj)
tobject.name = obj.name
loc, quat, scale = obj.matrix_local.decompose()
cnv_scale(scale, tobject.scale)
cnv_translation(loc, tobject.translation)
if obj.type == 'MESH':
cnv_rotation(quat, tobject.rotation)
elif obj.type == 'ARMATURE':
cnv_rotation(quat, tobject.rotation)
elif obj.type == 'LAMP':
rot = helpers.z_backward_to_forward(quat)
cnv_quatZupToYup(rot, tobject.rotation)
else:
cnv_rotation(helpers.rot_quat(obj), tobject.rotation)
if obj.parent is not None:
add_relation_raw(data.relations,
cfg.id_of(obj.parent), cfg.id_of(obj), cfg)
export_obj_customproperties(obj, tobject, data, cfg)
else:
print("Skip ",obj,"already exported")
def export_all_physics(scene, data, cfg):
for obj in scene.objects:
if obj.hide_render or (cfg.option_export_selection and not obj.select):
continue
phy_data = None
phy_data = export_rb(obj, phy_data, data, cfg)
export_rbct(obj, phy_data, data, cfg)
def export_rbct(ob, phy_data, data, cfg):
btct = ob.rigid_body_constraint
if not btct or not cfg.need_update(btct):
return
if phy_data == None:
phy_data = data.physics.add()
ct_type = btct.type
constraint = phy_data.constraint
constraint.id = cfg.id_of(btct)
o1 = btct.object1
o2 = btct.object2
o1_wp = o1.matrix_world.to_translation()
o2_wp = o2.matrix_world.to_translation()
constraint.a_ref = cfg.id_of(o1.rigid_body)
constraint.b_ref = cfg.id_of(o2.rigid_body)
if ct_type == "GENERIC":
generic = constraint.generic
cnv_vec3((0, 0, 0), generic.pivotA)
cnv_vec3(cnv_toVec3ZupToYup(o1_wp-o2_wp), generic.pivotB)
generic.disable_collisions = btct.disable_collisions
if btct.use_limit_lin_x:
limit_lin_x_upper = btct.limit_lin_x_upper
limit_lin_x_lower = btct.limit_lin_x_lower
else:
limit_lin_x_upper = float('inf')
limit_lin_x_lower = float('-inf')
if btct.use_limit_lin_y:
limit_lin_y_upper = btct.limit_lin_y_upper
limit_lin_y_lower = btct.limit_lin_y_lower
else:
limit_lin_y_upper = float('inf')
limit_lin_y_lower = float('-inf')
if btct.use_limit_lin_z:
limit_lin_z_upper = btct.limit_lin_z_upper
limit_lin_z_lower = btct.limit_lin_z_lower
else:
limit_lin_z_upper = float('inf')
limit_lin_z_lower = float('-inf')
if btct.use_limit_ang_x:
limit_ang_x_upper = btct.limit_ang_x_upper
limit_ang_x_lower = btct.limit_ang_x_lower
else:
limit_ang_x_upper = float('inf')
limit_ang_x_lower = float('-inf')
if
| endlessm/chromium-browser | third_party/llvm/lldb/test/API/functionalities/breakpoint/breakpoint_command/bktptcmd.py | Python | bsd-3-clause | 812 | 0.006158 |
from __future__ import print_function
import side_effect
def useless_function(first, second):
print("I have the wrong number of arguments.")
def function(frame, bp_loc, dict):
side_effect.bktptcmd = "function was here"
def another_function(frame, bp_loc, extra_args, dict):
se_value = extra_args.GetValueForKey("side_effect")
se_string = se_value.GetStringValue(100)
side_effect.fancy = se_string
def a_third_function(frame, bp_loc, extra_args, dict):
se_value = extra_args.GetValueForKey("side_effect")
se_string = se_value.GetStringValue(100)
side_effect.fancier = se_string
def empty_extra_args(frame, bp_loc, extra_args, dict):
if extra_args.IsValid():
side_effect.not_so_fancy = "Extra args should not be valid"
side_effect.not_so_fancy = "Not so fancy"
| sramana/pysis | apps/passwords/tests/test_reset_password.py | Python | unlicense | 1,236 | 0.004045 |
from djangosanetesting.cases import HttpTestCase
from django.conf import settings
from django.core.urlresolvers import reverse
from django.core import mail
from accounts.tests import testdata
class TestResetPassword(HttpTestCase):
def __init__(self, *args, **kwargs):
super(self.__class__, self).__init__(*args, **kwargs)
self.host = 'localhost'
self.port = 8000
def setUp(self):
testdata.run()
def test_reset_password(self):
res = self.client.post(reverse('password_reset'),
                               {'register_number': settings.TEST_USERNAME},
follow=True)
assert reverse('password_reset_done') in res.request['PATH_INFO']
assert len(mail.outbox) == 1
reset_url = [word for word in mail.outbox[0].body.split() if word.startswith('http')][0]
res = self.client.get(reset_url, follow=True)
assert res.status_code == 200
assert 'unsuccessful' not in res.content.lower()
assert 'change my password' in res.content.lower()
# I have to stop here, because the next step is to change the password at Google Apps.
# Can't mess up the production database.
| Arelle/Arelle | arelle/TableStructure.py | Python | apache-2.0 | 28,566 | 0.005776 |
'''
Created on Feb 02, 2014
@author: Mark V Systems Limited
(c) Copyright 2014 Mark V Systems Limited, All rights reserved.
'''
try:
import regex as re
except ImportError:
import re
from collections import defaultdict
import os, io, json
from datetime import datetime, timedelta
from arelle import XbrlConst
from arelle.ModelDtsObject import ModelConcept
from arelle.XmlValidate import VALID
# regular expression components
STMT = r".* - statement - "
notDET = r"(?!.*details)"
notCMPRH = r"(?!.*comprehensive)"
isCMPRH = r"(?=.*comprehensive)"
''' common mis-spellings of parenthetical to match successfully (from 2013 SEC filings)
paranthetical
parenthical
parentheical
parenthtical
parenthethical
parenthentical
prenthetical
parenethetical
use a regular expression that is forgiving on at least the above
and doesn't match variations of parent, transparent, etc.
'''
rePARENTHETICAL = r"pa?r[ae]ne?th\w?[aei]+\w?t?h?i?c"
notPAR = "(?!.*" + rePARENTHETICAL + ")"
isPAR = "(?=.*" + rePARENTHETICAL + ")"
UGT_TOPICS = None
def RE(*args):
return re.compile(''.join(args), re.IGNORECASE)
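# Illustrative check of the forgiving pattern above (an added example, not part
# of the original module): each common misspelling listed should match, while
# words like "transparent" should not.
# >>> all(RE(isPAR).match(s) for s in ("paranthetical", "parenthical", "prenthetical"))
# True
# >>> RE(isPAR).match("transparent") is None
# True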
# NOTE: This is an early, experimental implementation of statement detection;
# it is not finished at this time.
EFMtableCodes = [
# ELRs are parsed for these patterns in sort order until there is one match per code
# sheet(s) may be plural
# statement detection including root element of presentation link role
("BS", RE(STMT, notDET, notPAR), ("StatementOfFinancialPositionAbstract",)),
("BSP", RE(STMT, notDET, isPAR), ("StatementOfFinancialPositionAbstract",)),
("IS", RE(STMT, notDET, notPAR), ("IncomeStatementAbstract",)),
("ISP", RE(STMT, notDET, isPAR), ("IncomeStatementAbstract",)),
("CI", RE(STMT, notDET, notPAR), ("StatementOfIncomeAndComprehensiveIncomeAbstract",)),
("CIP", RE(STMT, notDET, isPAR), ("StatementOfIncomeAndComprehensiveIncomeAbstract",)),
("EQ", RE(STMT, notDET, notPAR), ("StatementOfStockholdersEquityAbstract","StatementOfPartnersCapitalAbstract")),
("EQP", RE(STMT, notDET, isPAR), ("StatementOfStockholdersEquityAbstract","StatementOfPartnersCapitalAbstract")),
("CF", RE(STMT, notDET, notPAR), ("StatementOfCashFlowsAbstract",)),
("CFP", RE(STMT, notDET, isPAR), ("StatementOfCashFlowsAbstract",)),
("CA", RE(STMT, notDET, notPAR), ("CapitalizationLongtermDebtAndEquityAbstract",)),
("CAP", RE(STMT, notDET, isPAR), ("CapitalizationLongtermDebtAndEquityAbstract",)),
("IN", RE(STMT, notDET, notPAR), ("ScheduleOfInvestmentsAbstract",)),
("INP", RE(STMT, notDET, isPAR), ("ScheduleOfInvestmentsAbstract",)),
# statement detection without considering root elements
("DEI", RE(r".* - (document|statement) - .*document\W+.*entity\W+.*information"), None),
("BS", RE(STMT, notDET, notPAR, r".*balance\W+sheet"), None),
("BSP", RE(STMT, notDET, isPAR, r".*balance\W+sheet"), None),
("CF", RE(STMT, notDET, notPAR, r".*cash\W*flow"), None),
("IS", RE(STMT, notDET, notPAR, notCMPRH, r".*(income|loss)"), None),
("ISP", RE(STMT, notDET, isPAR, notCMPRH, r".*(income|loss)"), None),
("CI", RE(STMT, notDET, notPAR, isCMPRH, r".*(income|loss|earnings)"), None),
("CIP", RE(STMT, notDET, isPAR, isCMPRH, r".*(income|loss|earnings)"), None),
("CA", RE(STMT, notDET, notPAR, r".*capitali[sz]ation"), None),
("CAP", RE(STMT, notDET, isPAR, r".*capitali[sz]ation"), None),
("EQ", RE(STMT, notDET, notPAR, r".*(equity|capital)"), None),
("EQP", RE(STMT, notDET, isPAR, r".*(equity|capital)"), None),
("IS", RE(STMT, notDET, notPAR, r".*(income|operations|earning)"), None),
("EQ", RE(STMT, notDET, notPAR, r".*def[ei][cs]it"), None),
("ISP", RE(STMT, notDET, isPAR, r".*(income|operations|earning)"), None),
("CFP", RE(STMT, notDET, isPAR, r".*cash\W*flow.*"), None),
("IS", RE(STMT, notDET, notPAR, r".*loss"), None),
("ISP", RE(STMT, notDET, isPAR, r".*loss"), None),
("BS", RE(STMT, notDET, notPAR, r".*(position|condition)"), None),
("BSP", RE(STMT, notDET, isPAR, r".*(position|condition)"), None),
("SE", RE(STMT, notDET, notPAR, r"(?=.*equity).*comprehensive"), None),
("EQ", RE(STMT, notDET, notPAR, r".*shareholder[']?s[']?\W+investment"), None),
("EQP", RE(STMT, notDET, isPAR, r".*shareholder[']?s[']?\W+investment"), None),
("EQ", RE(STMT, notDET, notPAR, r".*retained\W+earning"), None),
("IN", RE(STMT, notDET, notPAR, r".*investment"), None),
("INP", RE(STMT, notDET, isPAR, r".*investment"), None),
("LA", RE(STMT, notDET, notPAR, r"(?!.*changes)(?=.*assets).*liquidati"), None),
("LC", RE(STMT, notDET, notPAR, r"(?=.*changes)(?=.*assets).*liquidati"), None),
("IS", RE(STMT, notDET, notPAR, r"(?=.*disc).*operation"), None),
("BS", RE(STMT, notDET, notPAR, r"(?!.*changes).*assets"), None),
("BSP", RE(STMT, notDET, isPAR, r"(?!.*changes).*assets"), None),
("EQ", RE(STMT, notDET, notPAR, r"(?=.*changes).*assets"), None),
("EQP", RE(STMT, notDET, isPAR, r"(?=.*changes).*assets"), None),
("FH", RE(STMT, notDET, notPAR, r"(?=.*financial).*highlight"), None),
("FHP", RE(STMT, notDET, isPAR, r"(?=.*financial).*highlight"), None),
("EQ", RE(STMT, notDET, notPAR, r"(?=.*reserve).*trust"), None),
("EQP", RE(STMT, notDET, isPAR, r"(?=.*reserve).*trust"), None),
("LC", RE(STMT, notDET, notPAR, r"(?=.*activities).*liquidati"), None),
("EQP", RE(STMT, notDET, isPAR, r".*def[ei][cs]it"), None),
("BSV", RE(STMT, notDET,notPAR, r".*net\W+asset\W+value"), None),
("CFS", RE(STMT, notDET,notPAR, r".*cash\W*flows\W+supplemental"), N
|
one),
("LAP", RE(STMT, notDET, isPAR, r".*(?!.*changes)(?=.*assets).*liquidati"), None)
]
HMRCtableCodes = [
# ELRs are parsed for these patterns in sort order until there is one match per code
# sheet(s) may be plural
("DEI", RE(r".*entity\W+.*information.*"), None),
("BS", RE(r".*balance\W+sheet.*"), None),
("IS", RE(r".*loss"), None),
("CF", RE(r".*cash\W*flow.*"), None),
("
|
SE", RE(r".*(shareholder|equity).*"), None),
]
def evaluateRoleTypesTableCodes(modelXbrl):
disclosureSystem = modelXbrl.modelManager.disclosureSystem
if disclosureSystem.validationType in ("EFM", "HMRC"):
detectMultipleOfCode = False
if disclosureSystem.validationType == "EFM":
tableCodes = list( EFMtableCodes ) # separate copy of list so entries can be deleted
# for Registration and resubmission allow detecting multiple of code
detectMultipleOfCode = any(v and any(v.startswith(dt) for dt in ('S-', 'F-', '8-K', '6-K'))
for docTypeConcept in modelXbrl.nameConcepts.get('DocumentType', ())
for docTypeFact in modelXbrl.factsByQname.get(docTypeConcept.qname, ())
for v in (docTypeFact.value,))
elif disclosureSystem.validationType == "HMRC":
tableCodes = list( HMRCtableCodes ) # separate copy of list so entries can be deleted
codeRoleURI = {} # lookup by code for roleURI
roleURICode = {} # lookup by roleURI
# resolve structural model
roleTypes = [roleType
for roleURI in modelXbrl.relationshipSet(XbrlConst.parentChild).linkRoleUris
for roleType in modelXbrl.roleTypes.get(roleURI,())]
roleTypes.sort(key=lambda roleType: roleType.definition)
# assign code to table link roles (Presentation ELRs)
for roleType in roleTypes:
definition = roleType.definition
rootConcepts = None
for i, tableCode in enumerate(tableCodes):
code, pattern, rootConceptNames = tableCode
if (detectMultipleOfCode or code not in codeRoleURI) and pattern.match(definition):
if rootConceptNames and rootConcepts is None:
rootConcepts = modelXbrl.relationshipSet(XbrlConst.parentChild, roleType.roleURI).rootConcepts
if (not rootConceptNames or
| maartenbreddels/vaex | packages/vaex-ui/vaex/ui/main.py | Python | mit | 89,877 | 0.002348 |
from __future__ import print_function
__author__ = 'breddels'
# import astropy.vo.samp as sampy
import platform
import vaex.utils
import sys
import threading
import vaex.export
import vaex.utils
import vaex.promise
import vaex.settings
import vaex.remote
import psutil
from vaex.parallelize import parallelize
from vaex.ui.plot_windows import PlotDialog
import vaex.ui.columns
import vaex.ui.variables
import vaex.ui.qt as dialogs
import astropy.units
# py2/p3 compatibility
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import vaex as vx
# from PySide import QtGui, QtCore
from vaex.ui.qt import *
from vaex.ui.table import *
from vaex.samp import Samp
# help py2app, it was missing this import
try: # in Pyinstaller this doesn't work, and we can get away with not setting this, total mystery
import sip
sip.setapi('QVariant', 2)
sip.setapi('QString', 2)
except:
pass
darwin = "darwin" in platform.system().lower()
frozen = getattr(sys, 'frozen', False)
# print "DEFAULT ENCODING is: %s"%(sys.getdefaultencoding())
# print "FILE SYSTEM ENCODING is: %s"%(sys.getfilesystemencoding())
# if darwin:
if sys.getfilesystemencoding() == None: # TODO: why does this happen in pyinstaller?
def getfilesystemencoding_wrapper():
return "UTF-8"
sys.getfilesystemencoding = getfilesystemencoding_wrapper
# on osx 10.8 we sometimes get pipe errors while printing, ignore these
# signal.signal(signal.SIGPIPE, signal.SIG_DFL)
try:
import pdb
import astropy.io.fits
# pdb.set_trace()
except Exception as e:
print(e)
pdb.set_trace()
import vaex.ui.plot_windows as vp
from vaex.ui.ranking import *
import vaex.ui.undo
import vaex.kld
import vaex.utils
import vaex.dataset
# import subspacefind
# import ctypes
import imp
import logging
logger = logging.getLogger("vaex")
# import locale
# locale.setlocale(locale.LC_ALL, )
# samp stuff
# import astropy.io.votable
custom = None
custompath = path = os.path.expanduser('~/.vaex/custom.py')
# print path
if os.path.exists(path):
customModule = imp.load_source('vaex.custom', path)
# custom = customModule.Custom()
else:
custom = None
logger.debug("%s does not exist" % path)
# print "root path is", vaex.utils.get_root_path()
if getattr(sys, 'frozen', False):
application_path = os.path.dirname(sys.executable)
elif __file__:
application_path = os.path.dirname(__file__)
if not frozen: # astropy not working :s
pass
# import pdb
# pdb.set_trace()
# fix from Chris Beaumont
# import astropy.logger
# astropy.logger.log.disable_warnings_logging()
__import__("astropy.io.votable")
# for osx
if "darwin" in platform.system().lower():
application_path = os.path.abspath(".")
def error(title, msg):
print("Error", title, msg)
from vaex.dataset import *
possibleFractions = [10**base * f for base in [-3, -2, -1, 0] for f in [0.25, 0.5, 0.75, 1.]]
possibleFractions.insert(0, 10**-4)
# print possibleFractions
class DatasetSelector(QtGui.QListWidget):
def __init__(self, parent):
super(DatasetSelector, self).__init__(parent)
# self.icon = QtGui.QIcon('icons/png/24x24/devices/memory.png')
# self.icon_server = QtGui.QIcon('icons/png/24x24/devices/memory.png')
self.icon = QtGui.QIcon(vp.iconfile('drive'))
self.icon_server = QtGui.QIcon(vp.iconfile('server-network'))
self.icon_memory = QtGui.QIcon(vp.iconfile('memory'))
self.datasets = []
self.signal_pick = vaex.events.Signal("pick")
self.signal_add_dataset = vaex.events.Signal("add dataset")
self.signal_add_dataset.connect(self.on_add_dataset)
self.signal_dataset_select = vaex.events.Signal("dataset-select")
self.currentItemChanged.connect(self.onDatasetSelected)
# self.items
def onDatasetSelected(self, data_item, previous):
if data_item is not None:
data = data_item.data(QtCore.Qt.UserRole)
if hasattr(data, "toPyObject"):
dataset = data.toPyObject()
self.signal_dataset_select.emit(dataset)
else:
self.signal_dataset_select.emit(data)
def on_add_dataset(self, dataset):
# print "added dataset", dataset
self.datasets.append(dataset)
dataset.signal_pick.connect(self.on_pick)
def on_pick(self, dataset, row):
# broadcast
logger.debug("broadcast pick")
self.signal_pick.emit(dataset, row)
def setBestFraction(self, dataset):
return
Nmax = 1000 * 1000 * 10
for fraction in possibleFractions[::-1]:
N = len(dataset)
if N > Nmax:
dataset.set_active_fraction(fraction)
logger.debug("set best fraction for dataset %r to %r" % (dataset, fraction))
else:
break
def is_empty(self):
return len(self.datasets) == 0
def open(self, path, **kwargs):
ds = vaex.open(path, **kwargs)
return self.add(ds)
def add(self, dataset):
self.setBestFraction(dataset)
item = QtGui.QListWidgetItem(self)
item.setText(dataset.name)
icon = self.icon
if hasattr(dataset, "filename"):
item.setToolTip("file: " + dataset.filename)
if isinstance(dataset, vaex.remote.DataFrameRemote):
icon = self.icon_server
item.setToolTip("source: " + dataset.path)
if isinstance(dataset, vaex.dataset.DatasetArrays):
icon = self.icon_memory
item.setIcon(icon)
# TODO: this hangs on pyside 1.2.1, linux
item.setData(QtCore.Qt.UserRole, dataset)
self.setCurrentItem(item)
self.signal_add_dataset.emit(dataset)
return dataset
class Worker(QtCore.QThread):
def __init__(self, parent, name, func, *args, **kwargs):
QtCore.QThread.__init__(self, parent=None)
self.func = func
self.args = args
self.kwargs = kwargs
self.name = name
self.signal = QtCore.SIGNAL("signal")
def run(self):
time.sleep(0.1)
print("in thread", self.currentThreadId())
self.result = self.func(*self.args, **self.kwargs)
print("result:", self.result)
# self.emit(self.signal, self.result)
# self.exec_()
class MyStats(object):
def __init__(self, data):
self.data = data
def __call__(self, args):
print(args)
# stat_name, column_name = args
# print "do", stat_name, "on", column_name
return 1
# f = stats[stat_name]
# return column_name, stat_name, f(self.data.columns[column_name])
# stats = {"minimum": lambda x: str(np.nanmin(x)), "maximum": lambda x: str(np.nanmax(x)), "mean": lambda x: str(np.mean(x)), "std": lambda x: str(np.std(x)), "median": lambda x: str(np.median(x))}
stats = {"minimum": lambda x: str(np.nanmin(x)), "maximum": lam
|
bda x: str(np.nanmax(x)), "mean": lambda x: str(np.mean(x)), "std": lambda x: str(np.std(x))}
def statsrun(args):
columns, stat_name, column_name = args
f = stats[stat_name]
# print args
return 1
class StatWorker(QtCore.QThread):
def __init__(self, parent, data):
QtCore.QThread.__init__(self, parent=parent)
self.data = data
def run(self):
time.sleep(0.1)
print("in thread", self.currentThreadId())
jobs = [(stat_name, column_name) for stat_name in list(stats.keys()) for column_name in list(self.data.columns.keys())]
@parallelize(cores=QtCore.QThread.idealThreadCount())
def dostats(args, data=self.data):
stat_name, column_name = args
columns = data.columns
f = stats[stat_name]
result = f(columns[column_name][slice(*data.current_slice)])
print(result)
return result
values = dostats(jobs)
self.results = {}
for job, value in zip(jobs, values):
stat_name, column_name = job
if stat_name not in self.results:
self.results[stat_name] = {}
self.res
| Neoklosch/Motey | motey/models/service.py | Python | apache-2.0 | 2,107 | 0.000949 |
import uuid
from motey.models.image import Image
from motey.models.service_state import ServiceState
class Service(object):
"""
Model object. Represent a service.
A service can have multiple states, action types and service types.
"""
def __init__(self, service_name, images, id=uuid.uuid4().hex, state=ServiceState.INITIAL, state_message=''):
"""
Constructor of the service model.
:param service_name: the name of the service
:type service_name: str
:param images: list of images which are associated with the service
:type images: list
:param id: autogenerated id of the service
:type id: uuid
:param state: current state of the service. Default `INITIAL`.
:type state: motey.models.service_state.ServiceState
:param state_message: message for the current service state
:type state_message: str
"""
self.id = id
self.service_name = service_name
self.images = images
self.state = state
self.state_message = state_message
def __iter__(self):
yield 'id', self.id
yield 'service_name', self.service_name
yield 'images', [dict(image) for image in self.images]
yield 'state', self.state
yield 'state_message', self.state_message
@staticmethod
def transform(data):
"""
Static method to translate the service dict data into a service model.
:param data: service dict to be transformed
:type data: dict
:return: the translated service model, None if something went wrong
"""
if 'service_name' not in data or 'images' not in data:
return None
return Service(
id=data['id'] if 'id' in data else uuid.uuid4().hex,
service_name=data['service_name'],
images=[Image.transform(image) for image in data['images']],
state=data['state'] if 'state' in data else ServiceState.INITIAL,
state_message=data['state_message'] if 'state_message' in data else ''
)
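# Illustrative round trip (an added example with hypothetical data, not part of
# the original module):
# >>> svc = Service.transform({'service_name': 'demo', 'images': []})
# >>> svc.state == ServiceState.INITIAL
# True
# >>> Service.transform({'images': []}) is None  # missing 'service_name'
# True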
| eduardosan/csindex | csindex/tests/test_cassandra.py | Python | gpl-2.0 | 2,692 | 0.000373 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'eduardo'
import unittest
import uuid
from . import config
from ..model import cs
from cassandra.cluster import Session
class TestCassandra(unittest.TestCase):
"""
Test interaction with the Cassandra database
"""
def setUp(self):
"""
Load test data
"""
self.session = config.session
self.es_index = config.ES_INDEX
pass
def test_communication(self):
"""
Tests communication with Cassandra
"""
self.assertIsInstance(self.session, Session)
def test_create_table(self):
"""
Tests table creation
"""
cs.CS.create_table()
# Check whether the table exists
cql = "SELECT * from {}".format(self.es_index)
result = self.session.execute(cql)
self.assertListEqual(result, [])
# Drop the table
cs.CS.drop_table()
def test_add_cassandra(self):
"""
Tests adding a record to Cassandra
"""
cs.CS.create_table()
# Record 1
id1 = uuid.uuid4()
doc1 = cs.CS(
content={'teste': 123},
document_id=id1
)
doc1.add()
result = doc1.get()
self.assertIsNotNone(result)
# Record 2
id2 = uuid.uuid4()
doc2 = cs.CS(
content={'teste': 1234},
document_id=id2
)
doc2.add()
result = doc2.get()
self.assertIsNotNone(result)
# Drop the table
cs.CS.drop_table()
def test_get_all(self):
"""
Returns all the inserted documents
"""
cs.CS.create_table()
# Record 1
id1 = uuid.uuid4()
doc1 = cs.CS(
content={'teste': 123},
document_id=id1
)
doc1.add()
result = doc1.get()
self.assertIsNotNone(result)
# Record 2
id2 = uuid.uuid4()
doc2 = cs.CS(
content={'teste': 1234},
document_id=id2
)
doc2.add()
result = doc2.get()
self.assertIsNotNone(result)
# Test a document that does not exist
id3 = uuid.uuid4()
doc3 = cs.CS(
content={'teste': 12345},
document_id=id3
)
result = doc3.get()
self.assertIsNone(result)
# List the records
result = cs.CS.get_all()
self.assertIs(type(result), list)
self.assertGreater(len(result), 0)
# Drop the table
cs.CS.drop_table()
def tearDown(self):
"""
Remove test data
"""
pass
| leighpauls/k2cro4 | third_party/WebKit/Tools/Scripts/webkitpy/common/net/credentials_unittest.py | Python | bsd-3-clause | 9,141 | 0.002297 |
# Copyright (C) 2009 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import tempfile
import unittest
from webkitpy.common.net.credentials import Credentials
from webkitpy.common.system.executive import Executive
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.user_mock import MockUser
from webkitpy.thirdparty.mock import Mock
from webkitpy.tool.mocktool import MockOptions
from webkitpy.common.system.executive_mock import MockExecutive
# FIXME: Other unit tests probably want this class.
class _TemporaryDirectory(object):
def __init__(self, **kwargs):
self._kwargs = kwargs
self._directory_path = None
def __enter__(self):
self._directory_path = tempfile.mkdtemp(**self._kwargs)
return self._directory_path
def __exit__(self, type, value, traceback):
os.rmdir(self._directory_path)
# Note: All tests should use this class instead of Credentials directly to avoid using a real Executive.
class MockedCredentials(Credentials):
def __init__(self, *args, **kwargs):
if 'executive' not in kwargs:
kwargs['executive'] = MockExecutive()
Credentials.__init__(self, *args, **kwargs)
class CredentialsTest(unittest.TestCase):
example_security_output = """keychain: "/Users/test/Library/Keychains/login.keychain"
class: "inet"
attributes:
0x00000007 <blob>="bugs.webkit.org (test@webkit.org)"
0x00000008 <blob>=<NULL>
"acct"<blob>="test@webkit.org"
"atyp"<blob>="form"
"cdat"<timedate>=0x32303039303832353233353231365A00 "20090825235216Z\000"
"crtr"<uint32>=<NULL>
"cusi"<sint32>=<NULL>
"desc"<blob>="Web form password"
"icmt"<blob>="default"
"invi"<sint32>=<NULL>
"mdat"<timedate>=0x32303039303930393137323635315A00 "20090909172651Z\000"
"nega"<sint32>=<NULL>
"path"<blob>=<NULL>
"port"<uint32>=0x00000000
"prot"<blob>=<NULL>
"ptcl"<uint32>="htps"
"scrp"<sint32>=<NULL>
"sdmn"<blob>=<NULL>
"srvr"<blob>="bugs.webkit.org"
"type"<uint32>=<NULL>
password: "SECRETSAUCE"
"""
def test_keychain_lookup_on_non_mac(self):
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return False
credentials = FakeCredentials("bugs.webkit.org")
self.assertEqual(credentials._is_mac_os_x(), False)
self.assertEqual(credentials._credentials_from_keychain("foo"), ["foo", None])
def test_security_output_parse(self):
credentials = MockedCredentials("bugs.webkit.org")
self.assertEqual(credentials._parse_security_tool_output(self.example_security_output), ["test@webkit.org", "SECRETSAUCE"])
def test_security_output_parse_entry_not_found(self):
# FIXME: This test won't work if the user has a credential for foo.example.com!
credentials = Credentials("foo.example.com")
if not credentials._is_mac_os_x():
return # This test does not run on a non-Mac.
# Note, we ignore the captured output because it is already covered
# by the test case CredentialsTest._assert_security_call (below).
outputCapture = OutputCapture()
outputCapture.capture_output()
self.assertEqual(credentials._run_security_tool(), None)
outputCapture.restore_output()
def _assert_security_call(self, username=None):
executive_mock = Mock()
credentials = MockedCredentials("example.com", executive=executive_mock)
expected_stderr = "Reading Keychain for example.com account and password. Click \"Allow\" to continue...\n"
OutputCapture().assert_outputs(self, credentials._run_security_tool, [username], expected_stderr=expected_stderr)
security_args = ["/usr/bin/security", "find-internet-password", "-g", "-s", "example.com"]
if username:
security_args += ["-a", username]
executive_mock.run_command.assert_called_with(security_args)
def test_security_calls(self):
self._assert_security_call()
self._assert_security_call(username="foo")
def test_credentials_from_environment(self):
credentials = MockedCredentials("example.com")
saved_environ = os.environ.copy()
os.environ['WEBKIT_BUGZILLA_USERNAME'] = "foo"
os.environ['WEBKIT_BUGZILLA_PASSWORD'] = "bar"
username, password = credentials._credentials_from_environment()
self.assertEqual(username, "foo")
self.assertEqual(password, "bar")
os.environ = saved_environ
def test_read_credentials_without_git_repo(self):
# FIXME: This should share more code with test_keyring_without_git_repo
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return True
def _credentials_from_keychain(self, username):
return ("test@webkit.org", "SECRETSAUCE")
def _credentials_from_environment(self):
return (None, None)
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("bugs.webkit.org", cwd=temp_dir_path)
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "SECRETSAUCE"))
def test_keyring_without_git_repo(self):
# FIXME: This should share more code with test_read_credentials_without_git_repo
class MockKeyring(object):
def get_password(self, host, username):
return "NOMNOMNOM"
class FakeCredentials(MockedCredentials):
def _is_mac_os_x(self):
return True
def _credentials_from_keychain(self, username):
return ("test@webkit.org", None)
def _credentials_from_environment(self):
return (None, None)
with _TemporaryDirectory(suffix="not_a_git_repo") as temp_dir_path:
credentials = FakeCredentials("fake.hostname", cwd=temp_dir_path, keyring=MockKeyring())
# FIXME: Using read_credentials here seems too broad as higher-priority
# credential source could be affected by the user's environment.
self.assertEqual(credentials.read_credentials(), ("test@webkit.org", "NOMNOMNOM"))
def test_keyring_without_git_repo_nor_keychain(self):
class MockKeyring(object):
def get_password(self, host, username):
return "NOMNOMNOM"
class FakeCredentials(MockedCredentials):
def _credentials_from_keychain(self, username):
| apiad/sublime-subtitle-sync | subtitle_sync.py | Python | mit | 1,030 | 0.009709 |
import sublime_plugin
import sublime
SUB_RE = r'\d\d:\d\d:\d\d,\d\d\d'
def find_subtitles(view):
subs = []
sel = view.sel()
for match in view.find_all(SUB_RE):
if sel.contains(match):
subs.append(match)
# sel.clear()
# sel.add_all(subs)
return subs
def convert_to_time(sub):
h, m, s = sub.split(':')
return int(h) * 3600 + int(m) * 60 + float(s.replace(',', '.'))
def convert_to_string(time):
h = int(time / 3600)
m = int((time % 3600) / 60)
s = time % 60
return str(h).zfill(2) + ':' + str(m).zfill(2) + ':' + ("%.3f" % s).zfill(6).replace('.', ',')
class SubtitleSyncCommand(sublime_plugin.TextCommand):
def run(self, edit, delta):
subs = find_subtitles(self.view)
for sub in subs: # self.view.sel():
time = convert_to_time(self.view.substr(sub))
time += delta
if time < 0:
time = 0
time = convert_to_string(time)
self.view.replace(edit, sub, time)
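# Added usage note (not in the original plugin): Sublime Text derives the
# command name "subtitle_sync" from the class name, so a key binding such as
# the following (hypothetical keys) would shift selected timestamps by 0.5 s:
# { "keys": ["ctrl+alt+right"], "command": "subtitle_sync", "args": {"delta": 0.5} }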
| nanditav/15712-TensorFlow | tensorflow/python/kernel_tests/resource_variable_ops_test.py | Python | apache-2.0 | 3,885 | 0.008752 |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for tensorflow.ops.resource_variable_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class ResourceVariableOpsTest(test_util.TensorFlowTestCase):
def testHandleDtypeShapeMatch(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
with self.assertRaises(ValueError):
resource_variable_ops.create_variable_op(
handle, constant_op.constant(0.0, dtype=dtypes.float32)).run()
with self.assertRaises(ValueError):
resource_variable_ops.create_variable_op(
handle, constant_op.constant([0], dtype=dtypes.int32)).run()
resource_variable_ops.create_variable_op(
handle, constant_op.constant(0, dtype=dtypes.int32)).run()
def testDtypeSurvivesIdentity(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
id_handle = array_ops.identity(handle)
resource_variable_ops.create_variable_op(
id_handle, constant_op.constant(0, dtype=dtypes.int32)).run()
def testCreateRead(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
resource_variable_ops.create_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
value = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32).eval()
self.assertAllEqual(1, value)
def testManyAssigns(self):
with self.test_session() as session:
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
create = resource_variable_ops.create_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
with ops.control_dependencies([create]):
first_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
with ops.control_dependencies([first_read]):
write = resource_variable_ops.assign_variable_op(
handle, constant_op.constant(2, dtype=dtypes.int32))
with ops.control_dependencies([write]):
second_read = resource_variable_ops.read_variable_op(
handle, dtype=dtypes.int32)
f, s = session.run([first_read, second_read])
self.assertEqual(f, 1)
self.assertEqual(s, 2)
def testAssignAdd(self):
with self.test_session():
handle = resource_variable_ops.var_handle_op(dtype=dtypes.int32, shape=[])
resource_variable_ops.create_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32)).run()
assign_add = resource_variable_ops.assign_add_variable_op(
handle, constant_op.constant(1, dtype=dtypes.int32))
self.assertEqual(assign_add.eval(), 2)
if __name__ == "__main__":
test.main()
| quom/google-cloud-python | runtimeconfig/unit_tests/test_variable.py | Python | apache-2.0 | 7,382 | 0.000135 |
# Copyright 2016 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
class TestVariable(unittest.TestCase):
PROJECT = 'PROJECT'
CONFIG_NAME = 'config_name'
VARIABLE_NAME = 'variable_name'
PATH = 'projects/%s/configs/%s/variables/%s' % (
PROJECT, CONFIG_NAME, VARIABLE_NAME)
@staticmethod
def _get_target_class():
from google.cloud.runtimeconfig.variable import Variable
return Variable
def _make_one(self, *args, **kw):
return self._get_target_class()(*args, **kw)
def _verifyResourceProperties(self, variable, resource):
import base64
from google.cloud._helpers import _rfc3339_to_datetime
if 'name' in resource:
self.assertEqual(variable.full_name, resource['name'])
if 'value' in resource:
self.assertEqual(
variable.value, base64.b64decode(resource['value']))
else:
self.assertIsNone(variable.value)
if 'state' in resource:
self.assertEqual(variable.state, resource['state'])
if 'updateTime' in resource:
self.assertEqual(
variable.update_time,
_rfc3339_to_datetime(resource['updateTime']))
else:
self.assertIsNone(variable.update_time)
def test_ctor(self):
from google.cloud.runtimeconfig.config import Config
client = _Client(project=self.PROJECT)
config = Config(name=self.CONFIG_NAME, client=client)
variable = self._make_one(name=self.VARIABLE_NAME, config=config)
self.assertEqual(variable.name, self.VARIABLE_NAME)
self.assertEqual(variable.full_name, self.PATH)
self.assertEqual(variable.path, '/%s' % (self.PATH,))
self.assertIs(variable.client, client)
def test_ctor_w_no_name(self):
from google.cloud.runtimeconfig.config import Config
client = _Client(project=self.PROJECT)
config = Config(name=self.CONFIG_NAME, client=client)
variable = self._make_one(name=None, config=config)
with self.assertRaises(ValueError):
getattr(variable, 'full_name')
def test_exists_miss_w_bound_client(self):
from google.cloud.runtimeconfig.config import Config
conn = _Connection()
client = _Client(project=self.PROJECT, connection=conn)
config = Config(name=self.CONFIG_NAME, client=client)
variable = self._make_one(name=self.VARIABLE_NAME, config=config)
self.assertFalse(variable.exists())
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % (self.PATH,))
self.assertEqual(req['query_params'], {'fields': 'name'})
def test_exists_hit_w_alternate_client(self):
from google.cloud.runtimeconfig.config import Config
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
CONFIG1 = Config(name=self.CONFIG_NAME, client=CLIENT1)
conn2 = _Connection({})
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
variable = self._make_one(name=self.VARIABLE_NAME, config=CONFIG1)
self.assertTrue(variable.exists(client=CLIENT2))
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % (self.PATH,))
self.assertEqual(req['query_params'], {'fields': 'name'})
def test_reload_w_bound_client(self):
from google.cloud.runtimeconfig.config import Config
RESOURCE = {
'name': self.PATH,
'value': 'bXktdmFyaWFibGUtdmFsdWU=', # base64 my-variable-value
'updateTime': '2016-04-14T21:21:54.5000Z',
'state': 'VARIABLE_STATE_UNSPECIFIED',
}
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
config = Config(name=self.CONFIG_NAME, client=client)
variable = self._make_one(name=self.VARIABLE_NAME, config=config)
variable.reload()
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % (self.PATH,))
self._verifyResourceProperties(variable, RESOURCE)
def test_reload_w_empty_resource(self):
from google.cloud.runtimeconfig.config import Config
RESOURCE = {}
conn = _Connection(RESOURCE)
client = _Client(project=self.PROJECT, connection=conn)
config = Config(name=self.CONFIG_NAME, client=client)
variable = self._make_one(name=self.VARIABLE_NAME, config=config)
variable.reload()
# Name should not be overwritten.
self.assertEqual(self.VARIABLE_NAME, variable.name)
self.assertEqual(len(conn._requested), 1)
req = conn._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % (self.PATH,))
self._verifyResourceProperties(variable, RESOURCE)
def test_reload_w_alternate_client(self):
from google.cloud.runtimeconfig.config import Config
RESOURCE = {
'name': self.PATH,
'value': 'bXktdmFyaWFibGUtdmFsdWU=', # base64 my-variable-value
'updateTime': '2016-04-14T21:21:54.5000Z',
'state': 'VARIABLE_STATE_UNSPECIFIED',
}
conn1 = _Connection()
CLIENT1 = _Client(project=self.PROJECT, connection=conn1)
CONFIG1 = Config(name=self.CONFIG_NAME, client=CLIENT1)
conn2 = _Connection(RESOURCE)
CLIENT2 = _Client(project=self.PROJECT, connection=conn2)
variable = self._make_one(name=self.VARIABLE_NAME, config=CONFIG1)
variable.reload(client=CLIENT2)
self.assertEqual(len(conn1._requested), 0)
self.assertEqual(len(conn2._requested), 1)
req = conn2._requested[0]
self.assertEqual(req['method'], 'GET')
self.assertEqual(req['path'], '/%s' % (self.PATH,))
self._verifyResourceProperties(variable, RESOURCE)
class _Client(object):
_connection = None
def __init__(self, project, connection=None):
self.project = project
self._connection = connection
class _Connection(object):
def __init__(self, *responses):
self._responses = responses
self._requested = []
def api_request(self, **kw):
from google.cloud.exceptions import NotFound
self._requested.append(kw)
try:
response, self._responses = self._responses[0], self._responses[1:]
except:
raise NotFound('miss')
else:
return response
| MayukhSobo/OpenStreetMap | test/test.py | Python | mit | 799 | 0 |
#!/usr/bin/env python
def get_db(db_name):
from pymongo import MongoClient
client = MongoClient('localhost:27017')
db = client[db_name]
return db
def make_pipeline():
# complete the aggregation pipeline
match = {"$match": {"created": {"$exists": 1}}}
group = {"$group": {"_id": "$created.user", "total": {"$sum": 1}}}
sort = {"$sort": {"total": -1}}
limit = {"$limi
|
t": 5}
pipeline = [match, group, sort, limit]
return pipeline
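# Added note (not in the original script): this pipeline returns the five most
# active contributors, as documents of the form
# {'_id': '<username>', 'total': <number of elements created by that user>}.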
def aggregate(db, pipeline):
return [doc for doc in db.openStreetDataMap.aggregate(pipeline)]
if __name__ == '__main__':
db = get_db('udacity')
pipeline = make_pipeline()
result = aggregate(db, pipeline)
# print(type(result))
# assert len(result) == 1
import pprint
pprint.pprint(result)
| imk1/IMKTFBindingCode | filterHomerMotifs.py | Python | mit | 2,184 | 0.028388 |
import sys
import argparse
def parseArgument():
# Parse the input
parser=argparse.ArgumentParser(description=\
"Filter Homer motifs to remove those with a low % of hits in target sites and a low value of target/background")
parser.add_argument("--HomerMotifsFileName", required=True,\
help='Name of file with Homer motifs')
parser.add_argument("--HomerMotifsFiltFileName", required=True,\
help='Name of file where the filtered Homer motifs will be recorded')
parser.add_argument("--minFoldChange", required=False, type=float, default=1.5,\
help='Minimum target/background ratio a motif needs to be included')
parser.add_argument("--minTargetPercentage", required=False, type=float, default=10.0,\
help='Minimum target percentage a motif needs to be included')
options = parser.parse_args()
return options
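# Added usage sketch (hypothetical file names, not from the original script):
# python filterHomerMotifs.py --HomerMotifsFileName homerMotifs.all.motifs \
#     --HomerMotifsFiltFileName homerMotifs.filtered.motifs --minFoldChange 2.0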
def filterHomerMotifs(options):
# Filter Homer motifs to remove those with a low % of hits in target sites and a low value of target/background
HomerMotifsFile = open(options.HomerMotifsFileName)
HomerMotifsFiltFile = open(options.HomerMotifsFiltFileName, 'w+')
recordCurrentMotif = False
for line in HomerMotifsFile:
# Iterate through the lines of the Homer motifs file and record the motifs that pass the cutoffs
if line[0] == ">":
            # At a new motif, so check if the motif passes the cutoffs
foregroundBackground = line.strip().split("\t")[5]
foregroundBackgroundElements = foregroundBackground.split(",")
foregroundElements = foregroundBackgroundElements[0].split("(")
backgroundElements = foregroundBackgroundElements[1].split("(")
            if (float(foregroundElements[1][0:-2]) < options.minTargetPercentage) or \
(float(foregroundElements[1][0:-2]) < options.minFoldChange * float(backgroundElements[1][0:-2])):
# The current motif does not meet the thresholds, so skip it
recordCurrentMotif = False
else:
# Record the current motif
recordCurrentMotif = True
if recordCurrentMotif:
# Record the current line of the current motif
HomerMotifsFiltFile.write(line)
HomerMotifsFile.close()
HomerMotifsFiltFile.close()
if __name__ == "__main__":
options = parseArgument()
filterHomerMotifs(options)
|
mjmeli/facebook-chat-word-cloud
|
facebook_wordcloud/word_counter.py
|
Python
|
mit
| 2,049
| 0.00244
|
"""
word_counter
Provides helper functions for word counts.
"""
import os
import re
from collections import Counter
STOPWORDS_FILE = os.path.join(os.path.dirname(__file__), "STOPWORDS")
# Get the frequencies of each word as a dictionary.
# strings = a list of strings, can be strings of any length/number of words
def get_frequencies(strings):
    # Ensure the strings argument is a list
if type(strings) is not list:
strings = [strings]
# Use a Counter to do this easily
counts = Counter()
# Use a regular expression to easily remove punctuation
words = re.compile(r'[\w\'-]+')
# Split each string/sentence into individual words, make them all lowercase,
# then add them to the counter
for sentence in strings:
counts.update(words.findall(sentence.lower()))
return dict(counts)
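# Illustrative example (assumed behavior, not taken from the source):
#   get_frequencies(["Hello, hello world!"])  ->  {'hello': 2, 'world': 1}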
# Get the frequencies of each word as a tuple.
# strings = a list of strings, can be strings of any length/number of words
def get_frequencies_tuple(strings):
return get_frequencies(strings).items()
# Load stopwords from file, if it exists
def load_stopwords():
if os.path.isfile(STOPWORDS_FILE):
return set([x.strip() for x in open(STOPWORDS_FILE).read().split('\n')])
else:
return set()
# Filter out stopwords from a frequencies dict or tuple
def filter_stopwords(frequencies, additional_stopwords=None):
    # Convert to tuples for ease of development if given a dictionary
is_dict = False
if type(frequencies) is dict:
is_dict = True
frequencies = frequencies.items()
# Load stopwords from file
stopwords = load_stopwords()
# Add in additional stopwords
if additional_stopwords is not None:
for word in additional_stopwords:
stopwords.add(word)
# Filter out words that are stopwords
filtered = []
for word in frequencies:
if not word[0] in stopwords:
filtered.append(word)
# Convert back to original form
if is_dict:
return dict(filtered)
else:
return filtered
|
r4wd3r/VAGO
|
src/controller/FileProcessing.py
|
Python
|
gpl-2.0
| 2,276
| 0.004394
|
# encoding=utf-8
import codecs
import sys
from src.view.FileProcessingOutput import FileProcessingOutput
class FileProcessing():
def __init__(self):
self.fileProcessingOutput = FileProcessingOutput()
def read_input_file(self, file_path, file_type):
'''
        Read the input file and process its lines.
:param file_path:
:return: file_lines
'''
file_lines = []
line_counter = 0
self.fileProcessingOutput.print_reading_file(file_path)
try:
with codecs.open(file_path, encoding='utf8') as f:
for l in f:
line_counter += 1
line = l.strip().encode("utf-8")
if line != "":
if self.check_line_format(line, file_type, line_counter):
file_lines.append(line)
self.fileProcessingOutput.print_input_file_lines(len(file_lines))
        except Exception:
self.fileProcessingOutput.print_error_reading_file()
sys.exit()
if not file_lines:
self.fileProcessingOutput.print_error_reading_file()
sys.exit()
return file_lines
def check_line_format(self, line, file_type, line_counter):
        '''
        Check that the line matches the processing format and the given file type.
        :param line: Line to process
        :param file_type: File type
        :param line_counter: Line counter, used to report the location of errors.
        :return: Whether or not the line complies with the expected format.
        '''
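        # Illustrative (hypothetical) inputs: file_type 1 accepts "alice:secret",
        # while file_type 2 needs at least three ':'-separated fields,
        # e.g. "alice:secret:extra".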
if file_type == 0:
return True
elif file_type == 1:
if not ':' in line:
self.fileProcessingOutput.print_error_delimiter_not_found(line_counter)
sys.exit()
return True
elif file_type == 2:
if not ':' in line:
self.fileProcessingOutput.print_error_delimiter_not_found(line_counter)
sys.exit()
_splitted_line = line.split(':')
if len(_splitted_line) < 3:
self.fileProcessingOutput.print_error_format_not_correct(line_counter)
sys.exit()
return True
|
ctuning/ck-env
|
soft/lib.opencv/customize.py
|
Python
|
bsd-3-clause
| 8,458
| 0.033932
|
#
# Collective Knowledge (individual environment - setup)
#
# See CK LICENSE.txt for licensing details
# See CK COPYRIGHT.txt for copyright details
#
# Developer: Grigori Fursin, Grigori.Fursin@cTuning.org, http://fursin.net
#
import os
##############################################################################
# get version from path
def version_cmd(i):
hosd=i['host_os_dict']
tosd=i['target_os_dict']
win=hosd.get('windows_base','')
ck=i['ck_kernel']
fp=i['full_path']
ver=''
p0=os.path.basename(fp)
p1=os.path.dirname(fp)
px=p1
if win=='yes':
px=os.path.join(os.path.dirname(p1),'lib')
lst=os.listdir(px)
for fn in lst:
if win=='yes':
if fn.startswith('opencv_core'):
j=fn.find('.')
if j>0:
ver=fn[11:j]
break
elif fn.startswith('opencv_world'):
j=fn.find('.')
if j>0:
ver=fn[12:j]
break
elif fn.startswith(p0):
x=fn[len(p0):]
if x.startswith('.'):
ver=x[1:]
break
return {'return':0, 'cmd':'', 'version':ver}
##############################################################################
# setup environment setup
def setup(i):
"""
Input: {
cfg - meta of this soft entry
self_cfg - meta of module soft
ck_kernel - import CK kernel module (to reuse functions)
host_os_uoa - host OS UOA
host_os_uid - host OS UID
host_os_dict - host OS meta
target_os_uoa - target OS UOA
target_os_uid - target OS UID
target_os_dict - target OS meta
target_device_id - target device ID (if via ADB)
tags - list of tags used to search this entry
env - updated environment vars from meta
customize - updated customize vars from meta
deps - resolved dependencies for this soft
interactive - if 'yes', can ask questions, otherwise quiet
}
Output: {
return - return code = 0, if successful
> 0, if error
(error) - error text if return > 0
bat - prepared string for bat file
}
"""
import os
# Get variables
ck=i['ck_kernel']
s=''
iv=i.get('interactive','')
env=i.get('env',{})
cfg=i.get('cfg',{})
deps=i.get('deps',{})
tags=i.get('tags',[])
cus=i.get('customize',{})
target_d=i.get('target_os_dict',{})
hosd=i.get('host_os_dict',{})
win=target_d.get('windows_base','')
winh=hosd.get('windows_base','')
mic=target_d.get('intel_mic','')
remote=target_d.get('remote','')
mingw=target_d.get('mingw','')
tbits=target_d.get('bits','')
ep=cus['env_prefix']
pi=cus.get('path_install','')
ellp=hosd.get('env_ld_library_path','')
if ellp=='': ellp='LD_LIBRARY_PATH'
elp=hosd.get('env_library_path','')
if elp=='': elp='LIBRARY_PATH'
fp=cus.get('full_path','')
pl=os.path.dirname(fp)
px=os.path.dirname(pl)
pi=fp
found=False
while True:
if (remote=='yes' and os.path.isdir(os.path.join(pi,'jni','include'))) or \
os.path.isdir(os.path.join(pi,'include')):
found=True
break
pix=os.path.dirname(pi)
if pix==pi:
break
pi=pix
if not found:
return {'return':1, 'error':'can\'t find root dir of this installation'}
if win!='yes':
env[ep+'_LFLAG_IMGCODECS']='-lopencv_imgcodecs'
env[ep+'_LFLAG_IMGPROC']='-lopencv_imgproc'
env[ep+'_LFLAG_HIGHGUI']='-lopencv_highgui'
env[ep+'_LFLAG_CORE']='-lopencv_core'
################################################################
env[ep]=pi
if remote=='yes':
# cus['path_bin']=pi+'\\OpenCV-android-sdk\\sdk\\native\\bin'
# cus['path_lib']=pi+'\\OpenCV-android-sdk\\sdk\\native\\libs\\armeabi'
#
# cus['path_include']=pi+'\\opencv-2.4.11\\include'
# cus['path_includes']=[pi+'\\opencv-2.4.11\\3rdparty\\include\\opencl\\1.2']
#
# cus['path_static_lib']=cus['path_lib']
#
# cus['static_lib']='libopencv_core.a'
#
# cus['extra_static_libs']={'opencv_imgproc':'libopencv_imgproc.a',
# 'opencv_ocl':'libopencv_ocl.a',
# 'opencv_highgui':'libopencv_highgui.a'}
#
# env['CK_ENV_LIB_OPENCV_STATIC_LIB_PATH']=cus['path_static_lib']
#
# if win=='yes':
# s+='\nset '+ellp+'=%CK_ENV_LIB_OPENCV_LIB%;%'+ellp+'%\n'
# else:
# s+='\nexport '+ellp+'=$CK_ENV_LIB_OPENCV_LIB:$'+ellp+'\n'
# Check libs/ABI
pabi=pl[len(pi)+1:]
pinc=os.path.join(pi,'jni','include')
if not os.path.isdir(pinc):
return {'return':1, 'error':'include directory is not found in '+pi}
cus['path_include']=pinc
cus['path_lib']=pl
        plx=os.path.join(pi,'3rdparty',pabi)
cus['path_static_lib']=cus['path_lib']
cus['static_lib']='libopencv_core.a'
        cus['extra_static_libs']={'opencv_imgproc':'libopencv_imgproc.a',
                                  'opencv_ocl':'libopencv_ocl.a',
                                  'opencv_highgui':'libopencv_highgui.a'}
env[ep+'_JNI']=os.path.join(pi,'jni')
env[ep+'_THIRDPARTY']=os.path.join(pi,'3rdparty')
env[ep+'_STATIC_LIB_PATH']=cus['path_static_lib']
if winh=='yes':
s+='\nset '+ellp+'=%'+ep+'_LIB%;'+plx+';%'+ellp+'%\n'
else:
s+='\nexport '+ellp+'=$'+ep+'_LIB:"'+plx+'":$'+ellp+'\n'
r = ck.access({'action': 'lib_path_export_script', 'module_uoa': 'os', 'host_os_dict': hosd,
'lib_path': [ cus['path_lib'], plx ] })
if r['return']>0: return r
s += r['script']
elif winh=='yes':
ext='x64'
if tbits=='32': ext='ia32'
# Check libs version extensions
cus['path_bin']=px+'\\bin'
cus['path_lib']=px+'\\lib'
cus['path_include']=pi+'/include'
# Check version
lst=os.listdir(cus['path_lib'])
le=''
for fn in lst:
if fn.startswith('opencv_core'):
j=fn.find('.')
if j>0:
le=fn[11:j]
break
cus['path_include']=pi+'/include'
cus['path_static_lib']=cus['path_lib']
cus['path_dynamic_lib']=cus['path_bin']
cus['static_lib']='opencv_core'+le+'.lib'
cus['dynamic_lib']='opencv_core'+le+'.dll'
cus['extra_static_libs']={'opencv_imgproc':'opencv_imgproc'+le+'.lib',
'opencv_ocl':'opencv_ocl'+le+'.lib',
'opencv_highgui':'opencv_highgui'+le+'.lib'}
cus['extra_dynamic_libs']={'opencv_imgproc':'opencv_imgproc'+le+'.dll',
'opencv_ocl':'opencv_ocl'+le+'.dll',
'opencv_highgui':'opencv_highgui'+le+'.dll'}
env[ep+'_LFLAG_IMGPROC']=os.path.join(pl, 'opencv_imgproc'+le+'.lib')
env[ep+'_LFLAG_IMGCODECS']=os.path.join(pl, 'opencv_imgcodecs'+le+'.lib')
env[ep+'_LFLAG_CORE']=os.path.join(pl, 'opencv_core'+le+'.lib')
env[ep+'_LFLAG_HIGHGUI']=os.path.join(pl, 'opencv_highgui'+le+'.lib')
env[ep+'_LFLAG_OCL']=os.path.join(pl, 'opencv_ocl'+le+'.lib')
env[ep+'_STATIC_LIB_PATH']=cus['path_static_lib']
env[ep+'_DYNAMIC_LIB_PATH']=cus['path_dynamic_lib']
s+='\nset PATH='+cus['path_bin']+';%PATH%\n\n'
else:
cus['path_lib']=pl
cus['path_include']=pi+'/include'
cus['path_static_lib']=cus['path_lib']
cus['path_dynamic_lib']=cus['path_lib']
cus['dynamic_lib']='libopencv_core.so'
        cus['extra_dynamic_libs']={'opencv_imgproc':'libopencv_imgproc.so',
                                   'opencv_ocl':'libopencv_ocl.so',
                                   'opencv_highgui':'libopencv_highgui.so'}
|
astrofrog/psrecord
|
setup.py
|
Python
|
bsd-2-clause
| 2,302
| 0.001303
|
#!/usr/bin/env python
#
# Copyright (c) 2013, Thomas P. Robitaille
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from setuptools import setup, Command
from distutils.command.build_py import build_py
with open('README.rst') as infile:
long_description = infile.read()
from psrecord import __version__
setup(name='psrecord',
version=__version__,
description='Python package to record activity from processes',
long_description=long_description,
url='https://github.com/astrofrog/psrecord',
license='Simplified BSD License',
author='Thomas Robitaille',
author_email='thomas.robitaille@gmail.com',
packages=['psrecord', 'psrecord.tests'],
provides=['psrecord'],
scripts=['scripts/psrecord'],
install_requires=['psutil>=2'],
cmdclass={'build_py': build_py},
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python",
"License :: OSI Approved :: BS
|
D License",
],
)
|
telegraphic/fits2hdf
|
aadnc_benchmarks/quinoa_idea/quinoa.py
|
Python
|
mit
| 4,834
| 0.005176
|
# -*- coding: utf-8 -*-
"""
quinoa.py
=========
Create QUINOA compressed dataset.
QUINOA (QUasi Integer Noise Offset Adjustment) compression is a lossy compression
algorithm for floating point data, and is similar to the RICE-based compression
technique that FPACK uses on floating point data.
The FITS compression package FPACK quantizes floating point pixel values into
32bit integers using a linear scaling function:
integer_value = (floating_point_value - ZERO_POINT ) / SCALE_FACTOR
This array of scaled integers is then compressed using one of the supported compression
algorithms (the default algorithm is RICE).
Basically, QUINOA applies this scaling function then applies bitshuffle compression,
instead of using RICE.
QUINOA is an experimental grain, and should not be used as your staple compression
at this stage.
COUSCOUS: COnversion to Unsigned / Signed ...
"""
import numpy as np
from scipy.signal import convolve2d
from pylab import plt
def estimate_noise(data):
""" Estimate the RMS noise of an image
from http://stackoverflow.com/questions/2440504/
noise-estimation-noise-measurement-in-image
Reference: J. Immerkaer, “Fast Noise Variance Estimation”,
Computer Vision and Image Understanding,
Vol. 64, No. 2, pp. 300-302, Sep. 1996 [PDF]
"""
H, W = data.shape
data = np.nan_to_num(data)
M = [[1, -2, 1],
[-2, 4, -2],
[1, -2, 1]]
sigma = np.sum(np.sum(np.abs(convolve2d(data, M))))
sigma = sigma * np.sqrt(0.5 * np.pi) / (6 * (W - 2) * (H - 2))
return sigma
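# Illustrative sanity check (assumed behavior, not from the source): for pure
# Gaussian noise, estimate_noise(np.random.normal(0, 2.0, (100, 100))) should
# return a value close to 2.0.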
def apply_dither(data, seed):
""" Apply subtractive dither """
np.random.seed(seed)
dither_vals = np.random.random(data.shape)
data = data + dither_vals
return data
def unapply_dither(data, seed):
""" Remove subtractive dither from image """
np.random.seed(seed)
dither_vals = np.random.random(data.shape)
data = data - dither_vals
return data
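# Round-trip note (assumed property, not asserted in the source): with the same
# seed, unapply_dither(apply_dither(x, s), s) recovers x up to floating-point
# rounding, since np.random.seed makes the dither values reproducible.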
def quinoa_scale(data, q=4.0, subtractive_dither=True, seed=12345):
""" Apply quinoa scaling to a floating-point dataset.
data: numpy.ndarray of data to be converted.
q: quantization parameter, default 4.0
subtractive_dither: apply dither for subtractive dithering
seed: change seed value
"""
if subtractive_dither:
data = apply_dither(data, seed)
zero_point = np.nanmin(data)
max_point = np.nanmax(data)
dyn_range = max_point - zero_point
data_zeroed = data - zero_point
# Compute scale factor
noise_rms = estimate_noise(data)
scale_factor = (q / noise_rms) * (2**32 / dyn_range)
data_int = np.ceil((data_zeroed) * scale_factor).astype('uint32')
    scale_dict = {
'zero': zero_point,
'noise_rms': noise_rms,
'scale_factor': scale_factor,
'dithered': subtractive_dither,
'seed': seed,
'data': data_int,
'q': q,
'dtype': str(data_int.dtype)
}
return scale_dict
def quinoa_unscale(scale_dict):
""" Unapply QUINOA scaled data """
ss = scale_dict
data = ss["data"].astype('float32')
data = (data / ss["scale_factor"]) + ss["zero"]
if ss["dithered"]:
data = unapply_dither(data, ss["seed"])
return data
def couscous_scale(data):
""" Apply couscous scaling to data
(ceiling rounding and converion to int) """
d_max = np.nanmax(data)
d_min = np.nanmin(data)
if d_max <= 2**8 and d_min >= -2**8:
scale_factor = 2**16
data *= scale_factor
data = np.ceil(data).astype("int32")
if d_max <= 2**15 and d_min >= -2**15:
data = np.ceil(data).astype("int16")
scale_factor = 1
elif d_max <= 2**31 and d_min >= -2**31:
data = np.ceil(data).astype("int32")
scale_factor = 1
else:
scale_factor = d_max * 2**32
data_zeroed = (data - d_min) / scale_factor
data = np.ceil(data_zeroed).astype("int32")
scale_dict = {
'data': data,
'min': d_min,
'max': d_max,
'scale_factor': scale_factor,
'dtype': str(data.dtype)
}
return scale_dict
if __name__ == "__main__":
d = np.linspace(1e4, 1e5, 100)
data = np.sin(np.outer(d, d)) * 16384
noise = np.random.random((100,100)) / 1000
data = data + noise
scale_dict = quinoa_scale(data, q=0.001)
data_unscaled = quinoa_unscale(scale_dict)
print(scale_dict)
print(data_unscaled)
plt.figure()
plt.subplot(2,2,1)
plt.imshow(data)
plt.colorbar()
plt.subplot(2,2,2)
plt.imshow(scale_dict["data"])
plt.colorbar()
plt.subplot(2,2,3)
plt.imshow(data_unscaled)
plt.colorbar()
plt.subplot(2,2,4)
plt.imshow(np.abs(data - data_unscaled) / np.max(data) * 100)
plt.colorbar()
plt.show()
|
gam-phon/taiga-back
|
taiga/auth/tokens.py
|
Python
|
agpl-3.0
| 2,045
| 0
|
# Copyright (C) 2014-2016 Andrey Antukh <niwi@niwi.nz>
# Copyright (C) 2014-2016 Jesús Espino <jespinog@gmail.com>
# Copyright (C) 2014-2016 David Barragán <bameda@dbarragan.com>
# Copyright (C) 2014-2016 Alejandro Alonso <alejandro.alonso@kaleidos.net>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from taiga.base import exceptions as exc
from django.apps import apps
from django.core import signing
from django.utils.translation import ugettext as _
def get_token_for_user(user, scope):
"""
    Generate a new signed token for
    the specified user, limited to a scope (identified by a string).
"""
data = {"user_%s_id" % (scope): user.id}
return signing.dumps(data)
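# Illustrative usage (hypothetical scope name): get_token_for_user(user, "cancel")
# yields a signed string that get_user_for_token(token, "cancel") can later
# verify and resolve back to the same user.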
def get_user_for_token(token, scope, max_age=None):
"""
    Given a self-contained token and a scope, try to parse and
unsign it.
If max_age is specified it checks token expiration.
    If the token passes validation, returns
a user instance corresponding with user_id stored
in the incoming token.
"""
try:
data = signing.loads(token, max_age=max_age)
except signing.BadSignature:
raise exc.NotAuthenticated(_("Invalid token"))
model_cls = apps.get_model("users", "User")
try:
user = model_cls.objects.get(pk=data["user_%s_id" % (scope)])
except (model_cls.DoesNotExist, KeyError):
raise exc.NotAuthenticated(_("Invalid token"))
else:
return user
|
rwl/pyreto
|
pyreto/continuous/task.py
|
Python
|
gpl-3.0
| 6,830
| 0.00366
|
# Copyright (C) 2007-2010 Richard Lincoln
#
# PYPOWER is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, either version 3 of the License,
# or (at your option) any later version.
#
# PYPOWER is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PYPOWER. If not, see <http://www.gnu.org/licenses/>.
""" Defines a profit maximisation task.
"""
import logging
from pybrain.rl.environments import Task
from pyreto.discrete.task import ProfitTask as DiscreteProfitTask
from pylon import PQ, PV
logger = logging.getLogger(__name__)
class ProfitTask(DiscreteProfitTask):
""" Defines a task for continuous sensor and action spaces.
"""
def __init__(self, environment, maxSteps=24, discount=None):
super(ProfitTask, self).__init__(environment, maxSteps, discount)
#----------------------------------------------------------------------
# "Task" interface:
#----------------------------------------------------------------------
#: Limits for scaling of sensors.
self.sensor_limits = self._getSensorLimits()
#: Limits for scaling of actors.
self.actor_limits = self._getActorLimits()
#--------------------------------------------------------------------------
# "Task" interface:
#--------------------------------------------------------------------------
# def getObservation(self):
# """ A filtered mapping to getSample of the underlying environment. """
#
# sensors = self.env.getSensors()
# print "SENSORS:", sensors
# if self.sensor_limits:
# sensors = self.normalize(sensors)
#
# return sensors
def performAction(self, action):
""" Execute one action.
"""
# print "ACTION:", action
self.t += 1
Task.performAction(self, action)
# self.addReward()
self.samples += 1
#--------------------------------------------------------------------------
# "ProfitTask" interface:
#--------------------------------------------------------------------------
def _getActorLimits(self):
""" Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)],
one tuple per parameter, giving min and max for that parameter.
"""
actorLimits = []
for _ in range(self.env.numOffbids):
for _ in self.env.generators:
actorLimits.append((0.0, self.env.maxMarkup))
for _ in range(self.env.numOffbids):
for _ in self.env.generators:
if self.env.maxWithhold is not None:
actorLimits.append((0.0, self.env.maxWithhold))
logger.debug("Actor limits: %s" % actorLimits)
return actorLimits
def _getSensorLimits(self):
""" Returns a list of 2-tuples, e.g. [(-3.14, 3.14), (-0.001, 0.001)],
one tuple per parameter, giving min and max for that parameter.
"""
limits = []
limits.extend(self._getTotalDemandLimits())
# limits.extend(self._getDemandLimits())
# limits.extend(self._getPriceLimits())
# limits.extend(self._getVoltageSensorLimits())
# limits.extend(self._getVoltageMagnitudeLimits())
# limits.extend(self._getVoltageAngleLimits())
# limits.extend(self._getVoltageLambdaLimits())
# limits.extend(self._getFlowLimits())
logger.debug("Sensor limits: %s" % limits)
return limits
# limits = []
#
# # Market sensor limits.
# limits.append((1e-6, BIGNUM)) # f
# pLimit = 0.0
# for g in self.env.generators:
# if g.is_load:
# pLimit += self.env._g0[g]["p_min"]
# else:
# pLimit += self.env._g0[g]["p_max"]
# limits.append((0.0, pLimit)) # quantity
## cost = max([g.total_cost(pLimit,
## self.env._g0[g]["p_cost"],
## self.env._g0[g]["pcost_model"]) \
## for g in self.env.generators])
# cost = self.env.generators[0].total_cost(pLimit,
# self.env._g0[g]["p_cost"], self.env._g0[g]["pcost_model"])
# limits.append((0.0, cost)) # mcp
#
# # Case sensor limits.
## limits.extend([(-180.0, 180.0) for _ in case.buses]) # Va
# limits.extend([(0.0, BIGNUM) for _ in case.buses]) # P_lambda
#
# limits.extend([(-b.rate_a, b.rate_a) for b in case.branches]) # Pf
## limits.extend([(-BIGNUM, BIGNUM) for b in case.branches]) # mu_f
#
# limits.extend([(g.p_min, g.p_max) for g in case.generators]) # Pg
## limits.extend([(-BIGNUM, BIGNUM) for g in case.generators]) # Pg_max
## limits.extend([(-BIGNUM, BIGNUM) for g in case.generators]) # Pg_min
def _getTotalDemandLimits(self):
Pdmax = sum([b.p_demand for b in self.env.market.case.buses])
return [(self.env.Pd_min, Pdmax)]
def _getDemandLimits(self):
limits = [(0.0, b.p_demand) for b in self.env.market.case.buses
if b.type == PQ]
return limits
def _getPriceLimits(self):
mcpLimit = (0.0, self.env.market.priceCap)
sysLimit = (0.0, self.fmax)
return [mcpLimit, sysLimit]
def _getVoltageSensorLimits(self):
limits = []
for bus in self.env.market.case.connected_buses:
if bus.type == PV:
limits.append(None)
else:
limits.append((bus.v_min, bus.v_max))
return limits
def _getVoltageMagnitudeLimits(self):
limits = []
Vmax = [b.v_max for b in self.env.market.case.connected_buses]
Vmin = [b.v_min for b in self.env.market.case.connected_buses]
limits.extend(zip(Vmin, Vmax))
# nb = len(self.env.market.case.connected_buses)
# limits.extend([(-180.0, 180.0)] * nb)
return limits
def _getVoltageAngleLimits(self):
limits = []
nb = len(self.env.market.case.connected_buses)
limits.extend([(-180.0, 180.0)] * nb)
return limits
def _getVoltageLambdaLimits(self):
nb = len(self.env.market.case.connected_buses)
return [None] * nb
def _getFlowLimits(self):
rateA = [l.rate_a for l in self.env.market.case.online_branches]
neg_rateA = [-1.0 * r for r in rateA]
limits = zip(neg_rateA, rateA)
# limits.extend(zip(neg_rateA, rateA))
return limits
|
sicekit/sicekit
|
robots/tool-list_networks.py
|
Python
|
mit
| 2,437
| 0.0119
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This bot regenerates the page VEIDs
The following parameters are supported:
-debug If given, doesn't do any real changes, but only shows
what would have been changed.
"""
__version__ = '$Id: basic.py 4946 2008-01-29 14:58:25Z wikipedian $'
import wikipedia
import pagegenerators, catlib, re, socket, sys
from iplib import CIDR, IPv4Address
class IpNetworkBot:
def __init__(self, nets_generator, debug):
"""
Constructor. Parameters:
        * generator - The page generator that determines which pages
to work on.
* debug - If True, doesn't do any real changes, but only shows
what would have been changed.
"""
self.nets_generator = nets_generator
self.nets = dict()
self.debug = debug
def registerIpNet(self, page):
if ":" in page.title(): return
text = page.get()
in_ipnettpl = False
private = False
for line in text.split("\n"):
if line.startswith("{{IPNetwork"):
in_ipnettpl = True
continue
if line.startswith("}}"):
in_ipnettpl = False
continue
            if in_ipnettpl:
if line.startswith("|PRIVATE=1"):
private = True
if not private:
print page.title()
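    # A page counts as private when its template block contains a line like
    # "|PRIVATE=1" between "{{IPNetwork" and "}}" (page layout inferred from
    # the parsing above, not documented in the source).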
def run(self):
print "# generated by netlist.py"
for page in self.nets_generator:
            self.registerIpNet(page)
def main():
# The generator gives the pages that should be worked upon.
gen = None
    # If debug is True, doesn't make any real changes, but only shows
# what would have been changed.
debug = False
wantHelp = False
# Parse command line arguments
for arg in wikipedia.handleArgs():
if arg.startswith("-debug"):
debug = True
else:
wantHelp = True
if not wantHelp:
# The preloading generator is responsible for downloading multiple
# pages from the wiki simultaneously.
cat = catlib.Category(wikipedia.getSite(), 'Category:%s' % 'IP-Network')
nets_gen = pagegenerators.CategorizedPageGenerator(cat, start = None, recurse = False)
nets_gen = pagegenerators.PreloadingGenerator(nets_gen)
bot = IpNetworkBot(nets_gen, debug)
bot.run()
else:
wikipedia.showHelp()
if __name__ == "__main__":
try:
main()
finally:
wikipedia.stopme()
|
gbour/Strawberry
|
strawberry/tag.py
|
Python
|
agpl-3.0
| 3,892
| 0.031346
|
# -*- coding: utf8 -*-
__version__ = "$Revision$ $Date$"
__author__ = "Guillaume Bour <guillaume@bour.cc>"
__license__ = """
Copyright (C) 2010-2011, Guillaume Bour <guillaume@bour.cc>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as
published by the Free Software Foundation, version 3.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from twisted.web import server
from mother.callable import Callable, callback, LoopbackSelf as self
from mother import routing
from tentacles import Object
from tentacles.fields import *
from tentacles.queryset import filter, map, len
class Tag(Object, Callable):
__stor_name__ = 'strawberry__tag'
id = Integer(pk=True, autoincrement=True)
name = String(unique=True, allow_none=False)
description = String()
def GET(self, id, **kwargs):
tag = list(filter(lambda x: x.id == id, Tag))
if len(tag) == 0:
return (404, None)
tag = tag[0]
res = {}
for name, fld in tag.__fields__.iteritems():
if isinstance(fld, Reference):
continue
res[name] = getattr(tag, name)
return res
def PUT(self, content):
if 'name' not in content:
return (400, "*name* key is mandatory")
if 'id' in content and len(filter(lambda x: x.id == content['id'], Tag)) > 0:
return (400, "id already exists")
if len(filter(lambda x: x.link == content['name'], Tag)) > 0:
return (400, "name must be unique")
tag = Tag()
for key, value in content.iteritems():
if not key in tag.__fields__:
return(409, "unknown field '%s'" % key)
setattr(tag, key, value)
tag.save()
return tag.id
def DELETE(self, id):
"""
NOTE: associated tags, even if specially created for this link, are not deleted
"""
tags = list(filter(lambda x: x.id == id, Tag))
if len(tags) == 0:
return (404, "not found")
elif len(tags) > 1:
return (500, "return several tags for the same id")
tags[0].delete()
return (200, True)
@callback
def all(self):
return list(map(lambda x: x.id, Tag))
"""
    bytag is not one of the GET/POST/PUT/DELETE methods; it does not take the
    default class ctype
"""
"""
#@callback(url='/{tag}', content_type='internal/python', modifiers={'text/html': self.html_bytag})
def bytag(self, tag, **kwargs):
#return "search tag by name= %s" % tagQ
errors = {}
if len(tag) == 0:
errors['tag'] = (10, 'field required')
return routing.HTTP_400(errors) # bad request
_tag = list(filter(lambda t: t.name == tag, Tag))
if len(_tag) == 0:
return routing.HTTP_404({'tag': (04, 'not found')})
return routing.HTTP_200(_tag[0])
def html_bytag(self, tag, __callback__, **kwargs):
ret = __callback__(tag)
if not isinstance(ret, routing.HTTP_200):
#TODO: how to reference root app module ?
#strawberry = sys.modules['strawberry']
#print strawberry.http401
#return routing.Redirect(strawberry.http401)
            # We return an error page: HTTP code == 404, routed to strawberry.404
return routing.HTTP_404 #(content=Template('404.html', title='404'))
tag = ret.msg
print dir(tag), tag.__fields__
#TODO: tentacles workaround
links = list(tag.Link__tags)
tid = tag.id
related = list(filter(lambda t: not t.Link__tags.isdisjoint(links) and t.id != tid, Tag))
        from mother.template import Template
return Template('tag.html',
title="Tag ☄ %s" % tag.name,
query=tag.name,
tagname=tag.name,
links=tag.Link__tags,
searchtags=[],
related=related,
)
"""
|
tkosciol/micronota
|
micronota/cli.py
|
Python
|
bsd-3-clause
| 3,634
| 0
|
# ----------------------------------------------------------------------------
# Copyright (c) 2015--, micronota development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import click
from os import listdir
from os.path import abspath, join, dirname, splitext
from .config import Configuration
_CONTEXT_SETTINGS = dict(
# allow case insensitivity for the (sub)commands and their options
token_normalize_func=lambda x: x.lower(),
# set --help option for all (sub)commands
help_option_names=['-h', '--help'])
class AliasedGroup(click.Group):
'''Custom subclass of click.Group to enable alias for commands.
This implements a subclass of click.Group that accepts a prefix
for a command. If there were a (sub)command called "push", it would
accept "pus" as an alias (as long as it is unique).
This is borrowed from `click` example of alias.
'''
def _get_command(self, ctx, cmd_name):
return click.Group.get_command(self, ctx, cmd_name)
def get_command(self, ctx, cmd_name):
# allow automatic abbreviation of the command. "status" for
# instance will match "st". We only allow that however if
# there is only one command.
matches = [x for x in self.list_commands(ctx)
if x.lower().startswith(cmd_name.lower())]
if not matches:
return
elif len(matches) == 1:
return self._get_command(ctx, matches[0])
ctx.fail('Too many matches: %s' % ', '.join(sorted(matches)))
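    # Illustrative behavior (hypothetical commands): with subcommands 'status'
    # and 'start', the prefix 'sta' is ambiguous and fails, while 'stat'
    # uniquely resolves to 'status'.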
class ComplexCLI(AliasedGroup):
'''Custom subclass to load subcommands dynamically from a plugin folder.
It looks in `commands` folder for subcommands.
    This is borrowed from the `click` complex example.
'''
def list_commands(self, ctx):
rv = []
        cmd_folder = abspath(join(dirname(__file__), 'commands'))
        for filename in listdir(cmd_folder):
if filename.endswith('.py') and filename != '__init__.py':
rv.append(splitext(filename)[0])
rv.sort()
return rv
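    # For example (hypothetical layout): a file micronota/commands/annotate.py
    # would surface here as the 'annotate' subcommand.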
def _get_command(self, ctx, cmd_name):
try:
mod = __import__('micronota.commands.' + cmd_name,
None, None, ['cli'])
except ImportError as err:
print(err)
return
return mod.cli
@click.group(cls=ComplexCLI, context_settings=_CONTEXT_SETTINGS)
@click.option('--cfg', default=None,
type=click.Path(exists=True, dir_okay=False),
help='Config file.')
@click.option('--param', default=None,
type=click.Path(exists=True, dir_okay=False),
help=('Parameter file to change the default behavior '
'of wrapped tools.'))
@click.option('--log', default=None,
type=click.Path(exists=True, dir_okay=False),
help='Logging config file.')
@click.version_option() # add --version option
@click.pass_context
def cmd(ctx, cfg, param, log):
'''Annotation pipeline for Bacterial and Archaeal (meta)genomes.
It predicts features (ncRNA, coding genes, etc.) on the input sequences
and assign functions to those features.
It works best on long sequences, such as assembled contigs, draft genomes,
or full genomes.
For more info, please check out https://github.com/biocore/micronota.
'''
# load the config.
ctx.config = Configuration(misc_fp=cfg, param_fp=param, log_fp=log)
|
mgard/epater
|
i18n.py
|
Python
|
gpl-3.0
| 2,337
| 0.001712
|
import gettext
class I18n:
"""
This class is used for internationalization without a specific language.
Objects can be stored as msgid from gettext or as str.
"""
def __init__(self, msg, isSTR=False):
if isSTR:
self.content = [msg]
else:
self.content = [self.I18n_inner(msg)]
def append(self, msg):
if type(msg) == self.__class__:
self.content += msg.content
else:
self.content.append(msg)
def __iadd__(self, msg):
self.append(msg)
return self
def format(self, *args, **kwargs):
self.content[-1].format(*args, **kwargs)
return self
def getText(self, lang):
"""
Return the localized translation
:param lang: the language to translate
"""
if type(lang) == str:
lang = gettext.translation('interpreter', './locale', languages=[lang], fallback=True)
result = ""
for msg in self.content:
if type(msg) == self.I18n_inner or type(msg) == self.__class__:
result += msg.getText(lang)
else:
result += msg
return result
# Inner class
class I18n_inner:
def __init__(self, msg):
self.msg = msg
self.formatArg = None
self.formatKwargs = None
def format(self, *args, **kwargs):
self.formatKwargs = kwargs
self.formatArg = args
return self
def getText(self, t):
            if self.formatKwargs:
for key, value in self.formatKwargs.items():
                    if type(value) == I18n or type(value) == self.__class__:
self.formatKwargs[key] = value.getText(t)
if self.formatArg:
resultArg = []
for arg in self.formatArg:
if type(arg) == I18n or type(arg) == self.__class__:
resultArg.append(arg.getText(t))
else:
resultArg.append(arg)
return t.gettext(self.msg).format(*resultArg, **self.formatKwargs)
elif self.formatKwargs:
return t.gettext(self.msg).format(**self.formatKwargs)
else:
return t.gettext(self.msg)
|
indico/indico
|
indico/modules/categories/blueprint.py
|
Python
|
mit
| 8,380
| 0.006683
|
# This file is part of Indico.
# Copyright (C) 2002 - 2022 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from flask import redirect, request
from indico.modules.categories.compat import compat_category
from indico.modules.categories.controllers.admin import RHManageUpcomingEvents
from indico.modules.categories.controllers.display import (RHCategoryCalendarView, RHCategoryIcon, RHCategoryInfo,
RHCategoryLogo, RHCategoryOverview, RHCategorySearch,
RHCategoryStatistics, RHCategoryStatisticsJSON,
RHCategoryUpcomingEvent, RHDisplayCategory, RHEventList,
RHExportCategoryAtom, RHExportCategoryICAL,
RHReachableCategoriesInfo, RHShowFutureEventsInCategory,
RHShowPastEventsInCategory, RHSubcatInfo)
from indico.modules.categories.controllers.management import (RHAddCategoryRole, RHAddCategoryRoleMembers,
RHAPIEventMoveRequests, RHCategoryRoleMembersExportCSV,
RHCategoryRoleMembersImportCSV, RHCategoryRoles,
RHCreateCategory, RHDeleteCategory, RHDeleteCategoryRole,
RHDeleteEvents, RHDeleteSubcategories, RHEditCategoryRole,
RHManageCategoryContent, RHManageCategoryIcon,
RHManageCategoryLogo, RHManageCategoryModeration,
RHManageCategoryProtection, RHManageCategorySettings,
RHMoveCategory, RHMoveEvents, RHMoveSubcategories,
RHRemoveCategoryRoleMember, RHSortSubcategories,
RHSplitCategory)
from indico.modules.users import User
from indico.web.flask.util import make_compat_redirect_func, redirect_view, url_for
from indico.web.flask.wrappers import IndicoBlueprint
def _redirect_event_creation(category_id, event_type):
anchor = f'create-event:{event_type}:{category_id}'
return redirect(url_for('.display', category_id=category_id, _anchor=anchor))
_bp = IndicoBlueprint('categories', __name__, template_folder='templates', virtual_template_folder='categories',
url_prefix='/category/<int:category_id>')
# Category management
_bp.add_url_rule('/manage/', 'manage_content', RHManageCategoryContent)
_bp.add_url_rule('/manage/delete', 'delete', RHDeleteCategory, methods=('POST',))
_bp.add_url_rule('/manage/icon', 'manage_icon', RHManageCategoryIcon, methods=('POST', 'DELETE'))
_bp.add_url_rule('/manage/logo', 'manage_logo', RHManageCategoryLogo, methods=('POST', 'DELETE'))
_bp.add_url_rule('/manage/move', 'move', RHMoveCategory, methods=('POST',))
_bp.add_url_rule('/manage/protection', 'manage_protection', RHManageCategoryProtection, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/settings', 'manage_settings', RHManageCategorySettings, methods=('POST', 'GET'))
_bp.add_url_rule('/manage/moderation', 'manage_moderation', RHManageCategoryModeration)
# Role management
_bp.add_url_rule('/manage/roles', 'manage_roles', RHCategoryRoles, methods=('POST', 'GET'))
_bp.add_url_rule('/manage/roles/create', 'add_role', RHAddCategoryRole, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/roles/<int:role_id>/edit', 'edit_role', RHEditCategoryRole, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/roles/<int:role_id>', 'delete_role', RHDeleteCategoryRole, methods=('DELETE',))
_bp.add_url_rule('/manage/roles/<int:role_id>/members', 'add_role_members', RHAddCategoryRoleMembers, methods=('POST',))
_bp.add_url_rule('/manage/roles/<int:role_id>/members/<int:user_id>', 'remove_role_member', RHRemoveCategoryRoleMember,
methods=('DELETE',))
_bp.add_url_rule('/manage/roles/<int:role_id>/members/import', 'add_members_import_csv',
RHCategoryRoleMembersImportCSV, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/roles/<int:role_id>/members.csv', 'members_export_csv', RHCategoryRoleMembersExportCSV)
# Event management
_bp.add_url_rule('/manage/events/delete', 'delete_events', RHDeleteEvents, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/events/move', 'move_events', RHMoveEvents, methods=('POST',))
_bp.add_url_rule('/manage/events/split', 'split_category', RHSplitCategory, methods=('GET', 'POST'))
# Subcategory management
_bp.add_url_rule('/manage/subcategories/create', 'create_subcategory', RHCreateCategory, methods=('GET', 'POST'))
_bp.add_url_rule('/manage/subcategories/delete', 'delete_subcategories', RHDeleteSubcategories, methods=('POST',))
_bp.add_url_rule('/manage/subcategories/move', 'move_subcategories', RHMoveSubcategories, methods=('POST',))
_bp.add_url_rule('/manage/subcategories/sort', 'sort_subcategories', RHSortSubcategories, methods=('POST',))
# Display
_bp.add_url_rule('!/', 'display', RHDisplayCategory, defaults={'category_id': 0})
_bp.add_url_rule('/', 'display', RHDisplayCategory)
_bp.add_url_rule('/event-list', 'event_list', RHEventList)
_bp.add_url_rule('/events.atom', 'export_atom', RHExportCategoryAtom)
_bp.add_url_rule('/events.ics', 'export_ical', RHExportCategoryICAL)
_bp.add_url_rule('/events.rss', 'export_rss', make_compat_redirect_func(_bp, 'export_atom'))
_bp.add_url_rule('/icon-<slug>.png', 'display_icon', RHCategoryIcon)
_bp.add_url_rule('/info', 'info', RHCategoryInfo)
_bp.add_url_rule('/info-from', 'info_from', RHReachableCategoriesInfo, methods=('GET', 'POST'))
_bp.add_url_rule('/logo-<slug>.png', 'display_logo', RHCategoryLogo)
_bp.add_url_rule('/overview', 'overview', RHCategoryOverview)
_bp.add_url_rule('/show-future-events', 'show_future_events', RHShowFutureEventsInCategory, methods=('DELETE', 'PUT'))
_bp.add_url_rule('/show-past-events', 'show_past_events', RHShowPastEventsInCategory, methods=('DELETE', 'PUT'))
_bp.add_url_rule('/statistics', 'statistics', RHCategoryStatistics)
_bp.add_url_rule('/statistics.json', 'statistics_json', RHCategoryStatisticsJSON)
_bp.add_url_rule('/subcat-info', 'subcat_info', RHSubcatInfo)
_bp.add_url_rule('/calendar', 'calendar', RHCategoryCalendarView)
_bp.add_url_rule('/upcoming', 'upcoming_event', RHCategoryUpcomingEvent)
# Event creation - redirect to anchor page opening the dialog
_bp.add_url_rule('/create/event/<any(lecture,meeting,conference):event_type>', view_func=_redirect_event_creation)
# Short URLs
_bp.add_url_rule('!/categ/<int:category_id>', view_func=redirect_view('.display'), strict_slashes=False)
_bp.add_url_rule('!/c/<int:category_id>', view_func=redirect_view('.display'), strict_slashes=False)
# Internal API
_bp.add_url_rule('!/category/search', 'search', RHCategorySearch)
_bp.add_url_rule('/api/event-move-requests', 'api_event_move_requests', RHAPIEventMoveRequests, methods=('GET', 'POST'))
# Administration
_bp.add_url_rule('!/admin/upcoming-events', 'manage_upcoming', RHManageUpcomingEvents, methods=('GET', 'POST'))
@_bp.before_request
def _redirect_to_bootstrap():
# No users in Indico yet? Redirect from index page to bootstrap form
    if (request.endpoint == 'categories.display' and not request.view_args['category_id'] and
not User.query.filter_by(is_system=False).has_rows()):
return redirect(url_for('bootstrap.index'))
_compat_bp = IndicoBlueprint('compat_categories', __name__)
_compat_bp.add_url_rule('/category/<legacy_category_id>/<path:path>', 'legacy_id', compat_category)
_compat_bp.add_url_rule('/category/<legacy_category_id>/', 'legacy_id', compat_category)
_compat_bp.add_url_rule('!/categoryDisplay.py
|
samuelmasuy/Concordia-Schedule-to-Gcal
|
app/scheduletogcal/calendar_controller.py
|
Python
|
gpl-2.0
| 5,318
| 0
|
# -*- coding: utf-8 -*-
# ===========================================================================
#
# Copyright (C) 2014 Samuel Masuy. All rights reserved.
# samuel.masuy@gmail.com
#
# ===========================================================================
"""
Calendar controller.
~~~~~~~~~~~~~~~~~~~
    Management of Google Calendar events and secondary calendars.
:copyright: (c) 2014 by Samuel Masuy.
:license: GNU version 2.0, see LICENSE for more details.
"""
import httplib2
from datetime import datetime, timedelta
from pytz import timezone
from oauth2client.client import OAuth2WebServerFlow, OAuth2Credentials
from apiclient.discovery import build
from flask import session, redirect, url_for
from app import app
from scraper import ScheduleScraper
from export_g_i import to_ical, to_gcal
from academic_dates import get_academic_dates
TIMEZONE = timezone("America/Montreal")
def get_flow(url):
scope = "https://www.googleapis.com/auth/calendar"
return OAuth2WebServerFlow(client_id=app.config['CLIENT_ID'],
client_secret=app.config['CLIENT_SECRET'],
scope=scope,
redirect_uri=(url + 'oauth2callback'))
def create_service():
"""Create a service to communicate with the Google API."""
try:
        token = session['credentials']
except KeyError:
return redirect(url_for('schedule_login'))
    credentials = OAuth2Credentials.from_json(token)
http = httplib2.Http()
http = credentials.authorize(http)
# Create service required to manipulate the user calendar.
service = build("calendar", "v3", http=http)
return service
def insert_calendar(service):
""" Insert a secondary calendar in the user's calendar repository. """
calendar = {
'summary': 'Schedule Concordia',
'timeZone': 'America/Montreal'
}
created_calendar = service.calendars().insert(body=calendar).execute()
return created_calendar['id']
def map_semester(semester):
    ''' Map a numeric semester code to its name. '''
semester_names = ['summer', 'fall', 'winter']
return dict(zip(['1', '2', '4'], semester_names))[semester]
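# Illustrative mapping (from the dict built above): map_semester('1') -> 'summer',
# map_semester('2') -> 'fall', map_semester('4') -> 'winter'.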
def insert_event(url, semester):
""" Insert events in the user calendar. """
service = create_service()
created_events_id = []
semester = map_semester(semester)
calendar_schedule = ScheduleScraper(url, semester)
# Parse the schedule and get the events.
gcal = to_gcal(calendar_schedule.course_list)
ical = to_ical(calendar_schedule.course_list).to_ical()
# Check if a secondary calendar for the schedule exists.
calendar_id = cal_lookup_id(service)
    # Make sure to delete all events and holidays during all summer months.
if semester == 'summer':
semester = 'summer_5B'
if calendar_id is None:
# Create a new secondary calendar.
calendar_id = insert_calendar(service)
else:
# Delete all the events during the semester concerned.
del_old_events(service, calendar_id, semester)
# Create all the events and get their ids.
created_events_id = [service.events().insert(calendarId=calendar_id,
body=event).execute()['id']
for event in gcal]
return calendar_id, created_events_id, ical
def cal_lookup_id(service):
"""Finds the id, if existant, of the Schedule Concordia user calendar."""
calendar_list = service.calendarList().list().execute()
for calendar_list_entry in calendar_list['items']:
if calendar_list_entry['summary'] == 'Schedule Concordia':
return calendar_list_entry['id']
return None
def del_old_events(service, cal_id, term):
"""Delete all the events previously created, in the secondary calendar,
in order to sustain an eventual update."""
semester_dates, _ = get_academic_dates(term)
first_day, last_day = semester_dates
last_day = last_day + timedelta(days=1)
# Get datetime range of the first week of the semester.
    dt_min = datetime(first_day.year, first_day.month, first_day.day,
tzinfo=TIMEZONE)
dt_max = datetime(last_day.year, last_day.month, last_day.day,
tzinfo=TIMEZONE)
# Create a list of all the events we need to delete.
old_events_list = service.events().list(
calendarId=cal_id,
timeMin=dt_min.isoformat(),
timeMax=dt_max.isoformat()).execute()
    old_recur_events = []
    for old_event in old_events_list.get('items', []):
        # events().list() returns a dict whose events live under 'items';
        # instances() needs each event's 'id', not a top-level dict key.
        old_recur_events.append(
            service.events().instances(calendarId=cal_id,
                                       eventId=old_event['id'],
                                       timeMin=dt_min.isoformat(),
                                       timeMax=dt_max.isoformat()).execute())
for summ in old_recur_events:
for ite in summ['items']:
service.events().delete(calendarId=cal_id,
eventId=ite['id']).execute()
def rollback(created_events, calendar):
""" Undo all changes that has been created in this application. """
service = create_service()
for event in created_events:
service.events().delete(calendarId=calendar, eventId=event).execute()
|
dubirajara/django_my_ideas_wall
|
myideas/api/urls.py
|
Python
|
agpl-3.0
| 931
| 0
|
"""tweetme URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.10/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.urls import path
from myideas.api import views
urlpatterns = [
path('', views.IdeasListApiView.as_view(), name='list_api'),
    path('<int:pk>/', views.IdeasIdApiView.as_view(), name="id_api"),
    path('<username>/', views.UserListApiView.as_view(), name="user_api"),
]
|
mjem/themelog
|
tests/test_stdout.py
|
Python
|
apache-2.0
| 395
| 0
|
#!/usr/bin/env python
"""Messages are written to stdout instead of the normal stderr.
"""
import logging
from themelog import init_log
logger = logging.getLogger()
init_log(stdout=True)
logger.debug('This is a debug message')
logger.info('This is a info message')
logger.warning('This is a warning message')
logger.error('This is a error message')
logger.critical('This is a critical message')
|
dvliman/jaikuengine
|
.google_appengine/google/appengine/tools/devappserver2/module_test.py
|
Python
|
apache-2.0
| 90,007
| 0.002822
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Tests for google.apphosting.tools.devappserver2.module."""
import httplib
import logging
import os
import re
import time
import unittest
import google
import mox
from google.appengine.api import appinfo
from google.appengine.api import request_info
from google.appengine.tools.devappserver2 import api_server
from google.appengine.tools.devappserver2 import application_configuration
from google.appengine.tools.devappserver2 import constants
from google.appengine.tools.devappserver2 import dispatcher
from google.appengine.tools.devappserver2 import instance
from google.appengine.tools.devappserver2 import module
from google.appengine.tools.devappserver2 import start_response_utils
from google.appengine.tools.devappserver2 import wsgi_server
class ModuleConfigurationStub(object):
def __init__(self,
application_root='/root',
application='app',
module_name='default',
automatic_scaling=appinfo.AutomaticScaling(),
version='version',
runtime='python27',
threadsafe=False,
skip_files='',
inbound_services=['warmup'],
handlers=[appinfo.URLMap(url=r'/python-(.*)',
script=r'\1.py')],
normalized_libraries=None,
env_variables=None,
manual_scaling=None,
basic_scaling=None):
self.application_root = application_root
self.application = application
self.module_name = module_name
self.automatic_scaling = automatic_scaling
self.manual_scaling = manual_scaling
self.basic_scaling = basic_scaling
self.major_version = version
self.runtime = runtime
self.threadsafe = threadsafe
self.skip_files = skip_files
self.inbound_services = inbound_services
self.handlers = handlers
self.normalized_libraries = normalized_libraries or []
self.env_variables = env_variables or []
self.version_id = '%s:%s.%s' % (module_name, version, '12345')
self.is_backend = False
def check_for_updates(self):
return set()
class ModuleFacade(module.Module):
def __init__(self,
module_configuration=ModuleConfigurationStub(),
instance_factory=None,
ready=True,
allow_skipped_files=False):
super(ModuleFacade, self).__init__(
module_configuration,
host='fakehost',
balanced_port=0,
api_port=8080,
auth_domain='gmail.com',
runtime_stderr_loglevel=1,
php_executable_path='/usr/bin/php-cgi',
enable_php_remote_debugging=False,
python_config=None,
cloud_sql_config=None,
default_version_port=8080,
port_registry=dispatcher.PortRegistry(),
request_data=None,
dispatcher=None,
max_instances=None,
use_mtime_file_watcher=False,
automatic_restarts=True,
allow_skipped_files=allow_skipped_files)
if instance_factory is not None:
self._instance_factory = instance_factory
self._ready = ready
@property
def ready(self):
return self._ready
@property
def balanced_port(self):
return self._balanced_port
class AutoScalingModuleFacade(module.AutoScalingModule):
def __init__(self,
module_configuration=ModuleConfigurationStub(),
balanced_port=0,
instance_factory=None,
max_instances=None,
ready=True):
super(AutoScalingModuleFacade, self).__init__(
module_configuration,
host='fakehost',
balanced_port=balanced_port,
api_port=8080,
auth_domain='gmail.com',
runtime_stderr_loglevel=1,
php_executable_path='/usr/bin/php-cgi',
enable_php_remote_debugging=False,
python_config=None,
cloud_sql_config=None,
default_version_port=8080,
port_registry=dispatcher.PortRegistry(),
request_data=None,
dispatcher=None,
max_instances=max_instances,
use_mtime_file_watcher=False,
automatic_restarts=True,
allow_skipped_files=False)
if instance_factory is not None:
self._instance_factory = instance_factory
self._ready = ready
@property
def ready(self):
return self._ready
@property
def balanced_port(self):
return self._balanced_port
class ManualScalingModuleFacade(module.ManualScalingModule):
def __init__(self,
module_configuration=ModuleConfigurationStub(),
balanced_port=0,
instance_factory=None,
ready=True):
super(ManualScalingModuleFacade, self).__init__(
module_configuration,
host='fakehost',
balanced_port=balanced_port,
api_port=8080,
auth_domain='gmail.com',
runtime_stderr_loglevel=1,
php_executable_path='/usr/bin/php-cgi',
enable_php_remote_debugging=False,
python_config=None,
cloud_sql_config=None,
default_version_port=8080,
port_registry=dispatcher.PortRegistry(),
request_data=None,
dispatcher=None,
max_instances=None,
use_mtime_file_watcher=False,
automatic_restarts=True,
allow_skipped_files=False)
if instance_factory is not None:
self._instance_factory = instance_factory
self._ready = ready
@property
def ready(self):
return self._ready
@property
def balanced_port(self):
return self._balanced_port
class BasicScalingModuleFacade(module.BasicScalingModule):
def __init__(self,
host='fakehost',
module_configuration=ModuleConfigurationStub(),
balanced_port=0,
instance_factory=None,
ready=True):
super(BasicScalingModuleFacade, self).__init__(
module_configuration,
host,
balanced_port=balanced_port,
api_port=8080,
auth_domain='gmail.com',
runtime_stderr_loglevel=1,
php_executable_path='/usr/bin/php-cgi',
enable_php_remote_debugging=False,
python_config=None,
cloud_sql_config=None,
default_version_port=8080,
port_registry=dispatcher.PortRegistry(),
request_data=None,
dispatcher=None,
max_instances=None,
use_mtime_file_watcher=False,
automatic_restarts=True,
allow_skipped_files=False)
if instance_factory is not None:
self._instance_factory = instance_factory
self._ready = ready
@property
def ready(self):
return self._ready
@property
def balanced_port(self):
return self._balanced_port
class BuildRequestEnvironTest(unittest.TestCase):
def setUp(self):
api_server.test_setup_stubs()
self.module = ModuleFacade()
def test_build_request_environ(self):
expected_environ = {
constants.FAKE_IS_ADMIN_HEADER: '1',
'HTTP_HOST': 'fakehost:8080',
'HTTP_HEADER': 'Value',
'HTTP_OTHER': 'Values',
'CONTENT_LENGTH': '4',
'PATH_INFO': '/foo',
'QUERY_STRING': 'bar=baz',
'REQUEST_METHOD': 'PUT',
'REMOTE_ADDR': '1.2.3.4',
'SERVER_NAME': 'fakehost',
'SERVER_PORT': '8080',
'SERVER_PROTOCOL': 'HTTP/1.1',
'wsgi.version': (1, 0),
'wsgi.url_scheme': 'http',
'wsgi.multithread': True,
'wsgi.multiprocess': True}
environ = self.module.build_request_environ(
'PUT', '/foo?bar=baz', [('Header', 'Value'), ('Other', 'Values')],
|
jingxiang-li/kaggle-yelp
|
model/level3_model.py
|
Python
|
mit
| 5,368
| 0
|
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import xgboost as xgb
import argparse
from os import path
import os
from hyperopt import fmin, tpe, hp, STATUS_OK, Trials
from utils import *
import pickle
np.random.seed(345345)
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--yix', type=int, default=0)
return parser.parse_args()
# functions for xgboost training
def evalF1(preds, dtrain):
from sklearn.metrics import f1_score
labels = dtrain.get_label()
return 'f1-score', f1_score(labels, preds > 0.5)
def fpreproc(dtrain, dtest, param):
label = dtrain.get_label()
ratio = float(np.sum(label == 0)) / np.sum(label == 1)
param['scale_pos_weight'] = ratio
return (dtrain, dtest, param)
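# Illustrative sketch (not part of the original pipeline): fpreproc lets
# xgb.cv recompute the class balance per call, e.g. with 90 negatives and
# 10 positives the positive class gets weighted 9x:
#
#   y = np.array([0] * 90 + [1] * 10)
#   d = xgb.DMatrix(np.random.rand(100, 3), label=y)
#   _, _, p = fpreproc(d, d, {})
#   assert p['scale_pos_weight'] == 9.0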
# functions for hyperparameters optimization
class Score:
def __init__(self, X, y):
self.dtrain = xgb.DMatrix(X, label=y)
def get_score(self, params):
params['max_depth'] = int(params['max_depth'])
params['min_child_weight'] = int(params['min_child_weight'])
params['num_boost_round'] = int(params['num_boost_round'])
print('Training with params:')
print(params)
cv_result = xgb.cv(params=params,
dtrain=self.dtrain,
num_boost_round=params['num_boost_round'],
nfold=5,
stratified=True,
feval=evalF1,
maximize=True,
fpreproc=fpreproc,
verbose_eval=True)
score = cv_result.ix[params['num_boost_round'] - 1, 0]
print(score)
return {'loss': -score, 'status': STATUS_OK}
def optimize(trials, X, y, max_evals):
space = {
'num_boost_round': hp.quniform('num_boost_round', 10, 200, 10),
'eta': hp.quniform('eta', 0.1, 0.3, 0.1),
'gamma': hp.quniform('gamma', 0, 1, 0.2),
'max_depth': hp.quniform('max_depth', 1, 6, 1),
'min_child_weight': hp.quniform('min_child_weight', 1, 3, 1),
'subsample': hp.quniform('subsample', 0.8, 1, 0.1),
'silent': 1,
'objective': 'binary:logistic'
}
s = Score(X, y)
best = fmin(s.get_score,
space,
algo=tpe.suggest,
trials=trials,
max_evals=max_evals
)
best['max_depth'] = int(best['max_depth'])
best['min_child_weight'] = int(best['min_child_weight'])
best['num_boost_round'] = int(best['num_boost_round'])
del s
return best
def out_fold_pred(params, X, y, reps):
    preds = np.zeros((y.shape[0]))
params['silent'] = 1
params['objective'] = 'binary:logistic'
params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1)
for train_ix, test_ix in makeKFold(5, y, reps):
X_train, X_test = X[train_ix, :], X[test_ix, :]
y_train = y[train_ix]
dtrain = xgb.DMatrix(X_train, label=y_train)
dtest = xgb.DMatrix(X_test)
bst = xgb.train(params=params,
dtrain=dtrain,
num_boost_round=params['num_boost_round'],
evals=[(dtrain, 'train')],
feval=evalF1,
maximize=True,
verbose_eval=None)
preds[test_ix] = bst.predict(dtest)
return preds
def get_model(params, X, y):
dtrain = xgb.DMatrix(X, label=y)
params['silent'] = 1
params['objective'] = 'binary:logistic'
params['scale_pos_weight'] = float(np.sum(y == 0)) / np.sum(y == 1)
bst = xgb.train(params=params,
dtrain=dtrain,
num_boost_round=params['num_boost_round'],
evals=[(dtrain, 'train')],
feval=evalF1,
maximize=True,
verbose_eval=None)
return bst
args = parse_args()
data_dir = '../level3-feature/' + str(args.yix)
X_train = np.load(path.join(data_dir, 'X_train.npy'))
X_test = np.load(path.join(data_dir, 'X_test.npy'))
y_train = np.load(path.join(data_dir, 'y_train.npy'))
print(X_train.shape, X_test.shape, y_train.shape)
X_train_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_train_ext.npy')
X_test_ext = np.load('../extra_ftrs/' + str(args.yix) + '/X_test_ext.npy')
print(X_train_ext.shape, X_test_ext.shape)
X_train = np.hstack((X_train, X_train_ext))
X_test = np.hstack((X_test, X_test_ext))
print('Add Extra')
print(X_train.shape, X_test.shape, y_train.shape)
trials = Trials()
params = optimize(trials, X_train, y_train, 100)
out_fold = out_fold_pred(params, X_train, y_train, 1)
clf = get_model(params, X_train, y_train)
dtest = xgb.DMatrix(X_test)
preds = clf.predict(dtest)
save_dir = '../level3-model-final/' + str(args.yix)
print(save_dir)
if not path.exists(save_dir):
os.makedirs(save_dir)
# save model, parameter, outFold_pred, pred
with open(path.join(save_dir, 'model.pkl'), 'wb') as f_model:
pickle.dump(clf, f_model)
with open(path.join(save_dir, 'param.pkl'), 'wb') as f_param:
pickle.dump(params, f_param)
np.save(path.join(save_dir, 'pred.npy'), preds)
np.save(path.join(save_dir, 'outFold.npy'), out_fold)
|
whitepages/nova
|
nova/tests/functional/api_sample_tests/test_shelve.py
|
Python
|
apache-2.0
| 2,280
| 0
|
# Copyright 2012 Nebula, Inc.
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from nova.tests.functional.api_sample_tests import test_servers
CONF = cfg.CONF
CONF.import_opt('shelved_offload_time', 'nova.compute.manager')
CONF.import_opt('osapi_compute_extension',
'nova.api.openstack.compute.legacy_v2.extensions')
class ShelveJsonTest(test_servers.ServersSampleBase):
extension_name = "os-shelve"
def _get_flags(self):
f = super(ShelveJsonTest, self)._get_flags()
f['osapi_compute_extension'] = CONF.osapi_compute_extension[:]
f['osapi_compute_extension'].append(
'nova.api.openstack.compute.contrib.shelve.Shelve')
return f
def setUp(self):
super(ShelveJsonTest, self).setUp()
# Don't offload instance, so we can test the offload call.
CONF.set_override('shelved_offload_time', -1)
def _test_server_action(self, uuid, template, action):
response = self._do_post('servers/%s/action' % uuid,
template, {'action': action})
self.assertEqual(response.status_code, 202)
self.assertEqual(response.content, "")
def test_shelve(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
def test_shelve_offload(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-shelve-offload', 'shelveOffload')
def test_unshelve(self):
uuid = self._post_server()
self._test_server_action(uuid, 'os-shelve', 'shelve')
self._test_server_action(uuid, 'os-unshelve', 'unshelve')
|
javier-ruiz-b/docker-rasppi-images
|
raspberry-google-home/env/lib/python3.7/site-packages/google/api/config_change_pb2.py
|
Python
|
apache-2.0
| 8,266
| 0.000484
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: google/api/config_change.proto
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name="google/api/config_change.proto",
package="google.api",
syntax="proto3",
serialized_options=b"\n\016com.google.apiB\021ConfigChangeProtoP\001ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\242\002\004GAPI
|
",
serialized_pb=b'\n\x1egoogle/api/config_change.proto\x12\ngoogle.api"\x97\x01\n\x0c\x43onfigChange\x12\x0f\n\x07\x65lement\x18\x01 \x01(\t\x12\x11\n\told_value\x18\x02 \x01(\t\x12\x11\n\tnew_value\x18\x03 \x01(\t\x12+\n\x0b\x63hange_type\x18\x04 \x01(\x0e\x32\x16.google.api.ChangeType\x12#\n\x0
|
7\x61\x64vices\x18\x05 \x03(\x0b\x32\x12.google.api.Advice"\x1d\n\x06\x41\x64vice\x12\x13\n\x0b\x64\x65scription\x18\x02 \x01(\t*O\n\nChangeType\x12\x1b\n\x17\x43HANGE_TYPE_UNSPECIFIED\x10\x00\x12\t\n\x05\x41\x44\x44\x45\x44\x10\x01\x12\x0b\n\x07REMOVED\x10\x02\x12\x0c\n\x08MODIFIED\x10\x03\x42q\n\x0e\x63om.google.apiB\x11\x43onfigChangeProtoP\x01ZCgoogle.golang.org/genproto/googleapis/api/configchange;configchange\xa2\x02\x04GAPIb\x06proto3',
)
_CHANGETYPE = _descriptor.EnumDescriptor(
name="ChangeType",
full_name="google.api.ChangeType",
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name="CHANGE_TYPE_UNSPECIFIED",
index=0,
number=0,
serialized_options=None,
type=None,
),
_descriptor.EnumValueDescriptor(
name="ADDED", index=1, number=1, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="REMOVED", index=2, number=2, serialized_options=None, type=None
),
_descriptor.EnumValueDescriptor(
name="MODIFIED", index=3, number=3, serialized_options=None, type=None
),
],
containing_type=None,
serialized_options=None,
serialized_start=231,
serialized_end=310,
)
_sym_db.RegisterEnumDescriptor(_CHANGETYPE)
ChangeType = enum_type_wrapper.EnumTypeWrapper(_CHANGETYPE)
CHANGE_TYPE_UNSPECIFIED = 0
ADDED = 1
REMOVED = 2
MODIFIED = 3
_CONFIGCHANGE = _descriptor.Descriptor(
name="ConfigChange",
full_name="google.api.ConfigChange",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="element",
full_name="google.api.ConfigChange.element",
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="old_value",
full_name="google.api.ConfigChange.old_value",
index=1,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="new_value",
full_name="google.api.ConfigChange.new_value",
index=2,
number=3,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="change_type",
full_name="google.api.ConfigChange.change_type",
index=3,
number=4,
type=14,
cpp_type=8,
label=1,
has_default_value=False,
default_value=0,
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
_descriptor.FieldDescriptor(
name="advices",
full_name="google.api.ConfigChange.advices",
index=4,
number=5,
type=11,
cpp_type=10,
label=3,
has_default_value=False,
default_value=[],
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=47,
serialized_end=198,
)
_ADVICE = _descriptor.Descriptor(
name="Advice",
full_name="google.api.Advice",
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name="description",
full_name="google.api.Advice.description",
index=0,
number=2,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=b"".decode("utf-8"),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR,
)
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=None,
is_extendable=False,
syntax="proto3",
extension_ranges=[],
oneofs=[],
serialized_start=200,
serialized_end=229,
)
_CONFIGCHANGE.fields_by_name["change_type"].enum_type = _CHANGETYPE
_CONFIGCHANGE.fields_by_name["advices"].message_type = _ADVICE
DESCRIPTOR.message_types_by_name["ConfigChange"] = _CONFIGCHANGE
DESCRIPTOR.message_types_by_name["Advice"] = _ADVICE
DESCRIPTOR.enum_types_by_name["ChangeType"] = _CHANGETYPE
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ConfigChange = _reflection.GeneratedProtocolMessageType(
"ConfigChange",
(_message.Message,),
{
"DESCRIPTOR": _CONFIGCHANGE,
"__module__": "google.api.config_change_pb2"
# @@protoc_insertion_point(class_scope:google.api.ConfigChange)
},
)
_sym_db.RegisterMessage(ConfigChange)
Advice = _reflection.GeneratedProtocolMessageType(
"Advice",
(_message.Message,),
{
"DESCRIPTOR": _ADVICE,
"__module__": "google.api.config_change_pb2"
# @@protoc_insertion_point(class_scope:google.api.Advice)
},
)
_sym_db.RegisterMessage(Advice)
|
yeti-platform/yeti
|
core/web/api/investigation.py
|
Python
|
apache-2.0
| 5,579
| 0.001255
|
from __future__ import unicode_literals
import logging
import re
from datetime import datetime
from bson import ObjectId
from bson.json_util import loads
from flask import request
from flask_classy import route
from flask_login import current_user
from core import investigation
from core.entities import Entity
from core.errors import ObservableValidationError
from core.helpers import iterify
from core.investigation import ImportResults
from core.observables import *
from core.web.api.api import render
from core.web.api.crud import CrudApi, CrudSearchApi
from core.web.helpers import get_object_or_404
from core.web.helpers import requires_permissions, get_queryset, get_user_groups
class InvestigationSearch(CrudSearchApi):
template = "investigation_api.html"
objectmanager = investigation.Investigation
def search(self, query):
fltr = query.get("filter", {})
params = query.get("params", {})
regex = params.pop("regex", False)
ignorecase = params.pop("ignorecase", False)
page = params.pop("page", 1) - 1
rng = params.pop("range", 50)
investigations = get_queryset(
self.objectmanager, fltr, regex, ignorecase, replace=False
)
if not current_user.has_role("admin"):
shared_ids = [current_user.id] + [group.id for group in get_user_groups()]
investigations = investigations.filter(
Q(sharing__size=0)
| Q(sharing__in=shared_ids)
| Q(sharing__exists=False)
)
return list(investigations)[page * rng : (page + 1) * rng]
class Investigation(CrudApi):
objectmanager = investigation.Investigation
@route("/add/<string:id>", methods=["POST"])
@requires_permissions("write")
def add(self, id):
i = get_object_or_404(self.objectmanager, id=id)
data = loads(request.data)
i.add(iterify(data["links"]), iterify(data["nodes"]))
return render(i.info())
@route("/remove/<string:id>", methods=["POST"])
@requires_permissions("write")
def remove(self, id):
i = get_object_or_404(self.objectmanager, id=id)
data = loads(request.data)
i.remove(iterify(data["links"]), iterify(data["nodes"]))
return render(i.info())
@route("/rename/<string:id>", methods=["POST"])
@requires_permissions("write")
def rename(self, id):
i = get_object_or_404(self.objectmanager, id=id)
        i.modify(name=request.json["name"], updated=datetime.utcnow())
return render("ok")
@route("/nodesearch/<path:que
|
ry>", methods=["GET"])
@requires_permissions("read")
def nodesearch(self, query):
result = []
query = re.compile("^{}".format(query), re.IGNORECASE)
observables = Observable.objects(value=query).limit(5)
entities = Entity.objects(name=query).limit(5)
for results in [observables, entities]:
for node in results:
result.append(node.to_mongo())
return render(result)
@route("/import_results/<string:id>")
@requires_permissions("read")
def import_results(self, id):
results = get_object_or_404(ImportResults, id=id)
return render(results.to_mongo())
@route("/bulk_add/<string:id>", methods=["POST"])
@requires_permissions("write")
def bulk_add(self, id):
i = get_object_or_404(self.objectmanager, id=id)
data = loads(request.data)
nodes = []
response = {"status": "ok", "message": ""}
try:
for node in data["nodes"]:
if node["type"] in globals() and issubclass(
globals()[node["type"]], Observable
):
_type = globals()[node["type"]]
try:
n = _type.get_or_create(value=node["value"])
except ObservableValidationError as e:
logging.error((node, e))
continue
if node["new_tags"]:
n.tag(node["new_tags"].split(", "))
nodes.append(n)
i.add([], nodes)
except Exception as e:
response = {"status": "error", "message": str(e)}
return render(response)
@route("/search_existence", methods=["POST"])
@requires_permissions("read")
def search_existence(self):
"""Query investigation based on given observable, incident or entity
Query[ref]: class of the given node, which should be observable or entity
Query[id]: the id of the given node.
"""
# ToDo sharing permissions
REF_CLASS = ("observable", "entity")
data = loads(request.data)
if "id" not in data or "ref" not in data:
response = {"status": "error", "message": "missing argument."}
elif not ObjectId.is_valid(data["id"]):
response = {"status": "error", "message": "given id is not valid."}
elif data["ref"] not in REF_CLASS:
response = {"status": "error", "message": "reference class is not valid."}
else:
query = {
"nodes": {
"$elemMatch": {"$id": ObjectId(data["id"]), "$ref": data["ref"]}
},
}
response = self.objectmanager.objects(__raw__=query).order_by("-updated")
for inv in response:
if not inv.name:
inv["name"] = "Unnamed Investigation"
return render(response)
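    # Illustrative payload for search_existence (hypothetical endpoint path
    # and id; the id must be a valid 24-character hex ObjectId):
    #
    #   POST /api/investigation/search_existence
    #   {"ref": "observable", "id": "5f2b9c0e1d41c842f0a1b2c3"}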
|
elbeardmorez/quodlibet
|
quodlibet/tests/test_qltk_cbes.py
|
Python
|
gpl-2.0
| 4,802
| 0.000208
|
# -*- coding: utf-8 -*-
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
from tests import TestCase, mkstemp
import os
from quodlibet.qltk.cbes import ComboBoxEntrySave, StandaloneEditor
import quodlibet.config
class TComboBoxEntrySave(TestCase):
memory = "pattern 1\npattern 2\n"
saved = "pattern text\npattern name\n"
def setUp(self):
quodlibet.config.init()
h, self.fname = mkstemp()
os.close(h)
with open(self.fname, "w") as f:
f.write(self.memory)
with open(self.fname + ".saved", "w") as f:
f.write(self.saved)
self.cbes = ComboBoxEntrySave(self.fname, count=2)
self.cbes2 = ComboBoxEntrySave(self.fname, count=2)
self.cbes3 = ComboBoxEntrySave(self.fname, count=2,
filter=lambda ls, it, *d: ls.get_value(it, 0) == "filter")
def test_equivalence(self):
model1 = self.cbes.model_store
model2 = self.cbes2.model_store
self.failUnlessEqual(model1, model2)
rows1 = list(model1)
rows2 = list(model2)
for row1, row2 in zip(rows1, rows2):
self.failUnlessEqual(row1[0], row2[0])
self.failUnlessEqual(row1[1], row2[1])
            self.failUnlessEqual(row1[2], row2[2])
def test_text_changed_signal(self):
called = [0]
def cb(*args):
called[0] += 1
def get_count():
c = called[0]
called[0] = 0
return c
self.cbes.connect("text-changed", cb)
entry = self.cbes.get_child()
entry.set_text("foo")
self.failUnlessEqual(get_count(), 1)
self.cbes.prepend_text("bar")
# in case the model got changed but the entry is still the same
# the text-changed signal should not be triggered
self.failUnlessEqual(entry.get_text(), "foo")
self.failUnlessEqual(get_count(), 0)
def test_shared_model(self):
self.cbes.prepend_text("a test")
self.test_equivalence()
def test_initial_size(self):
# 1 saved, Edit, separator, 3 remembered
self.failUnlessEqual(len(self.cbes.get_model()), 5)
def test_prepend_text(self):
self.cbes.prepend_text("pattern 3")
self.memory = "pattern 3\npattern 1\n"
self.test_save()
def test_save(self):
self.cbes.write()
self.failUnlessEqual(self.memory, open(self.fname).read())
self.failUnlessEqual(self.saved, open(self.fname + ".saved").read())
def test_set_text_then_prepend(self):
self.cbes.get_child().set_text("foobar")
self.cbes.prepend_text("foobar")
self.memory = "foobar\npattern 1\n"
self.test_save()
def test_filter(self):
self.cbes3.prepend_text("filter")
self.failUnlessEqual(1, len(self.cbes3.get_model()))
def tearDown(self):
self.cbes.destroy()
self.cbes2.destroy()
self.cbes3.destroy()
os.unlink(self.fname)
os.unlink(self.fname + ".saved")
quodlibet.config.quit()
class TStandaloneEditor(TestCase):
TEST_KV_DATA = [
("Search Foo", "https://foo.com/search?q=<artist>-<title>")]
def setUp(self):
quodlibet.config.init()
h, self.fname = mkstemp()
os.close(h)
with open(self.fname + ".saved", "w") as f:
f.write(
"%s\n%s\n" % (self.TEST_KV_DATA[0][1],
self.TEST_KV_DATA[0][0]))
self.sae = StandaloneEditor(self.fname, "test", None, None)
def test_constructor(self):
self.failUnless(self.sae.model)
data = [(row[1], row[0]) for row in self.sae.model]
self.failUnlessEqual(data, self.TEST_KV_DATA)
def test_load_values(self):
values = StandaloneEditor.load_values(self.fname + ".saved")
self.failUnlessEqual(self.TEST_KV_DATA, values)
def test_defaults(self):
defaults = [("Dot-com Dream", "http://<artist>.com")]
try:
os.unlink(self.fname)
except OSError:
pass
# Now create a new SAE without saved results and use defaults
self.fname = "foo"
self.sae.destroy()
self.sae = StandaloneEditor(self.fname, "test2", defaults, None)
self.sae.write()
data = [(row[1], row[0]) for row in self.sae.model]
self.failUnlessEqual(defaults, data)
def tearDown(self):
self.sae.destroy()
try:
os.unlink(self.fname)
os.unlink(self.fname + ".saved")
except OSError:
pass
quodlibet.config.quit()
|
loftytopping/UManSysProp_public
|
umansysprop/partition_models.py
|
Python
|
gpl-3.0
| 17,483
| 0.004519
|
# vim: set et sw=4 sts=4 fileencoding=utf-8:
#
# Copyright (c) 2016 David Topping.
# All Rights Reserved.
# This file is part of umansysprop.
#
# umansysprop is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# umansysprop is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# umansysprop. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (
unicode_literals,
absolute_import,
print_function,
division,
)
str = type('')
import warnings
from math import sqrt, exp, log, log10, isinf
from . import data
from . import groups
from . import activity_coefficient_models as aiomfac
from .forms import smiles
import numpy as np
class PartitioningIterationLimit(Warning):
"""
Warning raised when the partitioning loop is terminated due to an excessive
number of iterations
"""
class PartitioningPrecisionLimit(Warning):
"""
    Warning raised when the partitioning loop is terminated due to the
precision limit being reached
"""
class IdealPartitioning(object):
"""
Associates a SMILES compound with various data necessary for the ideal
partitioning model.
"""
def __init__(self, compound, abundance, temperature, pressure):
self.compound = compound
self.abundance = abundance
self.molar_mass = compound.molwt
# Mole-based Ci*, µmol/m³
self.c_i_star = (1e6 * 10 ** pressure) / (0.000082057 * temperature)
self.activity_coefficient = 1.0
# To be calculated iteratively by update()
self.condensed_abundance = None
self.activity = None
def update(self, coa, coefficient):
self.activity_coefficient=np.clip(coefficient, 1.0e-3, 10000.0)
self.condensed_abundance = ((1 + self.c_i_star*self.activity_coefficient / coa) ** -1) * self.abundance * (1e12 / 6.023e23)
self.activity = self.activity_coefficient * (self.condensed_abundance / coa)
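    # Worked sketch with illustrative numbers: if c_i_star equals coa
    # (say both 10 umol/m3) and the activity coefficient is 1, then
    # (1 + 10 * 1 / 10) ** -1 == 0.5, i.e. half of the (unit-converted)
    # abundance ends up in the condensed phase.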
class NonIdealPartitioning(IdealPartitioning):
"""
Extends :class:`IdealPartitioning` with the extra data required by the
AIOMFAC non-ideal partitioning model.
"""
def __init__(self, compound, abundance, temperature, pressure):
super(NonIdealPartitioning, self).__init__(
compound, abundance, temperature, pressure)
m = groups.aiomfac(compound)
# XXX What about inorganic?
#m.update(aiomfac_inorganic(inorganic_ions))
# XXX What do any of these variables mean?! Unfortunately the former
# coder left no clue beyond their name ...
self.q_k_i = { group: count * data.AIOMFAC_QI[group] for group, count in m.items() }
self.q_i = groups.aggregate_matches(m, data.AIOMFAC_QI)
self.r_i = groups.aggregate_matches(m, data.AIOMFAC_RI)
#self.t_i_m_n =
non_zero_groups = {
group
for compound, matches in m.items()
for group, count in matches.items()
if count > 0
}
q_k_i = {
compound: {
group: count * data.AIOMFAC_QI[group]
for group, count in matches.items()
}
for compound, matches in m.items()
}
q_i = {
compound: groups.aggregate_matches(matches, data.AIOMFAC_QI)
for compound, matches in m.items()
}
r_i = {
compound: groups.aggregate_matches(matches, data.AIOMFAC_RI)
for compound, matches in m.items()
}
t_i_m_n = {
compound: {
group1: {
group2: q_k_i[compound].get(group1, 0) * exp(
-data.AIOMFAC_SR_INTERACTIONS[main_group1][main_group2] /
temperature)
for group2 in non_zero_groups
for main_group2 in (data.AIOMFAC_MAIN_GROUP[group2],)
}
for group1 in non_zero_groups
for main_group1 in (data.AIOMFAC_MAIN_GROUP[group1],)
}
for compound, matches in m.items()
}
u_i_m_n = {
compound: {
group1: {
group2: q_k_i[compound].get(group1, 0) * exp(
-data.AIOMFAC_SR_INTERACTIONS[main_group2][main_group1] /
temperature)
for group2 in non_zero_groups
for main_group2 in (data.AIOMFAC_MAIN_GROUP[group2],)
}
for group1 in non_zero_groups
for main_group1 in (data.AIOMFAC_MAIN_GROUP[group1],)
}
for compound, matches in m.items()
}
s_i_n = {
compound: {
group1: sum(t_i_m_n[compound][group2][group1] for group2 in non_zero_groups)
for group1 in non_zero_groups
}
for compound in t_i_m_n
}
class WaterMixin(object):
"""
Water is special-cased in both partition models. This mixin class can be
added to either of the partitioning classes above to provide water data
for the model.
"""
def __init__(self, temperature, humidity):
# Saturation vapour pressure of water, Pa, Hyland, R. W. and A. Wexler,
# ASHRAE Trans, 89(2A), 500-519, 1983.
sat_vap_water = exp(
(-0.58002206e4 / temperature) + 0.13914993e1 -
(0.48640239e-1 * temperature) +
(0.41764768e-4 * temperature ** 2) -
(0.14452093e-7 * temperature ** 3) +
(0.65459673e1 * log(temperature)))
super(WaterMixin, self).__init__(
compound=smiles('O'),
abundance=(
(humidity / 100.0) * 6.023e23 * sat_vap_water * 1e-6 /
(8.314472 * temperature)
),
temperature=temperature,
pressure=log10(sat_vap_water * 9.86923267e-6),
)
# XXX This is wrong
self.molar_mass=18.016
# XXX Not sure why we're overriding the c_i_star calculation here,
# but it's necessary to match the original...
self.c_i_star=(1e6 * sat_vap_water) / (8.314472 * temperature)
class WaterIdealPartitioning(WaterMixin, IdealPartitioning):
pass
class WaterNonIdealPartitioning(WaterMixin, NonIdealPartitioning):
pass
def aiomfac_organic(compounds):
return {
compound: groups.aiomfac(compound)
for compound in compounds
}
def aiomfac_inorganic(compounds):
return {
compound: groups.matches(data.AIOMFAC_ION_SMARTS, compound)
for compound in compounds
}
def aiomfac_salts(compounds):
m = groups.all_matches(data.AIOMFAC_ION_SMARTS, compounds)
total_ions = groups.aggregate_matches(m, data.AIOMFAC_ION_CHARGE_ABS)
cations = {
ion
for ion, count in m.items()
if count and data.AIOMFAC_ION_CHARGE[ion] > 0.0
}
anions = {
ion
for ion, count in m.items()
if count and data.AIOMFAC_ION_CHARGE[ion] < 0.0
}
result = {}
for cation in cations:
for anion in anions:
salt = data.AIOMFAC_ION_SALT[(cation, anion)]
quantity = 2.0 * m[cation] * m[anion] * sqrt(
(data.AIOMFAC_ION_CHARGE_ABS[cation] * data.AIOMFAC_ION_CHARGE_ABS[anion]) /
(data.AIOMFAC_SALT_CATION_STOICH[salt] * data.AIOMFAC_SALT_ANION_STOICH[salt])
) / total_ions
if quantity > 0.0:
result[salt] = quantity
return result
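# Worked sketch (illustrative ion counts): for matches {Na+: 2, Cl-: 2}
# with unit charges and 1:1 stoichiometry, total_ions is 4 and the NaCl
# quantity is 2.0 * 2 * 2 * sqrt((1 * 1) / (1 * 1)) / 4 == 2.0, i.e. one
# formula unit of salt per cation/anion pair.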
def activity_coefficients_sr(organic_compounds, inorganic_ions, temperature):
m = aiomfac_organic(organic_compounds)
    m.update(aiomfac_inorganic(inorganic_ions))
|
Jidgdoi/MythologyQuest
|
src/JsonParser.py
|
Python
|
gpl-3.0
| 1,734
| 0.03291
|
# -*- coding:utf-8 -*-
#
# Cyril Fournier
# 10/08/2016
# =======================================
# Update the trackList.json of a JBrowse,
# by adding category, description, color
# =======================================
import os,sys
import json
from Hero import Hero
from Monster import Monster
from Item import Item
# ============================
# === FUNCTIONS ===
# ============================
def loadJSONmap(filehandler):
"""
Load the JSON part of a map file.
'filehandler": file object
"""
return json.loads( filehandler.read() )
def loadJSON(filename):
"""
Load a JSON file.
Output is a nested dictionary: {'Class': {'ID': {'attribute': value}}}.
'filename': JSON file to load.
"""
return json.loads( open(filename,"r").read() )
def readObject(jsonData, Class):
"""
Return all objects of type Class as a dictionary.
'jsonData': dictionary formated to JSON format.
'Class': Class to read.
"""
dObject = {}
for ID in jsonData[Class.__name__].keys():
obj = Class()
[setattr(obj, k, v) for k,v in jsonData[Class.__name__][ID].items()]
if Class.__name__ == 'Hero': obj.activeSprite()
dObject[ID] = obj
return dObject
def addObject(jsonData, Object):
"""
Add the instance of 'Object' to the JSON.
'jsonData': dictionary formated to JSON format.
'Object': Object to add.
"""
    if Object.id in jsonData[Object.__class__.__name__]: print("Error: the Object ID '%s' already exists in the JSON Data." %Object.id, file=sys.stderr)
else: jsonData[Object.__class__.__name__][Object.id] = vars(Object)
def writeJson(jsonData, oFile):
with open(oFile, "w") as fh:
json.dump(jsonData, fh, indent=4)
def printJson(jsonData):
print(json.dumps(jsonData, indent=4), file=sys.stderr)
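# Minimal usage sketch (hypothetical file name and object; not part of the
# original module):
#
#   data = loadJSON('save.json')
#   heroes = readObject(data, Hero)   # {'ID': Hero instance, ...}
#   addObject(data, Item())           # refuses IDs already in the JSON
#   writeJson(data, 'save.json')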
|
yelongyu/leetcode
|
136.single_number.py
|
Python
|
apache-2.0
| 801
| 0
|
# -*- coding: utf-8 -*-
""
|
"
Given an array of integers, every element appears twice except for one. Find
that single one.
Note:
Your algorithm should have a linear runtime complexity. Could you implement it
without using extra memory?
"""
__author__ = 'yelongyu1024@gmail.com'
class Solution(object):
def __init__(self, nums):
self.nums = nums
def single_number(self):
num_count = {}
for num in self.nums:
num_count.setdefault(num, 0)
num_count[num] = num_count.get(num) + 1
        print(num_count)
for key, value in num_count.items():
if value == 1:
return key
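    def single_number_xor(self):
        # Alternative sketch (not in the original submission) answering the
        # note's "no extra memory" hint: pairs cancel under XOR because
        # x ^ x == 0 and x ^ 0 == x, leaving only the unpaired element.
        result = 0
        for num in self.nums:
            result ^= num
        return result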
def main():
solution = Solution([2, 3, 5, 7, 9, 3, 5, 7, 9])
    print(solution.single_number())
if __name__ == '__main__':
main()
|
TheClimateCorporation/properscoring
|
properscoring/_brier.py
|
Python
|
apache-2.0
| 7,326
| 0.000956
|
import numpy as np
from ._utils import move_axis_to_end, suppress_warnings
def brier_score(observations, forecasts):
"""
Calculate the Brier score (BS)
The Brier score (BS) scores binary forecasts $k \in \{0, 1\}$,
    .. math::
BS(p, k) = (p_1 - k)^2,
where $p_1$ is the forecast probability of $k=1$.
Parameters
----------
observations, forecasts : array_like
Broadcast compatible arrays of forecasts (probabilities between 0 and
1) and observations (0, 1 or NaN).
Returns
-------
out : np.ndarray
Brier score for each forecast/observation.
References
----------
Jochen Broecker. Chapter 7 in Forecast Verification: A Practitioner's Guide
in Atmospheric Science. John Wiley & Sons, Ltd, Chichester, UK, 2nd
edition, 2012.
https://drive.google.com/a/climate.com/file/d/0B8AfRcot4nsIYmc3alpTeTZpLWc
Tilmann Gneiting and Adrian E. Raftery. Strictly proper scoring rules,
prediction, and estimation, 2005. University of Washington Department of
Statistics Technical Report no. 463R.
https://www.stat.washington.edu/research/reports/2004/tr463R.pdf
"""
machine_eps = np.finfo(float).eps
forecasts = np.asarray(forecasts)
if (forecasts < 0.0).any() or (forecasts > (1.0 + machine_eps)).any():
raise ValueError('forecasts must not be outside of the unit interval '
'[0, 1]')
observations = np.asarray(observations)
if observations.ndim > 0:
valid_obs = observations[~np.isnan(observations)]
else:
valid_obs = observations if not np.isnan(observations) else []
if not set(np.unique(valid_obs)) <= {0, 1}:
raise ValueError('observations can only contain 0, 1, or NaN')
return (forecasts - observations) ** 2
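# Worked example (illustrative): a forecast probability of 0.8 for an event
# that occurred (observation 1) scores (0.8 - 1) ** 2, i.e. approximately
# 0.04; a perfect forecast of 1.0 would score 0.0:
#
#   brier_score(1, 0.8)  # ~0.04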
def _threshold_brier_score_vectorized(observations, forecasts, thresholds):
observations = np.asarray(observations)
thresholds = np.asarray(thresholds)
forecasts = np.asarray(forecasts)
def exceedances(x):
# NaN safe calculation of threshold exceedances
# add an extra dimension to `x` and broadcast `thresholds` so that it
# varies along that new dimension
with suppress_warnings('invalid value encountered in greater'):
exceeds = (x[..., np.newaxis] >
thresholds.reshape((1,) * x.ndim + (-1,))
).astype(float)
if x.ndim == 0 and np.isnan(x):
exceeds[:] = np.nan
else:
exceeds[np.where(np.isnan(x))] = np.nan
return exceeds
binary_obs = exceedances(observations)
if observations.shape == forecasts.shape:
prob_forecast = exceedances(forecasts)
elif observations.shape == forecasts.shape[:-1]:
# axis=-2 should be the 'realization' axis, after swapping that axes
# to the end of forecasts and inserting one extra axis
with suppress_warnings('Mean of empty slice'):
prob_forecast = np.nanmean(exceedances(forecasts), axis=-2)
else:
raise AssertionError
return brier_score(binary_obs, prob_forecast)
try:
from ._gufuncs import _threshold_brier_score_gufunc as \
_threshold_brier_score_core
except ImportError:
_threshold_brier_score_core = _threshold_brier_score_vectorized
def threshold_brier_score(observations, forecasts, threshold, issorted=False,
axis=-1):
"""
Calculate the Brier scores of an ensemble for exceeding given thresholds.
According to the threshold decomposition of CRPS, the resulting Brier
scores can thus be summed along the last axis to calculate CRPS, as
.. math::
CRPS(F, x) = \int_z BS(F(z), H(z - x)) dz
where $F(x) = \int_{z \leq x} p(z) dz$ is the cumulative distribution
function (CDF) of the forecast distribution $F$, $x$ is a point estimate of
the true observation (observational error is neglected), $BS$ denotes the
Brier score and $H(x)$ denotes the Heaviside step function, which we define
here as equal to 1 for x >= 0 and 0 otherwise.
It is more efficient to calculate CRPS directly, but this threshold
decomposition itself provides a useful summary of model quality as a
function of measurement values.
The Numba accelerated version of this function is much faster for
calculating many thresholds simultaneously: it runs in time
O(N * (E * log(E) + T)), where N is the number of observations, E is the
ensemble size and T is the number of thresholds.
The non-Numba accelerated version requires time and space O(N * E * T).
Parameters
----------
observations : float or array_like
Observations float or array. Missing values (NaN) are given scores of
NaN.
forecasts : float or array_like
Array of forecasts ensemble members, of the same shape as observations
except for the extra axis corresponding to the ensemble. If forecasts
has the same shape as observations, the forecasts are treated as
deterministic. Missing values (NaN) are ignored.
threshold : scalar or 1d array_like
Threshold value(s) at which to calculate exceedence Brier scores.
issorted : bool, optional
        Optimization flag to indicate that the elements of `ensemble` are
        already sorted along `axis`.
axis : int, optional
Axis in forecasts which corresponds to different ensemble members,
along which to calculate the threshold decomposition.
Returns
-------
    out : np.ndarray
        Brier scores at each threshold for each ensemble forecast against the
observations. If ``threshold`` is a scalar, the result will have the
same shape as observations. Otherwise, it will have an additional final
dimension corresponding to the threshold levels.
References
----------
Gneiting, T. and Ranjan, R. Comparing density forecasts using threshold-
and quantile-weighted scoring rules. J. Bus. Econ. Stat. 29, 411-422
(2011). http://www.stat.washington.edu/research/reports/2008/tr533.pdf
See also
--------
crps_ensemble, brier_score
"""
observations = np.asarray(observations)
threshold = np.asarray(threshold)
forecasts = np.asarray(forecasts)
if axis != -1:
forecasts = move_axis_to_end(forecasts, axis)
if forecasts.shape == observations.shape:
forecasts = forecasts[..., np.newaxis]
if observations.shape != forecasts.shape[:-1]:
raise ValueError('observations and forecasts must have matching '
'shapes or matching shapes except along `axis=%s`'
% axis)
scalar_threshold = threshold.ndim == 0
if threshold.ndim > 1:
raise ValueError('threshold must be scalar or 1-dimensional')
if threshold.ndim == 1 and not (np.sort(threshold) == threshold).all():
raise ValueError('1D thresholds must be sorted')
threshold = threshold.reshape((1,) * observations.ndim + (-1,))
if not issorted:
forecasts = np.sort(forecasts, axis=-1)
result = _threshold_brier_score_core(observations, forecasts, threshold)
if scalar_threshold:
result = result.squeeze(axis=-1)
return result
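# Illustrative sketch of the documented CRPS link (hypothetical obs/fcsts
# arrays): summing Brier scores over a dense, evenly spaced threshold grid
# and multiplying by the grid spacing approximates CRPS:
#
#   thresholds = np.linspace(-5, 5, 1001)          # spacing 0.01
#   crps_approx = threshold_brier_score(obs, fcsts, thresholds).sum(-1) * 0.01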
|
hrayr-artunyan/shuup
|
shuup/front/basket/objects.py
|
Python
|
agpl-3.0
| 14,937
| 0.001674
|
# -*- coding: utf-8 -*-
# This file is part of Shuup.
#
# Copyright (c) 2012-2016, Shoop Ltd. All rights reserved.
#
# This source code is licensed under the AGPLv3 license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import unicode_literals
import random
from collections import Counter
from decimal import Decimal
import six
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
from shuup.core.models import OrderLineType, PaymentMethod, ShippingMethod
from shuup.core.order_creator import OrderSource, SourceLine
from shuup.core.order_creator._source import LineSource
from shuup.front.basket.storage import BasketCompatibilityError, get_storage
from shuup.utils.numbers import parse_decimal_string
from shuup.utils.objects import compare_partial_dicts
class BasketLine(SourceLine):
def __init__(self, source=None, **kwargs):
self.__in_init = True
super(BasketLine, self).__init__(source, **kwargs)
self.__in_init = False
@property
def shop_product(self):
"""
ShopProduct object of this line.
:rtype: shuup.core.models.ShopProduct
"""
return self.product.get_shop_instance(self.shop)
def cache_info(self, request):
product = self.product
# TODO: ensure shop identity?
price_info = product.get_price_info(request, quantity=self.quantity)
self.base_unit_price = price_info.base_unit_price
        self.discount_amount = price_info.discount_amount
assert self.price == price_info.price
self.net_weight = product.net_weight
self.gross_weight = product.gross_weight
self.shipping_mode = product.shipping_mode
self.sku = product.sku
self.text = product.safe_translation_getter("name", any_language=True)
@property
def type(self):
if self.product:
return OrderLineType.PRODUCT
else:
            return (self.__dict__.get("type") or OrderLineType.OTHER)
@type.setter
def type(self, type):
if self.__in_init:
self.__dict__["type"] = type
return
if self.product and type != OrderLineType.PRODUCT:
raise ValueError("Can not set a line type for a basket line when it has a product set")
if type not in OrderLineType.as_dict():
raise ValueError("Invalid basket line type. Only values of OrderLineType are allowed.")
self.__dict__["type"] = type
def set_quantity(self, quantity):
cls = Decimal if self.product.sales_unit.allow_fractions else int
self.quantity = cls(max(0, quantity))
@property
def can_delete(self):
return (self.type == OrderLineType.PRODUCT and self.line_source != LineSource.DISCOUNT_MODULE)
@property
def can_change_quantity(self):
return (self.type == OrderLineType.PRODUCT and self.line_source != LineSource.DISCOUNT_MODULE)
class BaseBasket(OrderSource):
def __init__(self, request, basket_name="basket"):
super(BaseBasket, self).__init__(request.shop)
self.basket_name = basket_name
self.request = request
if request:
self.ip_address = request.META.get("REMOTE_ADDR")
self.storage = get_storage()
self._data = None
self.dirty = False
self._lines_cache = None
self.customer = getattr(request, "customer", None)
self.orderer = getattr(request, "person", None)
self.creator = getattr(request, "user", None)
def _load(self):
"""
Get the currently persisted data for this basket.
This will only access the storage once per request in usual
circumstances.
:return: Data dict.
:rtype: dict
"""
if self._data is None:
try:
self._data = self.storage.load(basket=self)
except BasketCompatibilityError as error:
msg = _("Basket loading failed: Incompatible basket (%s)")
messages.error(self.request, msg % error)
self.storage.delete(basket=self)
self._data = self.storage.load(basket=self)
self.dirty = False
return self._data
def save(self):
"""
Persist any changes made into the basket to storage.
One does not usually need to directly call this;
:obj:`~shuup.front.middleware.ShuupFrontMiddleware` will usually
take care of it.
"""
self.clean_empty_lines()
self.storage.save(basket=self, data=self._data)
self.dirty = False
def delete(self):
"""
Clear and delete the basket data.
"""
self.storage.delete(basket=self)
self.uncache()
self._data = None
self.dirty = False
def finalize(self):
"""
Mark the basket as "completed" (i.e. an order is created/a conversion made).
This will also clear the basket's data.
"""
self.storage.finalize(basket=self)
self.uncache()
self._data = None
self.dirty = False
def clear_all(self):
"""
Clear all data for this basket.
"""
self._data = {}
self.uncache()
self.dirty = True
@property
def _data_lines(self):
"""
Get the line data (list of dicts).
If the list is edited, it must be re-assigned
to ``self._data_lines`` to ensure the `dirty`
flag gets set.
:return: List of data dicts
:rtype: list[dict]
"""
return self._load().setdefault("lines", [])
@_data_lines.setter
def _data_lines(self, new_lines):
"""
Set the line data (list of dicts).
Note that this assignment must be made instead
of editing `_data_lines` in-place to ensure
the `dirty` bit gets set.
:param new_lines: New list of lines.
:type new_lines: list[dict]
"""
self._load()["lines"] = new_lines
self.dirty = True
self.uncache()
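    # Illustrative sketch of the re-assignment pattern described above
    # (hypothetical caller code): mutating the returned list in place would
    # not set the `dirty` flag, so edits are written back through the setter:
    #
    #   lines = basket._data_lines
    #   lines.append(new_line_dict)
    #   basket._data_lines = lines   # marks dirty and clears caches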
def add_line(self, **kwargs):
line = self.create_line(**kwargs)
self._data_lines = self._data_lines + [line.to_dict()]
return line
def create_line(self, **kwargs):
return BasketLine(source=self, **kwargs)
@property
def _codes(self):
return self._load().setdefault("codes", [])
@_codes.setter
def _codes(self, value):
if hasattr(self, "_data"): # Check that we're initialized
self._load()["codes"] = value
def add_code(self, code):
modified = super(BaseBasket, self).add_code(code)
self.dirty = bool(self.dirty or modified)
return modified
def clear_codes(self):
modified = super(BaseBasket, self).clear_codes()
self.dirty = bool(self.dirty or modified)
return modified
def remove_code(self, code):
modified = super(BaseBasket, self).remove_code(code)
self.dirty = bool(self.dirty or modified)
return modified
def get_lines(self):
if self.dirty or not self._lines_cache:
lines = [BasketLine.from_dict(self, line) for line in self._data_lines]
orderable_lines = []
for line in lines:
if line.type != OrderLineType.PRODUCT:
orderable_lines.append(line)
elif line.shop_product.is_orderable(line.supplier, self.request.customer, line.quantity):
orderable_lines.append(line)
self._lines_cache = orderable_lines
return self._lines_cache
def _initialize_product_line_data(self, product, supplier, shop, quantity=0):
if product.variation_children.count():
raise ValueError("Attempting to add variation parent to basket")
return {
# TODO: FIXME: Make sure line_id's are unique (not random)
"line_id": str(random.randint(0, 0x7FFFFFFF)),
"product": product,
"supplier": supplier,
"shop": shop,
"quantity":
|
airbnb/knowledge-repo
|
knowledge_repo/postprocessors/extract_images_to_s3.py
|
Python
|
apache-2.0
| 2,903
| 0.001033
|
import os
import posixpath
import random
import string
import logging
import tempfile
import time
from .extract_images import ExtractImages
logger = logging.getLogger(__name__)
class ExtractImagesToS3(ExtractImages):
'''
This KnowledgePostProcessor subclass extracts images from posts to S3. It
is designed to be used upon addition to a knowledge repository, which can
reduce the size of repositories. It replaces local images with remote urls
based on `http_image_root`.
`s3_image_root` should be the root of the image folder on an S3 remote, such
as "s3://my_bucket/images".
`http_image_root` should be the root of the server where the images will be
accessible after uploading.
Note: This requires that user AWS credentials are set up appropriately and
that they have installed the aws cli packages.
'''
_registry_keys = ['extract_images_to_s3']
def __init__(self, s3_image_root, http_image_root):
self.s3_image_root = s3_image_root
self.http_image_root = http_image_root
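    # Hypothetical construction sketch (bucket and host are placeholders):
    #
    #   postprocessor = ExtractImagesToS3(
    #       s3_image_root='s3://my_bucket/images',
    #       http_image_root='https://images.example.com')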
def copy_image(self, kp, img_path, is_ref=False, repo_name='knowledge'):
# Copy image data to new file
if is_ref:
_, tmp_path = tempfile.mkstemp()
with open(tmp_path, 'wb') as f:
f.write(kp._read_ref(img_path))
else:
tmp_path = img_path
try:
# Get image type
img_ext = posixpath.splitext(img_path)[1]
# Make random filename for image
            random_string = ''.join(random.choice(string.ascii_lowercase) for i in range(6))
fname_img = '{repo_name}_{time}_{random_string}{ext}'.format(
repo_name=repo_name,
time=int(round(time.time() * 100)),
random_string=random_string,
ext=img_ext).strip().replace(' ', '-')
# Copy image to accessible folder on S3
fname_s3 = posixpath.join(self.s3_image_root, repo_name, fname_img)
            # Note: The following command may need to be prefixed with a login agent;
# for example, to handle multi-factor authentication.
cmd = "aws s3 cp '{0}' {1}".format(tmp_path, fname_s3)
logger.info("Uploading images to S3: {cmd}".format(cmd=cmd))
retval = os.system(cmd)
if retval != 0:
raise Exception('Problem uploading images to s3')
finally:
# Clean up temporary file
if is_ref:
os.remove(tmp_path)
# return uploaded path of file
return posixpath.join(self.http_image_root, repo_name, fname_img)
def skip_image(self, kp, image):
import re
if re.match('http[s]?://', image['src']):
return True
return False
def cleanup(self, kp):
if kp._has_ref('images'):
kp._drop_ref('images')
|
AndrewRook/game_designer
|
card_game/migrations/0002_auto__del_unique_cards__del_cards__del_versions__del_unique_versions__.py
|
Python
|
mit
| 7,131
| 0.006731
|
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting model 'unique_cards'
db.delete_table(u'card_game_unique_cards')
# Deleting model 'cards'
db.delete_table(u'card_game_cards')
# Deleting model 'versions'
db.delete_table(u'card_game_versions')
# Deleting model 'unique_versions'
db.delete_table(u'card_game_unique_versions')
# Adding model 'Unique_Card'
db.create_table(u'card_game_unique_card', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.Card'])),
))
db.send_create_signal(u'card_game', ['Unique_Card'])
# Adding model 'Card'
db.create_table(u'card_game_card', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('cost', self.gf('django.db.models.fields.IntegerField')(default=0)),
('art', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('text', self.gf('django.db.models.fields.TextField')()),
('power', self.gf('django.db.models.fields.IntegerField')(default=0)),
('toughness', self.gf('django.db.models.fields.IntegerField')(default=1)),
))
db.send_create_signal(u'card_game', ['Card'])
# Adding model 'Version'
db.create_table(u'card_game_version', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('version_number', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.Unique_Version'])),
('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.Card'])),
))
db.send_create_signal(u'card_game', ['Version'])
# Adding model 'Unique_Version'
db.create_table(u'card_game_unique_version', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=128)),
('description', self.gf('django.db.models.fields.CharField')(max_length=256)),
('creation_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'card_game', ['Unique_Version'])
def backwards(self, orm):
# Adding model 'unique_cards'
db.create_table(u'card_game_unique_cards', (
('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.cards'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'card_game', ['unique_cards'])
# Adding model 'cards'
db.create_table(u'card_game_cards', (
('toughness', self.gf('django.db.models.fields.IntegerField')(default=1)),
('art', self.gf('django.db.models.fields.files.ImageField')(max_length=100, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=64)),
('power', self.gf('django.db.models.fields.IntegerField')(default=0)),
('text', self.gf('django.db.models.fields.TextField')()),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('cost', self.gf('django.db.models.fields.IntegerField')(default=0)),
))
db.send_create_signal(u'card_game', ['cards'])
# Adding model 'versions'
db.create_table(u'card_game_versions', (
('version_number', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.unique_versions'])),
('card_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['card_game.cards'])),
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
))
db.send_create_signal(u'card_game', ['versions'])
# Adding model 'unique_versions'
db.create_table(u'card_game_unique_versions', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('description', self.gf('django.db.models.fields.CharField')(max_length=256)),
('name', self.gf('django.db.models.fields.CharField')(max_length=128, unique=True)),
('creation_date', self.gf('django.db.models.fields.DateField')(auto_now_add=True, blank=True)),
))
db.send_create_signal(u'card_game', ['unique_versions'])
# Deleting model 'Unique_Card'
db.delete_table(u'card_game_unique_card')
# Deleting model 'Card'
db.delete_table(u'card_game_card')
# Deleting model 'Version'
db.delete_table(u'card_game_version')
# Deleting model 'Unique_Version'
db.delete_table(u'card_game_unique_version')
models = {
u'card_game.card': {
'Meta': {'object_name': 'Card'},
'art': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'cost': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'power': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'text': ('django.db.models.fields.TextField', [], {}),
'toughness': ('django.db.models.fields.IntegerField', [], {'default': '1'})
},
u'card_game.unique_card': {
'Meta': {'object_name': 'Unique_Card'},
'card_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['card_game.Card']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
u'card_game.unique_version': {
'Meta': {'object_name': 'Unique_Version'},
'creation_date': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
u'card_game.version': {
'Meta': {'object_name': 'Version'},
'card_id': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['card_game.Card']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version_number': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['card_game.Unique_Version']"})
}
}
complete_apps = ['card_game']
|
Naereen/mazhe
|
phystricksRechercheTangente.py
|
Python
|
gpl-3.0
| 3,050
| 0.041981
|
# -*- coding: utf8 -*-
from __future__ import unicode_literals
from phystricks import *
def VGZooJnvvZc():
n_ssfig=6
pspictQuestion,figQuestion = SinglePicture("TangenteQuestionOM",script_filename="RechercheTangente")
    pspictDetail,figDetail = SinglePicture("TangenteDetailOM",script_filename="RechercheTangente")
pspictsSubFig,figSubFig = MultiplePictures("LesSubFiguresOM",n_ssfig)
pspicts=[pspictQuestion,pspictDetail]
pspicts.extend(pspictsSubFig)
mx=0.7
Mx=5
x=var('x')
f=phyFunction(-3/x+5).graph(mx,Mx)
P=f.get_point(1.7)
Q=f.get_point(4)
P.put_mark(0.3,P.advised_mark_angle(pspicts),"$P$",automatic_place=pspicts)
Q.put_mark(0.3,Q.advised_mark_angle(pspicts),"$Q$",automatic_place=pspicts)
Px=Point(P.x,0)
Py=Point(0,P.y)
    Qx=Point(Q.x,0)
Qy=Point(0,Q.y)
Py.put_mark(0.1,180,"$f(a)$",automatic_place=(pspicts,"E"))
Qy.put_mark(0.1,180,"$f(x)$",automatic_place=(pspicts,"E"))
Px.put_mark(0.2,-90,"$a$",automatic_place=(pspicts,"N"))
Qx.put_mark(0.2,-90,"$x$",automatic_place=(pspicts,"N"))
v1=Segment(Q,Qx)
v2=Segment(P,Px)
h1=Segment(Q,Qy)
h2=Segment(P,Py)
I=Intersection(v1,h2)[0]
h3=Segment(P,I)
v1.parameters.color="green"
v1.parameters.style="dashed"
v2.parameters=v1.parameters
h1.parameters=v1.parameters
h2.parameters=v1.parameters
h3.parameters=v1.parameters
corde=Segment(P,Q).dilatation(1.7)
corde.parameters.color="cyan"
Dx=MeasureLength(h3,0.2)
Dx.put_mark(0.2,-90,"$x-a$",automatic_place=(pspicts,"N"))
Dy=MeasureLength(Segment(Q,I),-0.2)
Dy.put_mark(0.2,0,"$f(x)-f(a)$",automatic_place=(pspicts,"W"))
pspictDetail.DrawGraphs(corde,v1,v2,h1,h2,h3,f,P,Px,Py,Q,Qx,Qy,Dx,Dy)
for psp in pspictsSubFig :
psp.mother.caption="\ldots de mieux en mieux \ldots"
psp.dilatation_X(1)
psp.dilatation_Y(1)
psp.DrawDefaultAxes()
pspictsSubFig[0].mother.caption="Pas très non \ldots"
pspictsSubFig[-1].mother.caption="\ldots presque parfait"
fixed_size=4
tangente=f.get_tangent_segment(P.x).fix_size(fixed_size)
tangente.parameters.color="red"
for i,psp in enumerate(pspictsSubFig):
psp.dilatation(0.7)
Qi = f.get_point( Q.x-i*(Q.x-P.x)/(n_ssfig) )
Qi.put_mark(0.3,Qi.advised_mark_angle(pspicts)+180,"$Q_{%s}$"%str(i),automatic_place=(pspicts,"corner"))
corde=Segment(P,Qi).fix_size(fixed_size)
corde.parameters.color="cyan"
psp.DrawGraphs(corde,tangente,f,Qi,P)
psp.axes.no_graduation()
psp.DrawDefaultAxes()
figSubFig.conclude()
figSubFig.write_the_file()
pspictDetail.axes.no_graduation()
pspictDetail.DrawDefaultAxes()
pspictDetail.dilatation(1)
figDetail.conclude()
figDetail.write_the_file()
pspictQuestion.DrawGraphs(f,P)
pspictQuestion.axes.no_graduation()
pspictQuestion.DrawDefaultAxes()
pspictQuestion.dilatation(0.7)
figQuestion.conclude()
figQuestion.write_the_file()
|
steinitzu/spotify-api
|
spotify/__init__.py
|
Python
|
mit
| 75
| 0.013333
|
from .client import Client
from .auth import OAuth
__version__ = '0.0.8'
|
piotrmaslanka/bellum
|
mother/ajax/__init__.py
|
Python
|
agpl-3.0
| 39
| 0
|
'''Contains AJAX apps of mother app'''
|
googleapis/python-aiplatform
|
samples/generated_samples/aiplatform_generated_aiplatform_v1_vizier_service_complete_trial_sync.py
|
Python
|
apache-2.0
| 1,480
| 0.000676
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for CompleteTrial
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-aiplatform
# [START aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_sync]
from google.cloud import aiplatform_v1
def sample_complete_trial():
# Create a client
client = aiplatform_v1.VizierServiceClient()
# Initialize request argument(s)
request = aiplatform_v1.CompleteTrialRequest(
name="name_value",
)
# Make the request
    response = client.complete_trial(request=request)
# Handle the response
print(response)
# [END aiplatform_generated_aiplatform_v1_VizierService_CompleteTrial_sync]
|
nwjs/chromium.src
|
third_party/blink/web_tests/http/tests/websocket/echo-with-no-extension_wsh.py
|
Python
|
bsd-3-clause
| 2,057
| 0
|
# Copyright 2012, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import six
_GOODBYE_MESSAGE = u'Goodbye'
def web_socket_do_extra_handshake(request):
request.ws_extension_processors = []
def web_socket_transfer_data(request):
while True:
line = request.ws_stream.receive_message()
if line is None:
return
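        # Echo text frames back as text and binary frames as binary;
        # a text frame equal to _GOODBYE_MESSAGE ends the connection.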
        if isinstance(line, six.text_type):
request.ws_stream.send_message(line, binary=False)
if line == _GOODBYE_MESSAGE:
return
else:
request.ws_stream.send_message(line, binary=True)
|
lariodiniz/GrimorioTRPG
|
grimorio/core/models.py
|
Python
|
gpl-2.0
| 3,520
| 0.011691
|
# coding: utf-8
#--------------//////////----------------------
#Projeto Criado por: Lário Diniz
#Contatos: developer.lario@gmail.com
#Data: 06/08/2015
#--------------//////////----------------------
from django.db import models
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _
from django.core.urlresolvers import reverse
import datetime
class Reach(models.Model):
name=models.CharField(_('Nome'), max_length=100, unique=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = _(u'Alcance')
verbose_name_plural = _(u'Alcances')
class Duration(models.Model):
name=models.CharField(_('Nome'), max_length=100, unique=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = _(u'Duração')
verbose_name_plural = _(u'Durações')
class Descriptors(models.Model):
name=models.CharField(_('Nome'), max_length=100, unique=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = _(u'Descritor')
verbose_name_plural = _(u'Descritores')
class Tipe(models.Model):
name=models.CharField(_('Nome'), max_length=100, unique=True)
def __unicode__(self):
return self.name
class Meta:
        ordering = ['name']
verbose_name = _(u'Tipo')
verbose_name_plural = _(u'Tipos')
class Book(models.Model):
name=models.CharField(_('Nome'), max_length=100, unique=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = _(u'Livro')
verbose_name_plural = _(u'Livros')
class Spells(models.Model):
name=models.CharField(_('Nome'), max_length=250, unique=True)
execution=models.CharField(_(u'Execução'), max_length=100)
nivel=models.IntegerField(_('Nivel da Magia'))
reach=models.ForeignKey('Reach', verbose_name=_('Alcance'), blank=True)
target=models.CharField(_('Alvo'), max_length=250)
duration=models.ForeignKey('Duration', verbose_name=_(u'Duração'), blank=True)
resistence=models.CharField(_('Teste de Resistencia'), max_length=100)
descriptors=models.ForeignKey('Descriptors', verbose_name=_(u'Descritores'), blank=True)
book=models.ForeignKey('Book', verbose_name=_(u'Livros'), blank=True)
page=models.IntegerField(_('Pagina'))
tipe=models.ForeignKey('Tipe', verbose_name=_(u'Tipo'), blank=True)
description=models.TextField(_(u'Descrição'), blank=True)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = _(u'Magia')
verbose_name_plural = _(u'Magias')
def get_absolute_url(self):
return reverse('magicdetail', kwargs={'pk': self.pk})
class Character(models.Model):
user = models.ForeignKey(User,verbose_name=_('Usuarios'), blank=True)
name=models.CharField(_('Nome'), max_length=100)
PM=models.IntegerField(_('PM MAX'))
spells=models.ManyToManyField('Spells', verbose_name=_('Magias'), blank=True)
descricao=models.CharField(_(u'Descrição'), max_length=250)
def __unicode__(self):
return self.name
class Meta:
ordering = ['name']
verbose_name = _(u'Personagen')
verbose_name_plural = _(u'personagens')
def get_absolute_url(self):
return reverse('detail-character', kwargs={'pk': self.pk})
|
mangadul/WhatsAppy
|
yowsup/common/constants.py
|
Python
|
gpl-3.0
| 837
| 0.003584
|
class YowConstants:
DOMAIN = "s.whatsapp.net"
ENDPOINTS = (
("e1.whatsapp.net", 443),
("e2.whatsapp.net", 443),
("e3.whatsapp.net", 443),
("e4.whatsapp.net", 443),
("e5.whatsapp.net", 443),
("e6.whatsapp.net", 443),
("e7.whatsapp.net", 443),
("e8.whatsapp.net", 443),
("e9.whatsapp.net", 443),
("e10.whatsapp.net", 443),
("e11.whatsapp.net", 443),
("e12.whatsapp.net", 443),
("e13.whatsapp.net", 443),
("e14.whatsapp.net", 443),
("e15.whatsapp.net", 443),
("e16.whatsapp.net", 443),
)
WHATSAPP_BROADCAST_SERVER = "broadcast"
    WHATSAPP_SERVER = "s.whatsapp.net"
WHATSAPP_GROUP_SERVER = "g.us"
PATH_STORAGE = "~/.yowsup"
PREVIEW_WIDTH = 64
PREVIEW_HEIGHT = 64
|
rwl/muntjac
|
muntjac/demo/sampler/features/commons/JSApiExample.py
|
Python
|
apache-2.0
| 4,137
| 0.000242
|
import time
import threading
from time import gmtime, strftime
from muntjac.api import Button, VerticalLayout, Label, TextArea
from muntjac.terminal.theme_resource import ThemeResource
from muntjac.ui.button import IClickListener
class JSApiExample(VerticalLayout):
def __init__(self):
super(JSApiExample, self).__init__()
self._toBeUpdatedFromThread = None
self._startThread = None
self._running = Label('')
self.setSpacing(True)
javascript = Label("<h3>Run Native JavaScript</h3>",
Label.CONTENT_XHTML)
self.addComponent(javascript)
script = TextArea()
script.setWidth('100%')
script.setRows(3)
script.setValue('alert(\"Hello Muntjac\");')
self.addComponent(script)
        self.addComponent(Button('Run script', RunListener(self, script)))
#        sync = Label("<h3>Force Server Synchronization</h3>",
# Label.CONTENT_XHTML)
# self.addComponent(sync)
#
# self.addComponent(Label('For advanced client side programmers '
# 'Muntjac offers a simple method which can be used to force '
# 'the client to synchronize with the server. This may be '
# 'needed for example if another part of a mashup changes '
# 'things on server.'))
#
# self._toBeUpdatedFromThread = Label("This Label component will be "
# "updated by a background thread. Click \"Start "
# "background thread\" button and start clicking "
# "on the link below to force "
# "synchronization.", Label.CONTENT_XHTML)
# self.addComponent(self._toBeUpdatedFromThread)
#
# # This label will be show for 10 seconds while the background process
# # is working
# self._running.setCaption('Background process is running for 10 '
# 'seconds, click the link below')
# self._running.setIcon(
# ThemeResource('../base/common/img/ajax-loader-medium.gif'))
#
# # Clicking on this button will start a repeating thread that updates
# # the label value
# self._startThread = Button('Start background thread',
# StartListener(self))
# self.addComponent(self._startThread)
#
# # This link will make an Ajax request to the server that will respond
# # with UI changes that have happened since last request
# self.addComponent(Label("<a href=\"javascript:vaadin.forceSync();\">"
# "javascript: vaadin.forceSync();</a>", Label.CONTENT_XHTML))
class RunListener(IClickListener):
def __init__(self, component, script):
self._component = component
self._script = script
def buttonClick(self, event):
self._component.getWindow().executeJavaScript(
str(self._script.getValue()))
class StartListener(IClickListener):
def __init__(self, component):
self._component = component
def buttonClick(self, event):
self._component._startThread.getParent().replaceComponent(
self._component._startThread,
self._component._running)
BackgroundProcess(self._component).start()
class BackgroundProcess(threading.Thread):
def __init__(self, component):
super(BackgroundProcess, self).__init__()
self._component = component
def run(self):
try:
i = 0
while i < 10:
                time.sleep(1)  # one second; the Java original passed milliseconds
self._component._toBeUpdatedFromThread.setValue(
'<strong>Server time is '
+ strftime("%H:%M:%S", gmtime())
+ '</strong>')
i += 1
self._component._toBeUpdatedFromThread.setValue(
'Background process finished')
self._component._running.getParent().replaceComponent(
self._component._running, self._component._startThread)
        except Exception, e:
            # The Java original caught InterruptedException here; Python
            # has no direct equivalent, so just report the failure.
            print e
|
WebClub-NITK/Hacktoberfest-2k17
|
Sorting/Bubble_Sort/nishanthebbar2011.py
|
Python
|
mit
| 318
| 0.034591
|
arr=[]
n=int(input("Please enter the number of elements and then the actual elements themselves!"))
for i in range(n):
x=int(input())
    arr.append(x)
for i in range(n-1):
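    # After pass i the i largest values have already bubbled to the end,
    # so the inner scan only needs to reach index n-i-1.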
for j in range(0,n-i-1):
if arr[j]>arr[j+1]:
arr[j],arr[j+1]=arr[j+1],arr[j]
print("The sorted arr is",arr)
|
bruny/romcollectionbrowser
|
resources/tests/test_matcher.py
|
Python
|
gpl-2.0
| 2,187
| 0.037401
|
# -*- coding: utf-8 -*-
import sys
import os
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'resources', 'lib'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'resources', 'lib', 'pyscraper'))
from matcher import Matcher
import util as util
import unittest
class TestMatcher(unittest.TestCase):
@classmethod
def setUpClass(cls):
# This is required so that readScraper() can parse the XML instruction files
util.RCBHOME = os.path.join(os.path.dirname(__file__), '..', '..')
# Test matching against a result set
def test_getBestResultsWithRomanNumerals(self):
results = [{'SearchKey': ['Tekken 2']}, {'SearchKey': ['Tekken 3']}, {'SearchKey': ['Tekken IV']}]
gamename = 'Tekken II'
m = Matcher()
x = m.getBestResults(results, gamename)
self.assertEquals(x.get('SearchKey')[0], 'Tekken 2')
def test_getBestResultsWithApostropheAndYear(self):
results = [{'SearchKey': ['FIFA 98']}, {'SearchKey': ['FIFA 97']}, {'SearchKey': ['FIFA 2001']}]
gamename = 'FIFA \'98'
m = Matcher()
x = m.getBestResults(results, gamename)
self.assertTrue(x.get('SearchKey')[0] == 'FIFA 98',
"Expected to match title (was {0})".format(x.get('SearchKey')[0]))
def test_getBestResultsMatchingWithUnicode(self):
results = [{'SearchKey': [u'スーパー競輪']}]
gamename = u'スーパー競輪'
m = Matcher()
x = m.getBestResults(results, gamename)
        self.assertTrue(x.get('SearchKey')[0] == u'スーパー競輪', "Expected matching unicode strings to match")
def test_getBestResultsNonMatchingWithUnicode(self):
results = [{'SearchKey': [u'スーパー競輪']}]
gamename = 'Super Test Game'
m = Matcher()
x = m.getBestResults(results, gamename)
self.assertIsNone(x, "Expected non-matching strings to not match, including unicode")
def test_getBestResultsWithBrackets(self):
        results = [{'SearchKey': ['FIFA 98']}, {'SearchKey': ['FIFA 97']}, {'SearchKey': ['FIFA 2001']}]
gamename = 'FIFA \'98 (1998) [Electronic Arts]'
m = Matcher()
x = m.getBestResults(results, gamename)
self.assertEquals(x.get('SearchKey')[0], 'FIFA 98')
if __name__ == "__main__":
unittest.main()
|
F5Networks/f5-ansible-modules
|
ansible_collections/f5networks/f5_modules/plugins/modules/bigiq_regkey_license_assignment.py
|
Python
|
mit
| 19,962
| 0.001904
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright: (c) 2017, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
DOCUMENTATION = r'''
---
module: bigiq_regkey_license_assignment
short_description: Manage regkey license assignment on BIG-IPs from a BIG-IQ
description:
- Manages the assignment of regkey licenses on a BIG-IQ. Assignment means
the license is assigned to a BIG-IP, or it needs to be assigned to a BIG-IP.
Additionally, this module supports revoking the assignments from BIG-IP devices.
version_added: "1.0.0"
options:
pool:
description:
- The registration key pool to use.
type: str
required: True
key:
description:
- The registration key you want to assign from the pool.
type: str
required: True
device:
description:
- When C(managed) is C(no), specifies the address, or hostname, where the BIG-IQ
can reach the remote device to register.
- When C(managed) is C(yes), specifies the managed device, or device UUID, that
you want to register.
- If C(managed) is C(yes), it is very important you do not have more than
one device with the same name. BIG-IQ internally recognizes devices by their ID,
and therefore, this module cannot guarantee the correct device will be
registered. The device returned is the device that is used.
type: str
required: True
managed:
description:
- Whether the specified device is a managed or un-managed device.
- When C(state) is C(present), this parameter is required.
type: bool
device_port:
description:
- Specifies the port of the remote device to connect to.
- If this parameter is not specified, the default is C(443).
type: int
default: 443
device_username:
description:
- The username used to connect to the remote device.
- This username should be one that has sufficient privileges on the remote device
to do licensing. Usually this is the C(Administrator) role.
- When C(managed) is C(no), this parameter is required.
type: str
device_password:
description:
- The password of the C(device_username).
- When C(managed) is C(no), this parameter is required.
type: str
state:
description:
- When C(present), ensures the device is assigned the specified license.
- When C(absent), ensures the license is revoked from the remote device and freed
on the BIG-IQ.
type: str
choices:
- present
- absent
default: present
extends_documentation_fragment: f5networks.f5_modules.f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Register an unmanaged device
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: 1.1.1.1
managed: no
device_username: admin
device_password: secret
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Register a managed device, by name
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: bigi1.foo.com
managed: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
- name: Register a managed device, by UUID
bigiq_regkey_license_assignment:
pool: my-regkey-pool
key: XXXX-XXXX-XXXX-XXXX-XXXX
device: 7141a063-7cf8-423f-9829-9d40599fa3e0
managed: yes
state: present
provider:
user: admin
password: secret
server: lb.mydomain.com
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
import re
import time
from datetime import datetime
from ansible.module_utils.basic import AnsibleModule
from ..module_utils.bigip import F5RestClient
from ..module_utils.common import (
F5ModuleError, AnsibleF5Parameters, f5_argument_spec
)
from ..module_utils.icontrol import bigiq_version
from ..module_utils.ipaddress import is_valid_ip
from ..module_utils.teem import send_teem
class Parameters(AnsibleF5Parameters):
api_map = {
'deviceReference': 'device_reference',
'deviceAddress': 'device_address',
'httpsPort': 'device_port'
}
api_attributes = [
'deviceReference', 'deviceAddress', 'httpsPort', 'managed'
]
returnables = [
'device_address', 'device_reference', 'device_username', 'device_password',
'device_port', 'managed'
]
updatables = [
'device_reference', 'device_address', 'device_username', 'device_password',
'device_port', 'managed'
]
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
raise
return result
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
@property
def device_password(self):
if self._values['device_password'] is None:
return None
return self._values['device_password']
@property
def device_username(self):
if self._values['device_username'] is None:
return None
return self._values['device_username']
@property
def device_address(self):
if self.device_is_address:
return self._values['device']
@property
def device_port(self):
if self._values['device_port'] is None:
return None
return int(self._values['device_port'])
@property
def device_is_address(self):
if is_valid_ip(self.device):
return True
return False
@property
def device_is_id(self):
pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
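        # e.g. 7141a063-7cf8-423f-9829-9d40599fa3e0, as in the
        # "by UUID" example above.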
if re.match(pattern, self.device):
return True
return False
@property
def device_is_name(self):
if not self.device_is_address and not self.device_is_id:
return True
return False
@property
def device_reference(self):
if not self.managed:
return None
if self.device_is_address:
# This range lookup is how you do lookups for single IP addresses. Weird.
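            # e.g. (hypothetical) address+eq+'10.0.0.5...10.0.0.5'
            # matches only the device at 10.0.0.5.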
filter = "address+eq+'{0}...{0}'".format(self.device)
elif self.device_is_name:
filter = "hostname+eq+'{0}'".format(self.device)
elif self.device_is_id:
filter = "uuid+eq+'{0}'".format(self.device)
else:
raise F5ModuleError(
"Unknown device format '{0}'".format(self.device)
)
uri = "https://{0}:{1}/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/" \
"?$filter={2}&$top=1".format(self.
|
client.provider['server'],
self.client.provider['server_port'], filter)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if resp.status == 200 and response['totalItems'] == 0:
raise F5ModuleError(
"No device with the specified address was found."
)
        elif 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp._content)
id = response['items'][0]['uuid']
result = dict(
link='https://localhost/mgmt/shared/resolver/device-groups/cm-bigip-allBigIpDevices/devices/{0}'.format(id)
)
return result
@property
def pool_id(self):
filter = "(name%20eq%20'{0}')".format(self.pool)
uri = 'https://{0}:{1}/mgmt/cm/device/licensing/pool/regkey/licenses?$filter={2}&$top=1'.format(
self
|
foursquare/pants
|
contrib/go/src/python/pants/contrib/go/tasks/go_thrift_gen.py
|
Python
|
apache-2.0
| 7,092
| 0.010011
|
# coding=utf-8
# Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
# Licensed under the Apache License, Version 2.0 (see LICENSE).
from __future__ import absolute_import, division, print_function, unicode_literals
import os
import re
from pants.backend.codegen.thrift.lib.thrift import Thrift
from pants.base.build_environment import get_buildroot
from pants.base.exceptions import TaskError
from pants.base.revision import Revision
from pants.base.workunit import WorkUnitLabel
from pants.option.custom_types import target_option
from pants.task.simple_codegen_task import SimpleCodegenTask
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.process_handler import subprocess
from twitter.common.collections import OrderedSet
from pants.contrib.go.targets.go_thrift_library import GoThriftGenLibrary, GoThriftLibrary
class GoThriftGen(SimpleCodegenTask):
sources_globs = ('**/*',)
@classmethod
def register_options(cls, register):
super(GoThriftGen, cls).register_options(register)
register('--strict', default=True, fingerprint=True, type=bool,
help='Run thrift compiler with strict warnings.')
register('--gen-options', advanced=True, fingerprint=True,
help='Use these apache thrift go gen options.')
register('--thrift-import', type=str, advanced=True, fingerprint=True,
help='Use this thrift-import gen option to thrift.')
register('--thrift-import-target', type=target_option, advanced=True,
help='Use this thrift import on symbolic defs.')
register('--multiple-files-per-target-override', advanced=True, fingerprint=True,
help='If set, multiple thrift files will be allowed per target, regardless of '
'thrift version. Otherwise, only versions greater than 0.10.0 will be assumed to '
'support multiple files.')
@classmethod
def subsystem_dependencies(cls):
return super(GoThriftGen, cls).subsystem_dependencies() + (Thrift.scoped(cls),)
@property
def _thrift_binary(self):
return self._thrift.select(context=self.context)
@property
def _thrift_version(self):
return self._thrift.version(context=self.context)
@memoized_property
def _thrift(self):
return Thrift.scoped_instance(self)
  @memoized_property
def _deps(self):
thrift_import_target = self.get_options().thrift_import_target
if thrift_import_target is None:
raise TaskError('Option thrift_import_target in scope {} must be set.'.format(
self.options_scope))
thrift_imports = self.context.resolve(thrift_import_target)
return thrift_imports
@memoized_property
def _service_deps(self):
service_deps = self.get_options().get('service_deps')
return list(self.resolve_deps(service_deps)) if service_deps else self._deps
SERVICE_PARSER = re.compile(r'^\s*service\s+(?:[^\s{]+)')
NAMESPACE_PARSER = re.compile(r'^\s*namespace go\s+([^\s]+)', re.MULTILINE)
def _declares_service(self, source):
with open(source) as thrift:
return any(line for line in thrift if self.SERVICE_PARSER.search(line))
def _get_go_namespace(self, source):
with open(source) as thrift:
namespace = self.NAMESPACE_PARSER.search(thrift.read())
if not namespace:
        raise TaskError('Thrift file {} must contain "namespace go "'.format(source))
return namespace.group(1)
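  # For example, a (hypothetical) thrift source containing the line
  #   namespace go foo.bar
  # yields "foo.bar", which synthetic_target_dir maps to the directory foo/bar.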
def synthetic_target_extra_dependencies(self, target, target_workdir):
for source in target.sources_relative_to_buildroot():
if self._declares_service(os.path.join(get_buildroot(), source)):
return self._service_deps
return self._deps
def synthetic_target_type(self, target):
return GoThriftGenLibrary
def is_gentarget(self, target):
return isinstance(target, GoThriftLibrary)
@memoized_method
def _validate_supports_more_than_one_source(self):
# Support for doing the right thing with multiple files landed in
# https://issues.apache.org/jira/browse/THRIFT-3776; first available in 0.10.0
if self.get_options().multiple_files_per_target_override:
return
required_version = '0.10.0'
if Revision.semver(self._thrift_version) < Revision.semver(required_version):
raise TaskError('A single .thrift source file is supported per go_thrift_library with thrift '
'version `{}`: upgrade to at least `{}` to support multiple files.'.format(
self._thrift_version, required_version))
@memoized_property
def _thrift_cmd(self):
cmd = [self._thrift_binary]
    thrift_import = self.get_options().thrift_import
    if thrift_import is None:
      raise TaskError('Option thrift_import in scope {} must be set.'.format(self.options_scope))
    thrift_import = 'thrift_import={}'.format(thrift_import)
gen_options = self.get_options().gen_options
if gen_options:
gen_options += ',' + thrift_import
else:
gen_options = thrift_import
cmd.extend(('--gen', 'go:{}'.format(gen_options)))
if self.get_options().strict:
cmd.append('-strict')
if self.get_options().level == 'debug':
cmd.append('-verbose')
return cmd
def _generate_thrift(self, target, target_workdir):
target_cmd = self._thrift_cmd[:]
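    # Copy the memoized base command so per-target flags do not leak
    # into later invocations.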
bases = OrderedSet(tgt.target_base for tgt in target.closure() if self.is_gentarget(tgt))
for base in bases:
target_cmd.extend(('-I', base))
target_cmd.extend(('-o', target_workdir))
all_sources = list(target.sources_relative_to_buildroot())
if len(all_sources) != 1:
self._validate_supports_more_than_one_source()
for source in all_sources:
file_cmd = target_cmd + [os.path.join(get_buildroot(), source)]
with self.context.new_workunit(name=source,
labels=[WorkUnitLabel.TOOL],
cmd=' '.join(file_cmd)) as workunit:
result = subprocess.call(file_cmd,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'))
if result != 0:
raise TaskError('{} ... exited non-zero ({})'.format(self._thrift_binary, result))
gen_dir = os.path.join(target_workdir, 'gen-go')
src_dir = os.path.join(target_workdir, 'src')
safe_mkdir(src_dir)
go_dir = os.path.join(target_workdir, 'src', 'go')
os.rename(gen_dir, go_dir)
@classmethod
def product_types(cls):
return ['go']
def execute_codegen(self, target, target_workdir):
self._generate_thrift(target, target_workdir)
@property
def _copy_target_attributes(self):
"""Override `_copy_target_attributes` to exclude `provides`."""
return [a for a in super(GoThriftGen, self)._copy_target_attributes if a != 'provides']
def synthetic_target_dir(self, target, target_workdir):
all_sources = list(target.sources_relative_to_buildroot())
source = all_sources[0]
namespace = self._get_go_namespace(source)
return os.path.join(target_workdir, 'src', 'go', namespace.replace(".", os.path.sep))
|
rbian/tp-libvirt
|
v2v/tests/linux_vm_check.py
|
Python
|
gpl-2.0
| 3,241
| 0.000309
|
import re
import logging
from autotest.client.shared import error
from virttest import utils_v2v
def run(test, params, env):
"""
Check VM after conversion
"""
target = params.get('target')
check_obj = utils_v2v.LinuxVMCheck(test, params, env)
logging.info("Check guest os info")
os_info = check_obj.get_vm_os_info()
os_vendor = check_obj.get_vm_os_vendor()
if os_vendor == 'Red Hat':
os_version = os_info.split()[6]
else:
raise error.TestFail("Only RHEL is supported now.")
logging.info("Check guest kernel after conversion")
kernel_version = check_obj.get_vm_kernel()
if re.search('xen', kernel_version):
raise error.TestFail("FAIL")
else:
logging.info("SUCCESS")
logging.info("
|
Check parted info after conversion")
parted_info = check_obj.get_vm_parted()
if os_version != '3':
if re.findall('/dev/vd\S+', parted_info):
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
logging.info("Check virtio_net module in modprobe co
|
nf")
modprobe_conf = check_obj.get_vm_modprobe_conf()
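    # After conversion each scsi/eth alias should have been rewritten
    # to a virtio module, so the two alias counts must match.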
if not re.search('No such file', modprobe_conf):
virtio_mod = re.findall(r'(?m)^alias.*virtio', modprobe_conf)
net_blk_mod = re.findall(r'(?m)^alias\s+scsi|(?m)^alias\s+eth',
modprobe_conf)
if len(virtio_mod) == len(net_blk_mod):
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
logging.info("Check virtio module")
modules = check_obj.get_vm_modules()
if os_version == '3':
if re.search("e1000|^ide", modules):
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
elif re.search("virtio", modules):
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
logging.info("Check virtio pci devices")
pci = check_obj.get_vm_pci_list()
if os_version != '3':
if (re.search('[Vv]irtio network', pci) and
re.search('[Vv]irtio block', pci)):
if target == "ovirt":
logging.info("SUCCESS")
elif (target != "ovirt" and
re.search('[Vv]irtio memory', pci)):
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
else:
raise error.TestFail("FAIL")
logging.info("Check in /etc/rc.local")
rc_output = check_obj.get_vm_rc_local()
    if re.search('^(modprobe|insmod).*xen-vbd.*', rc_output):
raise error.TestFail("FAIL")
else:
logging.info("SUCCESS")
logging.info("Check vmware tools")
if check_obj.has_vmware_tools() is False:
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
logging.info("Check tty")
tty = check_obj.get_vm_tty()
if re.search('[xh]vc0', tty):
raise error.TestFail("FAIL")
else:
logging.info("SUCCESS")
logging.info("Check video")
video = check_obj.get_vm_video()
if not re.search('el6', kernel_version):
if re.search('cirrus', video):
logging.info("SUCCESS")
else:
raise error.TestFail("FAIL")
|
cobbler/cobbler
|
tests/xmlrpcapi/repo_test.py
|
Python
|
gpl-2.0
| 3,559
| 0.000843
|
import pytest
@pytest.fixture
def create_repo(remote, token):
"""
Creates a Repository "testrepo0" with a mirror "http://www.sample.com/path/to/some/repo" and the attribute
"mirror_locally=0".
:param remote: The xmlrpc object to connect to.
:param token: The token to authenticate against the remote object.
"""
repo = remote.new_repo(token)
remote.modify_repo(repo, "name", "testrepo0", token)
remote.modify_repo(repo, "mirror", "http://www.sample.com/path/to/some/repo"
|
, token)
remote.modify_repo(repo, "mirror_locally", False, token)
remote.save_repo(repo, token)
@pytest.fixture
def remove_repo(remote, token):
"""
Removes the Repository "testrepo0" which can be created with create_repo.
:param remote: The xmlrpc object to connect to.
:param token: The token to authenticate against the remote object.
"""
yield
remote.remove_repo("testrepo0", token)
@pytest.mark.usefixtures("cobbler_xml
|
rpc_base")
class TestRepo:
@pytest.mark.usefixtures("remove_repo")
def test_create_repo(self, remote, token):
"""
Test: create/edit a repo object
"""
# Arrange --> Nothing to arrange
# Act & Assert
repo = remote.new_repo(token)
assert remote.modify_repo(repo, "name", "testrepo0", token)
assert remote.modify_repo(repo, "mirror", "http://www.sample.com/path/to/some/repo", token)
assert remote.modify_repo(repo, "mirror_locally", False, token)
assert remote.save_repo(repo, token)
def test_get_repos(self, remote):
"""
Test: Get repos
"""
# Arrange --> Nothing to do
# Act
result = remote.get_repos()
# Assert
assert result == []
@pytest.mark.usefixtures("create_repo", "remove_repo")
def test_get_repo(self, remote, token):
"""
Test: Get a repo object
"""
# Arrange --> Done in fixture
# Act
repo = remote.get_repo("testrepo0")
# Assert
assert repo.get("name") == "testrepo0"
@pytest.mark.usefixtures("create_repo", "remove_repo")
def test_find_repo(self, remote, token):
"""
Test: find a repo object
"""
# Arrange --> Done in fixture
# Act
result = remote.find_repo({"name": "testrepo0"}, token)
# Assert
assert result
@pytest.mark.usefixtures("create_repo", "remove_repo")
def test_copy_repo(self, remote, token):
"""
Test: copy a repo object
"""
# Arrange --> Done in fixture
# Act
repo = remote.get_item_handle("repo", "testrepo0", token)
# Assert
assert remote.copy_repo(repo, "testrepocopy", token)
# Cleanup
remote.remove_repo("testrepocopy", token)
@pytest.mark.usefixtures("create_repo")
def test_rename_repo(self, remote, token):
"""
Test: rename a repo object
"""
# Arrange
# Act
repo = remote.get_item_handle("repo", "testrepo0", token)
result = remote.rename_repo(repo, "testrepo1", token)
# Assert
assert result
# Cleanup
remote.remove_repo("testrepo1", token)
@pytest.mark.usefixtures("create_repo")
def test_remove_repo(self, remote, token):
"""
Test: remove a repo object
"""
# Arrange --> Done in fixture
# Act
result = remote.remove_repo("testrepo0", token)
# Assert
assert result
|
jedie/DragonPy
|
dragonpy/tests/test_BASIC_simple09.py
|
Python
|
gpl-3.0
| 25,042
| 0.000958
|
#!/usr/bin/env python
"""
6809 unittests
~~~~~~~~~~~~~~
Test CPU with BASIC Interpreter from simple6809 ROM.
:created: 2014 by Jens Diemer - www.jensdiemer.de
:copyleft: 2014-2015 by the DragonPy team, see AUTHORS for more details.
:license: GNU GPL v3 or above, see LICENSE for more details.
"""
import logging
from dragonpy.tests.test_base import Test6809_BASIC_simple6809_Base
from dragonpy.utils.BASIC09_floating_point import BASIC09FloatingPoint
log = logging.getLogger("DragonPy")
class Test_simple6809_BASIC(Test6809_BASIC_simple6809_Base):
def test_print01(self):
self.periphery.add_to_input_queue('? "FOO"\r\n')
op_call_count, cycles, output = self._run_until_OK()
# print(op_call_count, cycles, output)
self.assertEqual(output,
['? "FOO"\r\n', 'FOO\r\n', 'OK\r\n']
)
self.assertEqual(op_call_count, 1085)
        self.assertEqual(cycles, 7354) # TODO: cycles are probably not set correctly in the CPU, yet!
def test_print02(self):
self.periphery.add_to_input_queue('PRINT "BAR"\r\n')
op_call_count, cycles, output = self._run_until_OK()
# print(op_call_count, cycles, output)
self.assertEqual(output,
['PRINT "BAR"\r\n', 'BAR\r\n', 'OK\r\n']
)
self.assertEqual(op_call_count, 1424)
def test_print03(self):
self.periphery.add_to_input_queue('PRINT 0\r\n')
op_call_count, cycles, output = self._run_until_OK()
# print(op_call_count, cycles, output)
self.assertEqual(output,
['PRINT 0\r\n', ' 0 \r\n', 'OK\r\n']
)
self.assertEqual(op_call_count, 1366)
def test_print04(self):
self.periphery.add_to_input_queue('PRINT 4\r\n')
op_call_count, cycles, output = self._run_until_OK()
# print(op_call_count, cycles, output)
self.assertEqual(output,
['PRINT 4\r\n', ' 4 \r\n', 'OK\r\n']
)
self.assertEqual(op_call_count, 3184)
def test_STR(self):
self.periphery.add_to_input_queue(
'A=0\r\n'
'? "A="+STR$(A)\r\n'
)
op_call_count, cycles, output = self._run_until_OK(
OK_count=2, max_ops=20000
)
print(op_call_count, cycles, output)
self.assertEqual(output,
['A=0\r\n', 'OK\r\n', '? "A="+STR$(A)\r\n', 'A= 0\r\n', 'OK\r\n']
)
self.assertEqual(op_call_count, 11229)
def test_print_string_variable(self):
self.periphery.add_to_input_queue(
'A$="B"\r\n'
'?A$\r\n'
)
op_call_count, cycles, output = self._run_until_OK(
OK_count=2, max_ops=8500
)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['A$="B"\r\n', 'OK\r\n', '?A$\r\n', 'B\r\n', 'OK\r\n']
)
def test_TM_Error(self):
self.periphery.add_to_input_queue('X="Y"\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=3500)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['X="Y"\r\n', '?TM ERROR\r\n', 'OK\r\n']
)
class Test_simple6809_BASIC_Float1(Test6809_BASIC_simple6809_Base):
def test_print_float(self):
self.periphery.add_to_input_queue('?2.5\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=5500)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?2.5\r\n', ' 2.5 \r\n', 'OK\r\n']
)
def test_print_negative_float(self):
self.periphery.add_to_input_queue('?-3.4\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=6300)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?-3.4\r\n', '-3.4 \r\n', 'OK\r\n']
)
def test_print_rounded_float(self):
self.periphery.add_to_input_queue('?1.123456789\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=15000)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?1.123456789\r\n', ' 1.12345679 \r\n', 'OK\r\n']
)
def test_division1(self):
self.periphery.add_to_input_queue('?6/2\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=4500)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?6/2\r\n', ' 3 \r\n', 'OK\r\n']
)
def test_division2(self):
        self.periphery.add_to_input_queue('?3/2\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=4500)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?3/2\r\n', ' 1.5 \r\n', 'OK\r\n']
)
def test_division3(self):
self.periphery.add_to_input_queue('?5/3\r\n')
        op_call_count, cycles, output = self._run_until_OK(max_ops=5100)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?5/3\r\n', ' 1.66666667 \r\n', 'OK\r\n']
)
def test_multiply1(self):
self.periphery.add_to_input_queue('?3*2\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=4500)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?3*2\r\n', ' 6 \r\n', 'OK\r\n']
)
def test_multiply2(self):
self.periphery.add_to_input_queue('?8*-3\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=5100)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?8*-3\r\n', '-24 \r\n', 'OK\r\n']
)
class Test_simple6809_BASIC_NumericFunctions(Test6809_BASIC_simple6809_Base):
def test_ABS(self):
self.periphery.add_to_input_queue('?ABS(-2)\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=7900)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?ABS(-2)\r\n', ' 2 \r\n', 'OK\r\n']
)
def test_ATN(self):
self.periphery.add_to_input_queue('?ATN(2)\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=17200)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?ATN(2)\r\n', ' 1.10714872 \r\n', 'OK\r\n']
)
def test_COS(self):
self.periphery.add_to_input_queue('?COS(3)\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=15000)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?COS(3)\r\n', '-.989992497 \r\n', 'OK\r\n']
)
def test_EXP(self):
self.periphery.add_to_input_queue('?EXP(10)\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=14000)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?EXP(10)\r\n', ' 22026.4658 \r\n', 'OK\r\n']
)
def test_FIX(self):
self.periphery.add_to_input_queue('?FIX(-7.4)\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=11000)
# print(op_call_count, cycles, output)
self.assertEqual(output,
['?FIX(-7.4)\r\n', '-7 \r\n', 'OK\r\n']
)
def test_INT(self):
self.periphery.add_to_input_queue('?INT(-7.4)\r\n')
op_call_count, cycles, output = self._run_until_OK(max_ops=11000)
# print(op_call_count, cycles, output)
self.assertEqual(output,
            ['?INT(-7.4)\r\n', '-8 \r\n', 'OK\r\n']
        )
|
abhikeshav/ydk-py
|
cisco-ios-xr/ydk/models/cisco_ios_xr/_meta/_Cisco_IOS_XR_infra_syslog_oper.py
|
Python
|
apache-2.0
| 26,188
| 0.017413
|
import re
import collections
from enum import Enum
from ydk._core._dm_meta_info import _MetaInfoClassMember, _MetaInfoClass, _MetaInfoEnum
from ydk.types import Empty, YList, YLeafList, DELETE, Decimal64, FixedBitsDict
from ydk._core._dm_meta_info import ATTRIBUTE, REFERENCE_CLASS, REFERENCE_LIST, REFERENCE_LEAFLIST, REFERENCE_IDENTITY_CLASS, REFERENCE_ENUM_CLASS, REFERENCE_BITS, REFERENCE_UNION
from ydk.errors import YPYError, YPYModelError
from ydk.providers._importer import _yang_ns
_meta_table = {
'SystemMessageSeverityEnum' : _MetaInfoEnum('SystemMessageSeverityEnum', 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper',
{
'message-severity-unknown':'MESSAGE_SEVERITY_UNKNOWN',
'message-severity-emergency':'MESSAGE_SEVERITY_EMERGENCY',
'message-severity-alert':'MESSAGE_SEVERITY_ALERT',
'message-severity-critical':'MESSAGE_SEVERITY_CRITICAL',
'message-severity-error':'MESSAGE_SEVERITY_ERROR',
'message-severity-warning':'MESSAGE_SEVERITY_WARNING',
'message-severity-notice':'MESSAGE_SEVERITY_NOTICE',
'message-severity-informational':'MESSAGE_SEVERITY_INFORMATIONAL',
'message-severity-debug':'MESSAGE_SEVERITY_DEBUG',
}, 'Cisco-IOS-XR-infra-syslog-oper', _yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper']),
'Logging.History' : {
'meta_info' : _MetaInfoClass('Logging.History',
False,
[
_MetaInfoClassMember('message', ATTRIBUTE, 'str' , None, None,
[], [],
''' Syslog Message
''',
'message',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('properties', ATTRIBUTE, 'str' , None, None,
[], [],
''' Syslog Properties
''',
'properties',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'history',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Logging' : {
'meta_info' : _MetaInfoClass('Logging',
False,
[
            _MetaInfoClassMember('history', REFERENCE_CLASS, 'History' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Logging.History',
[], [],
''' Syslog Info
''',
'history',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'logging',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.AnRemoteServers.AnRemoteLogServer' : {
'meta_info' : _MetaInfoClass('Syslog.AnRemoteServers.AnRemoteLogServer',
False,
[
_MetaInfoClassMember('ip-address', ATTRIBUTE, 'str' , None, None,
[], [],
''' IP Address
''',
'ip_address',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('rh-discriminator', ATTRIBUTE, 'str' , None, None,
[], [],
''' Remote-Host Discriminator
''',
'rh_discriminator',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('vrf-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF Name
''',
'vrf_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('vrf-severity', ATTRIBUTE, 'str' , None, None,
[], [],
''' VRF Severity
''',
'vrf_severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'an-remote-log-server',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.AnRemoteServers' : {
'meta_info' : _MetaInfoClass('Syslog.AnRemoteServers',
False,
[
_MetaInfoClassMember('an-remote-log-server', REFERENCE_LIST, 'AnRemoteLogServer' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'Syslog.AnRemoteServers.AnRemoteLogServer',
[], [],
''' AN Remote Log Servers
''',
'an_remote_log_server',
'Cisco-IOS-XR-infra-syslog-oper', False),
],
'Cisco-IOS-XR-infra-syslog-oper',
'an-remote-servers',
_yang_ns._namespaces['Cisco-IOS-XR-infra-syslog-oper'],
'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper'
),
},
'Syslog.Messages.Message' : {
'meta_info' : _MetaInfoClass('Syslog.Messages.Message',
False,
[
_MetaInfoClassMember('message-id', ATTRIBUTE, 'int' , None, None,
[(-2147483648, 2147483647)], [],
''' Message ID of the system message
''',
'message_id',
'Cisco-IOS-XR-infra-syslog-oper', True),
_MetaInfoClassMember('card-type', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message card location: 'RP', 'DRP', 'LC', 'SC',
'SP' or 'UNK'
''',
'card_type',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('category', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message category
''',
'category',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('group', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message group
''',
'group',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('message-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Message name
''',
'message_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('node-name', ATTRIBUTE, 'str' , None, None,
[], ['([a-zA-Z0-9_]*\\d+/){1,2}([a-zA-Z0-9_]*\\d+)'],
''' Message source location
''',
'node_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('process-name', ATTRIBUTE, 'str' , None, None,
[], [],
''' Process name
''',
'process_name',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('severity', REFERENCE_ENUM_CLASS, 'SystemMessageSeverityEnum' , 'ydk.models.cisco_ios_xr.Cisco_IOS_XR_infra_syslog_oper', 'SystemMessageSeverityEnum',
[], [],
''' Message severity
''',
'severity',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('text', ATTRIBUTE, 'str' , None, None,
[], [],
''' Additional message text
''',
'text',
'Cisco-IOS-XR-infra-syslog-oper', False),
_MetaInfoClassMember('time-of-day', ATTRIBUTE, 'str' , None, None,
[], [],
''' Time of day of event in DDD MMM DD YYYY HH:MM
:SS format, e.g Wed Apr 01 2009 15:50:26
''',
|
GuessWhoSamFoo/pandas
|
pandas/tests/io/json/test_ujson.py
|
Python
|
bsd-3-clause
| 38,511
| 0
|
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import calendar
import datetime
import decimal
from functools import partial
import locale
import math
import re
import time
import dateutil
import numpy as np
import pytest
import pytz
import pandas._libs.json as ujson
from pandas._libs.tslib import Timestamp
import pandas.compat as compat
from pandas.compat import StringIO, range, u
from pandas import DataFrame, DatetimeIndex, Index, NaT, Series, date_range
import pandas.util.testing as tm
json_unicode = (json.dumps if compat.PY3
else partial(json.dumps, encoding="utf-8"))
def _clean_dict(d):
"""
Sanitize dictionary for JSON by converting all keys to strings.
Parameters
----------
d : dict
The dictionary to convert.
Returns
-------
cleaned_dict : dict
"""
return {str(k): v for k, v in compat.iteritems(d)}
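# Illustrative (hypothetical values): _clean_dict({1: "a"}) == {"1": "a"},
# so the dict can round-trip through JSON, whose object keys are strings.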
@pytest.fixture(params=[
None, # Column indexed by default.
"split",
"records",
"values",
"index"])
def orient(request):
return request.param
@pytest.fixture(params=[None, True])
def numpy(request):
return request.param
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encode_decimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
@pytest.mark.parametrize("ensure_ascii", [True, False])
def test_encode_string_conversion(self, ensure_ascii):
string_input = "A string \\ / \b \f \n \r \t </script> &"
not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
'\\r \\t <\\/script> &"')
html_encoded = ('"A string \\\\ \\/ \\b \\f \\n \\r \\t '
'\\u003c\\/script\\u003e \\u0026"')
def helper(expected_output, **encode_kwargs):
output = ujson.encode(string_input,
ensure_ascii=ensure_ascii,
**encode_kwargs)
assert output == expected_output
assert string_input == json.loads(output)
assert string_input == ujson.decode(output)
# Default behavior assumes encode_html_chars=False.
helper(not_html_encoded)
# Make sure explicit encode_html_chars=False works.
helper(not_html_encoded, encode_html_chars=False)
# Make sure explicit encode_html_chars=True does the encoding.
helper(html_encoded, encode_html_chars=True)
@pytest.mark.parametrize("long_number", [
-4342969734183514, -12345678901234.56789012, -528656961.4399388
])
def test_double_long_numbers(self, long_number):
sut = {u("a"): long_number}
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encode_non_c_locale(self):
lc_category = locale.LC_NUMERIC
# We just need one of these locales to work.
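        # Italian locales use ',' as the decimal separator, which must
        # not leak into the JSON float encoding below.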
for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
if tm.can_set_locale(new_locale, lc_category):
with tm.set_locale(new_locale, lc_category):
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads("4.78", precise_float=True) == 4.78
break
def test_decimal_decode_test_precise(self):
sut = {u("a"): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encode_double_tiny_exponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
@pytest.mark.parametrize("unicode_key", [
u("key1"), u("بن")
])
def test_encode_dict_with_unicode_keys(self, unicode_key):
unicode_dict = {unicode_key: u("value1")}
assert unicode_dict == ujson.decode(ujson.encode(unicode_dict))
@pytest.mark.parametrize("double_input", [
math.pi,
-math.pi # Should work with negatives too.
])
def test_encode_double_conversion(self, double_input):
output = ujson.encode(double_input)
assert round(double_input, 5) == round(json.loads(output), 5)
assert round(double_input, 5) == round(ujson.decode(output), 5)
def test_encode_with_decimal(self):
decimal_input = 1.0
output = ujson.encode(decimal_input)
assert output == "1.0"
def test_encode_array_of_nested_arrays(self):
nested_input = [[[[]]]] * 20
output = ujson.encode(nested_input)
assert nested_input == json.loads(output)
assert nested_input == ujson.decode(output)
nested_input = np.array(nested_input)
tm.assert_numpy_array_equal(nested_input, ujson.decode(
output, numpy=True, dtype=nested_input.dtype))
def test_encode_array_of_doubles(self):
doubles_input = [31337.31337, 31337.31337,
31337.31337, 31337.31337] * 10
output = ujson.encode(doubles_input)
assert doubles_input == json.loads(output)
assert doubles_input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(doubles_input),
ujson.decode(output, numpy=True))
def test_double_precision(self):
double_input = 30.012345678901234
output = ujson.encode(double_input, double_precision=15)
assert double_input == json.loads(output)
        assert double_input == ujson.decode(output)
        for double_precision in (3, 9):
output = ujson.encode(double_input,
double_precision=double_precision)
rounded_input = round(double_input, double_precision)
assert rounded_input == json.loads(output)
assert rounded_input == ujson.decode(output)
@pytest.mark.parametrize("invalid_val", [
20, -1, "9", None
])
def test_invalid_double_precision(self, invalid_val):
double_input = 30.12345678901234567890
expected_exception = (ValueError if isinstance(invalid_val, int)
else TypeError)
        with pytest.raises(expected_exception):
            ujson.encode(double_input, double_precision=invalid_val)
|
Pierre-Sassoulas/django-survey
|
survey/exporter/tex/question2tex_chart.py
|
Python
|
agpl-3.0
| 4,424
| 0.000226
|
import logging
from django.conf import settings
from django.utils.translation import gettext_lazy as _
from survey.exporter.tex.question2tex import Question2Tex
LOGGER = logging.getLogger(__name__)
class Question2TexChart(Question2Tex):
"""
    This class permits generating latex code directly from the Question
    object.
"""
TEX_SKELETON = """
\\begin{figure}[h!]
\\begin{tikzpicture}
\\pie%s{
%s
}
\\end{tikzpicture}
\\caption{\\label{figure:q%d-%d}%s}
\\end{figure}
"""
def __init__(self, question, **options):
super().__init__(question, **options)
self.pos = options.get("pos")
self.rotate = options.get("rotate")
self.radius = options.get("radius")
self.color = options.get("color")
self.explode = options.get("explode")
self.sum = options.get("sum")
self.after_number = options.get("after_number")
self.before_number = options.get("before_number")
self.scale_font = options.get("scale_font")
self.text = options.get("text")
self.style = options.get("style")
self.type = options.get("type")
        # This permits labelling multiple charts correctly so we do not
        # end up with the same label for each chart
self.latex_label = options.get("latex_label", 1)
def get_colors(self):
"""Return a formatted string for a tikz pgf-pie chart."""
colors = []
for answer in self.cardinality:
answer = Question2Tex.get_clean_answer(answer)
try:
colors.append(self.color[answer])
except (KeyError, ValueError):
msg = "Color for '%s' not provided. You could " % answer
msg += "add '%s: \"red!50\"', in your color config." % answer
LOGGER.warning(msg)
colors.append(settings.SURVEY_DEFAULT_PIE_COLOR)
return "{%s}" % ", ".join(colors)
def get_results(self):
"""Return a formatted string for a tikz pgf-pie chart."""
pie = ""
for answer, cardinality in list(self.cardinality.items()):
            if not answer:
                ans = _("Left blank")
            else:
                ans = Question2Tex.get_clean_answer(answer)
pie += f"{cardinality}/{ans},"
if not pie:
return ""
final_answers = []
for answer in pie.split(","):
if answer:
final_answers.append(answer)
        return " {}".format(",\n ".join(final_answers))
def get_pie_options(self): # noqa: C901
r"""Return the options of the pie for: \pie[options]{data}"""
options = ""
if self.pos:
options += "pos={%s}," % self.pos
if self.explode:
options += "explode={%s}," % self.explode
if self.rotate:
options += f"rotate={self.rotate},"
if self.radius:
options += f"radius={self.radius},"
if self.color:
options += f"color={self.get_colors()},"
if self.sum:
options += f"sum={self.sum},"
if self.after_number:
options += f"after number={self.after_number},"
if self.before_number:
options += f"before number={self.before_number},"
if self.scale_font:
options += "scale font, "
if self.text:
options += f"text={self.text},"
if self.style:
options += f"style={self.style},"
if self.type and self.type != "pie":
options += f"{self.type},"
# Removing last ','
options = options[:-1]
if options:
return f"[{options}]"
return ""
def get_caption_specifics(self):
return "{} '{}' ".format(_("for the question"), Question2Tex.html2latex(self.question.text))
def tex(self):
"""Return a pfg-pie pie chart of a question.
You must use pgf-pie in your latex file for this to works ::
\\usepackage{pgf-pie}
See http://pgf-pie.googlecode.com/ for detail and arguments doc."""
results = self.get_results()
if not results:
return str(_("No answers for this question."))
return Question2TexChart.TEX_SKELETON % (
self.get_pie_options(),
results,
self.question.pk,
self.latex_label,
self.get_caption(),
)
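    # A minimal sketch (hypothetical values) of the LaTeX tex() emits for
    # a question with pk 1, latex_label 1 and two answers:
    #
    #   \begin{figure}[h!]
    #       \begin{tikzpicture}
    #           \pie[rotate=90]{
    #                7/Yes,
    #                3/No
    #           }
    #       \end{tikzpicture}
    #       \caption{\label{figure:q1-1}...}
    #   \end{figure}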
|
dsarkozi/care4care-sdp-grp4
|
Care4Care/C4CApplication/views/RemoveFavoriteRedirectView.py
|
Python
|
agpl-3.0
| 926
| 0.007559
|
from django.views.decorators.cache import never_cache
from django.views.generic.base import RedirectView
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse_lazy
from C4CApplication.views.utils import create_user
class RemoveFavoriteRedirectView(RedirectView):
url = reverse_lazy("favorites")
    user = None
def dispatch(self, request, *args, **kwargs):
if 'email' not in self.request.session:
raise PermissionDenied # HTTP 403
self.user = create_user(self.request.session['email'])
return super(RemoveFavoriteRedirectView, self).dispatch(request, *args, **kwargs)
@never_cache
def get(self, request, *args, **kwargs):
favorite_mail = kwargs['pk']
self.user.remove_favorite(favorite_mail)
return super(RemoveFavoriteRedirectView, self).get(request, *args, **kwargs)
|
FedoraScientific/salome-paravis
|
test/VisuPrs/GaussPoints/C1.py
|
Python
|
lgpl-2.1
| 2,013
| 0.002981
|
# Copyright (C) 2010-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# This case corresponds to: /visu/GaussPoints/C1 case
# Create Gauss Points on the field of the MED file
import os
import sys
from paravistest import datadir, pictureext, get_picture_dir
from presentations import GaussPointsOnField, EntityType, get_time, process_prs_for_test
import pvserver as paravis
import pvsimple
# Directory for saving snapshots
picturedir = get_picture_dir("GaussPoints/C1")
if not picturedir.endswith(os.sep):
picturedir += os.sep
# MED file
file_name = datadir + "T_COUPLEX1.med"
field_name = "Conc. I129"
timestamp_nb = -1 # last timestamp
paravis.OpenDataFile(file_name)
med_reader = pvsimple.GetActiveSource()
if med_reader is None:
raise RuntimeError("File wasn't imported!!!")
# Create Gauss Points presentation
prs = GaussPointsOnField(med_reader, EntityType.CELL, field_name, timestamp_nb)
if prs is None:
    raise RuntimeError("Created presentation is None!!!")
# Display presentation and get snapshot
view = pvsimple.GetRenderView()
time = get_time(med_reader, timestamp_nb)
pic_name = picturedir + field_name + "_" + str(time) + "_GAUSSPOINTS." + pictureext
process_prs_for_test(prs, view, pic_name)
|
sburnett/seattle
|
deploymentscripts/attic/deploy_server_final.py
|
Python
|
mit
| 13,882
| 0.01592
|
"""
<Program Name>
deploy_server_final.py
<Started>
July 2009
<Author>
n2k8000@u.washington.edu
Konstantin Pik
<Purpose>
This is a fully automated webserver that handles only certain web requests. The server assumes files exist
in the locations they are supposed to be in; the files are placed there by make_summary.py, which in turn
draws its files from deploy_main and related scripts.
<Usage>
python deploy_server_final.py
The deploy_server_monitor.py file should start and stop the server for you, so you shouldn't have to
manage the server by yourself.
"""
import os
import time
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
class RequestHandler(BaseHTTPRequestHandler):
"""
<Purpose>
This class is the custom request handler that we'll have our server use.
<Arguments>
BaseHTTPRequestHandler:
The requesthandler object
"""
def do_GET(self):
"""
<Purpose>
Class method to handle GET requests.
<Arguments>
self:
this is the request object.
<Exceptions>
any type of error while sending back a reply.
<Side Effects>
None.
<Returns>
None.
"""
try:
# parse the requested page and see if it's valid
parse_status, explanation_str = self.parse_header(self.path)
# parse_status:
# -1: error
# 0: /log/* request
# 1: /detailed/node/timestamp request
print str(self.parse_header(self.path))
explanation_str = str(explanation_str)
# error
if parse_status == -1:
# invalid header, close the connection and die but notify user
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('Invalid request ('+explanation_str+')')
print '-1'
return
# 1: /detailed/node/timestamp request
elif parse_status == 1:
print '1'
# just need to respond with the file that's contained in explanation_str
# and once we verify that it exists, we're golden
# path to the "detailed" file
file_path = explanation_str
if os.path.isfile(file_path):
try:
# TODO: make HTML here to nav around previous node things
detailed_file_handle = open(file_path, 'r')
self.send_response(200)
self.send_header('Content-type', 'text/plain')
self.end_headers()
self.wfile.write(detailed_file_handle.read())
detailed_file_handle.close()
return
except Exception, e:
print 'Error while sending detailed log file'
print e
return
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('Invalid file request')
return
# 0: /log/* request
elif parse_status == 0:
print '0'
# request was successful, we just want the filename from index
log_index = explanation_str
success_status, log_filename = self.get_filename_from_index(log_index)
if success_status == -1:
# some kind of error of which the description is stored in log_filename
#sockobj.send('The server encountered an error opening the file, please'+\
# ' try your request again')
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('The server encountered an error opening the file, please'+\
' try your request again')
return
# the file exists!
# just dump the file at this point, and then...
# send the HTML file
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.send_html_file(log_filename, log_index)
return
# invalid type
else:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('Invalid request type 2')
return
except IOError:
self.send_error(404,'File Not Found: %s' % self.path)
return
def do_POST(self):
# POST requests are not handled by our server. just let the user know that.
try:
self.send_response(200)
self.send_header('Content-type', 'text/html')
self.end_headers()
self.wfile.write('Invalid request: POST is not supported')
except :
pass
def make_link_to(self, index, caption):
"""
<Purpose>
This function makes a link to the index with a caption
<Arguments>
self:
this object
index: (expected int, but can be str)
the index to link to (relative to this one, 0 is most recent)
caption:
the caption for that index
<Exceptions>
None.
<Side Effects>
None.
<Returns>
HTML to be inserted and created for page by page navigation
"""
# index is an int
return '<a href="/log/'+str(index)+'"> '+caption+' '+str(index)+'</a>'
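# For illustration, derived from the string template above:
# self.make_link_to(3, 'Previous') -> '<a href="/log/3"> Previous 3</a>'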
def get_next_index(self, current_index_string):
"""
<Purpose>
Gets the 'next' index to grab.
<Arguments>
self:
this requesthandler object
current_index_string:
the string representation of a number: current index
<Exceptions>
None.
<Side Effects>
None.
<Returns>
Integer. The next index in the series.
"""
# current index is a string, so cast to int
current_index = int(current_index_string)
return current_index+1
def get_previous_index(self, current_index_string):
"""
<Purpose>
Gets the 'previous' index to grab.
<Arguments>
self:
this requesthandler object
current_index_string:
the string representation of a number: current index
<Exceptions>
None.
<Side Effects>
None.
<Returns>
Integer. The previous index in the series.
"""
# current index is a string, so cast to int
current_index = int(current_index_string)
return current_index-1
def print_navigation(self, current_index):
"""
<Purpose>
Prints the navigation on the current page.
<Arguments>
self:
this requesthandler object
current_index:
The current index that this person is on.
<Exceptions>
None.
<Side Effects>
None.
<Returns>
String. HTML representation of the navigation.
"""
# current_index: current index
# the html string we're going to build up
html = ""
html += '<table width="100%"><tr>'
# returns some HTML that are the navigation links at the bottom of the page
previous_index = self.get_previous_index(current_index)
if previous_index != -1:
# not empty, so make a link
html += '<td align="center">'
previous_link = self.make_link_to(previous_index, 'Previous')
html += previous_link+'</td>'
next_index = self.get_next_index(current_index)
if next_index != -1:
html += '<td align="center">'
next_link = self.make_link_to(next_index, 'Next')
html += next_link+'</td>'
html += '</table>'
return html
def read_whole_file(self, file_handle):
"""
<Purpose>
Reads in a whole file given a file handle
<Arguments>
self:
this requesthandler object
file_handle
the file handle of the file to read
<Exceptions>
None.
<Side Effects>
None.
<Returns>
String. The file contents as string.
"""
# reads in a whole file given a file handle
temp_str = ""
for each_line in file_handle.xreadlines():
temp_str += each_line
return temp_str
def send_html_file(self, html_fn, log_index):
"""
<Purpose>
|
marios-zindilis/musicbrainz-django-models
|
musicbrainz_django_models/tests/test_l_artist_release.py
|
Python
|
gpl-2.0
| 451
| 0
|
from django.test import TestCase
from ..models import l_artist_release
class test_l_artist_release(TestCase):
def setUp(self):
"""
Set up the test subject.
"""
self.subject = l_artist_release()
def test__l_artist_release__instance(self):
self.assertIsInstance(self.subject, l_artist_release)
def test__l_artist_release__str(self):
self.assertEqual(str(self.subject), 'L Artist Release')
|
obi-two/Rebelion
|
data/scripts/templates/object/weapon/melee/sword/crafted_saber/shared_sword_lightsaber_one_handed_s12_gen1.py
|
Python
|
mit
| 497
| 0.044266
|
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Weapon()
result.template = "object/weapon/melee/sword/crafted_saber/shared_sword_lightsaber_one_handed_s12_gen1.iff"
result.attribute_template_id = 10
result.stfName("weapon_name","sword_lightsaber_type12")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result
|
Foxi352/netlib
|
network.py
|
Python
|
gpl-3.0
| 39,974
| 0.003402
|
#!/usr/bin/env python3
# vim: set encoding=utf-8 tabstop=4 softtabstop=4 shiftwidth=4 expandtab
#########################################################################
# Parts Copyright 2016 C. Strassburg (lib.utils) c.strassburg@gmx.de
# Copyright 2017- Serge Wagener serge@wagener.family
#########################################################################
# This file is part of SmartHomeNG
#
# SmartHomeNG is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SmartHomeNG is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SmartHomeNG If not, see <http://www.gnu.org/licenses/>.
#########################################################################
"""
| *** ATTENTION: This is early work in progress. Interfaces are subject to change. ***
| *** DO NOT USE IN PRODUCTION until you know what you are doing ***
|
This library contains the future network classes for SmartHomeNG.
New network functions and utilities are going to be implemented in this library.
These classes, functions and methods are mainly meant to be used by plugin developers.
"""
import logging
import re
import ipaddress
import requests
import select
import socket
import threading
import time
import queue
class Network(object):
""" This Class has some usefull static methods that you can use in your projects """
@staticmethod
def is_mac(mac):
"""
Validates a MAC address
:param mac: MAC address
:type string: str
:return: True if value is a MAC
:rtype: bool
"""
mac = str(mac)
if len(mac) == 12:
for c in mac:
try:
if int(c, 16) > 15:
return False
except:
return False
return True
octets = re.split(r'[:\- ]', mac)
if len(octets) != 6:
return False
for i in octets:
try:
if int(i, 16) > 255:
return False
except:
return False
return True
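# A few illustrative checks against the rules above (hypothetical values):
# Network.is_mac('00:11:22:33:44:55') -> True (six octets, each <= 0xFF)
# Network.is_mac('001122334455') -> True (12 hex digits, no separators)
# Network.is_mac('00:11:22:33:44') -> False (only five octets)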
@staticmethod
def is_ip(string):
"""
Checks if a string is a valid ip-address (v4 or v6)
:param string: String to check
:type string: str
:return: True if an ip, false otherwise.
:rtype: bool
"""
return (Network.is_ipv4(string) or Network.is_ipv6(string))
@staticmethod
def is_ipv4(string):
"""
Checks if a string is a valid ip-address (v4)
:param string: String to check
:type string: str
:return: True if an ip, false otherwise.
:rtype: bool
"""
try:
ipaddress.IPv4Address(string)
return True
except ipaddress.AddressValueError:
return False
@staticmethod
def is_ipv6(string):
"""
Checks if a string is a valid ip-address (v6)
:param string: String to check
:type string: str
:return: True if an ipv6, false otherwise.
:rtype: bool
"""
try:
ipaddress.IPv6Address(string)
return True
except ipaddress.AddressValueError:
return False
@staticmethod
def is_hostname(string):
"""
Checks if a string is a valid hostname
The hostname is checked to have a valid format.
:param string: String to check
:type string: str
:return: True if a hostname, false otherwise.
:rtype: bool
"""
try:
return bool(re.match("^(([a-zA-Z]|[a-zA-Z][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z]|[A-Za-z][A-Za-z0-9\-]*[A-Za-z0-9])$", string))
except TypeError:
return False
@staticmethod
def get_local_ipv4_address():
"""
Gets the local IPv4 address of the interface with the default gateway.
Returns '127.0.0.1' if no suitable interface is found.
:return: IPv4 address as a string
:rtype: string
"""
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('8.8.8.8', 1))
IP = s.getsockname()[0]
except:
IP = '127.0.0.1'
finally:
s.close()
return IP
@staticmethod
def get_local_ipv6_address():
"""
Gets the local IPv6 address of the interface with the default gateway.
Returns '::1' if no suitable interface is found.
:return: IPv6 address as a string
:rtype: string
"""
s = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
try:
s.connect(('2001:4860:4860::8888', 1))
IP = s.getsockname()[0]
except:
IP = '::1'
finally:
s.close()
return IP
@staticmethod
def ip_port_to_socket(ip, port):
"""
Returns an ip address plus port to a socket string.
Format is 'ip:port' for IPv4 or '[ip]:port' for IPv6
:return: Socket address / IPEndPoint as string
:rtype: string
"""
if Network.is_ipv6(ip):
ip = '[{}]'.format(ip)
return '{}:{}'.format(ip, port)
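# For example:
# Network.ip_port_to_socket('192.168.1.2', 1883) -> '192.168.1.2:1883'
# Network.ip_port_to_socket('::1', 1883) -> '[::1]:1883' (IPv6 gets brackets)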
@staticmethod
def ipver_to_string(ipver):
"""
Converts a socket address family to an ip version string 'IPv4' or 'IPv6'
:param ipver: Socket family
:type ipver: socket.AF_INET or socket.AF_INET6
:return: 'IPv4' or 'IPv6'
:rtype: string
"""
return 'IPv6' if ipver == socket.AF_INET6 else 'IPv4'
class Http(object):
"""
Creates an instance of the Http class.
:param baseurl: base URL used everywhere in this instance (example: http://www.myserver.tld)
:type baseurl: str
"""
def __init__(self, baseurl=None):
self.logger = logging.getLogger(__name__)
self.baseurl = baseurl
self._response = None
self.timeout = 10
def get_json(self, url=None, params=None):
"""
Launches a GET request and returns JSON answer as a dict or None on error.
:param url: Optional URL to fetch from. If None (default) use baseurl given on init.
:param params: Optional dict of parameters to add to URL query string.
:type url: str
:type params: dict
:return: JSON answer decoded into a dict or None on whatever error occurred
:rtype: dict | None
"""
self.__get(url=url, params=params)
json = None
try:
json = self._response.json()
except:
self.logger.warning("Invalid JSON received from {} !".format(url if url else self.baseurl))
return json
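# A usage sketch, assuming the class above (base URL and endpoint are made-up):
# api = Http(baseurl='http://www.myserver.tld')
# status = api.get_json(url='http://www.myserver.tld/status', params={'id': 1})
# status is a dict on success, or None if the response was not valid JSON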
def get_text(self, url=None, params=None, encoding=None, timeout=None):
"""
Launches a GET request and returns answer as string or None on error.
:param url: Optional URL to fetch from. Default is to use baseurl given to constructor.
:param params: Optional dict of parameters to add to URL query string.
:param encoding: Optional encoding of the received text. Default is to let the lib try to figure out the right encoding.
:type url: str
:type params: dict
:type encoding: str
:return: Answer decoded into a string or None on whatever error occurred
:rtype: str | None
"""
_text = None
if self.__get(url=url, params=params, timeout=timeout):
try:
if encoding:
self._response.encoding = encoding
_text = self._response.text
except:
self.logger.error("Successfull GET, but decoding response failed. This should
|
VaclavDedik/infinispan-py
|
tests/func/server.py
|
Python
|
mit
| 2,231
| 0
|
# -*- coding: utf-8 -*-
import os
import signal
import time
import subprocess
import pytest
class Mode(object):
STANDALONE = "standalone.sh"
DOMAIN = "domain.sh"
class InfinispanServer(object):
DOWNLOAD_URL = "http://downloads.jboss.org/infinispan/%s.Final/"
ZIP_NAME = "infinispan-server-%s.Final-bin.zip"
DIR_NAME = "infinispan-server-%s.Final"
def __init__(self, version="8.2.5", mode=Mode.STANDALONE):
self.version = version
self.mode = mode
self.process = None
if pytest.config.getoption("--domain"):
self.mode = Mode.DOMAIN
this_dir = os.path.dirname(os.path.realpath(__file__))
server_dir = os.path.join(this_dir, "server")
zip_name = self.ZIP_NAME % version
dir_name = self.DIR_NAME % version
self.dir_path = os.path.join(server_dir, dir_name)
url = (self.DOWNLOAD_URL + zip_name) % version
print("Downloading and unzipping %s" % zip_name)
download_script = os.path.join(this_dir, "download_server.sh")
ret = subprocess.call(
[download_script, url, zip_name, server_dir, dir_name])
if ret != 0:
raise RuntimeError("Failed to download %s" % zip_name)
def start(self):
if self.process:
raise RuntimeError("Server already running")
launch_script = os.path.join(self.dir_path, "bin", self.mode)
self.process = subprocess.Popen(
[launch_script], shell=True, preexec_fn=os.setsid)
time.sleep(self._get_wait_time())
def stop(self):
if not self.process:
raise RuntimeError("Server is already stopped")
os.killpg(os.getpgid(self.process.pid), signal.SIGTERM)
time.sleep(self._get_wait_time(start=False))
self.process = None
def kill(self):
if not self.process:
raise RuntimeError("Server is already stopped")
os.killpg(os.getpgid(self.process.pid), signal.SIGKILL)
self.process = None
def _get_wait_time(self, start=True):
t = 5 if start else 2
if self.mode == Mode.DOMAIN:
t *= 4
if pytest.config.getoption("--waitlong"):
t *= 3
return t
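# Illustrative lifecycle, assuming the class above:
# server = InfinispanServer(version="8.2.5", mode=Mode.STANDALONE)
# server.start()  # sleeps 5-60s depending on mode and --waitlong
# ... run tests against the server ...
# server.stop()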
|
amfoss/fosswebsite
|
workshop/urls.py
|
Python
|
mit
| 2,168
| 0.007841
|
# created by Chirath R, chirath.02@gmail.com
from django.conf.urls import url
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from workshop.views import WorkshopRegistrationListView, WorkshopDetailView, WorkshopRegistrationUpdateView, \
WorkshopRegisterFormView, WorkshopListView, WorkshopFeedbackCreateView, WorkshopGalleryCreateView, \
WorkshopGalleryListView, WorkshopGalleryDeleteView, WorkshopCreateView, WorkshopUpdateView, WorkshopDeleteView
urlpatterns = [
url(r'^$', WorkshopListView.as_view(), name='workshop_list'),
url(r'^create/$', login_required(WorkshopCreateView.as_view()), name='workshop_create'),
url(r'^(?P<workshop_id>[0-9]+)/$', WorkshopDetailView.as_view(), name='workshop_detail'),
# TODO(2) Fix update and uncomment
# url(r'^(?P<pk>[0-9]+)/update/$', login_required(WorkshopUpdateView.as_view()), name='workshopdetail_update'),
url(r'^(?P<pk>[0-9]+)/delete/$', login_required(WorkshopDeleteView.as_view()), name='workshop_delete'),
url(r'^(?P<workshop_id>[0-9]+)/register/$', WorkshopRegisterFormView.as_view(), name='workshop_register'),
url(r'^(?P<workshop_id>[0-9]+)/register/list/$',
login_required(WorkshopRegistrationListView.as_view()), name='workshop_registration_list'),
url(r'^(?P<workshop_id>[0-9]+)/register/update/$',
login_required(WorkshopRegistrationUpdateView.as_view()), name='workshop_update'),
url(r'^success/$',
TemplateView.as_view(template_name='workshop/success.html'), name='workshop_registration_success'),
url(r'^(?P<workshop_id>[0-9]+)/feedback/$', WorkshopFeedbackCreateView.as_view(), name='workshop_feedback'),
url(r'^feedback/success/$',
TemplateView.as_view(template_name='workshop/success_feedback.html'), name='feedback_success'),
url(r'^(?P<pk>[0-9]+)/add-image/$', login_required(WorkshopGalleryCreateView.as_view()), name='image_create'),
url(r'^(?P<pk>[0-9]+)/gallery/$', WorkshopGalleryListView.as_view(), name='image_list'),
url(r'^image/(?P<pk>[0-9]+)/delete/$', login_required(WorkshopGalleryDeleteView.as_view()), name='image_delete'),
]
|
missionpinball/mpf
|
mpf/platforms/lisy/__init__.py
|
Python
|
mit
| 33
| 0
|
"""LISY System
|
1/80
|
platform."""
|
stephane-martin/salt-debian-packaging
|
salt-2016.3.3/tests/unit/states/selinux_test.py
|
Python
|
apache-2.0
| 5,176
| 0.000193
|
# -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python libs
from __future__ import absolute_import
# Import Salt Testing Libs
from salttesting import skipIf, TestCase
from salttesting.mock import (
NO_MOCK,
NO_MOCK_REASON,
MagicMock,
patch
)
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt Libs
from salt.states import selinux
selinux.__opts__ = {}
selinux.__salt__ = {}
@skipIf(NO_MOCK, NO_MOCK_REASON)
class SelinuxTestCase(TestCase):
'''
Test cases for salt.states.selinux
'''
# 'mode' function tests: 1
def test_mode(self):
'''
Test to verifies the mode SELinux is running in,
can be set to enforcing or permissive.
'''
ret = {'name': '',
'changes': {},
'result': False,
'comment': ''}
comt = ('unknown is not an accepted mode')
ret.update({'name': 'unknown', 'comment': comt})
self.assertDictEqual(selinux.mode('unknown'), ret)
mock_en = MagicMock(return_value='Enforcing')
mock_pr = MagicMock(side_effect=['Permissive', 'Enforcing'])
with patch.dict(selinux.__salt__,
{'selinux.getenforce': mock_en,
'selinux.setenforce': mock_pr}):
comt = ('SELinux is already in Enforcing mode')
ret.update({'name': 'Enforcing', 'comment': comt, 'result': True})
self.assertDictEqual(selinux.mode('Enforcing'), ret)
with patch.dict(selinux.__opts__, {'test': True}):
comt = ('SELinux mode is set to be changed to Permissive')
ret.update({'name': 'Permissive', 'comment': comt,
'result': None})
self.assertDictEqual(selinux.mode('Permissive'), ret)
with patch.dict(selinux.__opts__, {'test': False}):
comt = ('SELinux has been set to Permissive mode')
ret.update({'name': 'Permissive', 'comment': comt,
'result': True})
self.assertDictEqual(selinux.mode('Permissive'), ret)
comt = ('Failed to set SELinux to Permissive mode')
ret.update({'name': 'Permissive', 'comment': comt,
'result': False})
self.assertDictEqual(selinux.mode('Permissive'), ret)
# 'boolean' function tests: 1
def test_boolean(self):
'''
Test to set up an SELinux boolean.
'''
name = 'samba_create_home_dirs'
value = True
ret = {'name': name,
'changes': {},
'result': False,
'comment': ''}
mock_en = MagicMock(return_value=[])
with patch.dict(selinux.__salt__,
{'selinux.list_sebool': mock_en}):
comt = ('Boolean {0} is not available'.format(name))
ret.update({'comment': comt})
self.assertDictEqual(selinux.boolean(name, value), ret)
mock_bools = MagicMock(return_value={name: {'State': 'on',
'Default': 'on'}})
with patch.dict(selinux.__salt__,
{'selinux.list_sebool': mock_bools}):
comt = ('None is not a valid value for the boolean')
ret.update({'comment': comt})
self.assertDictEqual(selinux.boolean(name, None), ret)
comt = ('Boolean is in the correct state')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(selinux.boolean(name, value, True), ret)
comt = ('Boolean is in the correct state')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(selinux.boolean(name, value), ret)
mock_bools = MagicMock(return_value={name: {'State': 'off',
'Default': 'on'}})
mock = MagicMock(side_effect=[True, False])
with patch.dict(selinux.__salt__,
{'selinux.list_sebool': mock_bools,
'selinux.setsebool': mock}):
with patch.dict(selinux.__opts__, {'test': True}):
comt = ('Boolean samba_create_home_dirs'
' is set to be changed to on')
ret.update({'comment': comt, 'result': None})
self.assertDictEqual(selinux.boolean(name, value), ret)
with patch.dict(selinux.__opts__, {'test': False}):
comt = ('Boolean samba_create_home_dirs has been set to on')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(selinux.boolean(name, value), ret)
comt = ('Failed to set the boolean '
'samba_create_home_dirs to on')
ret.update({'comment': comt, 'result': True})
self.assertDictEqual(selinux.boolean(name, value), ret)
if __name__ == '__main__':
from integration import run_tests
run_tests(SelinuxTestCase, needs_daemon=False)
|
jhunkeler/acstools
|
acstools/acszpt.py
|
Python
|
bsd-3-clause
| 14,427
| 0.000416
|
"""
This module contains a class, :class:`Query`, that was implemented to provide
users with means to programmatically query the
`ACS Zeropoints Calculator <https://acszeropoints.stsci.edu>`_.
The API works by submitting requests to the
ACS Zeropoints Calculator referenced above and hence, it is only valid for ACS
specific instruments (HRC, SBC, or WFC).
The API can be used in two ways by specifying either a
``(date, detector, filter)`` combination or just a ``(date, detector)``
combination. In the first case, the query
will return the zeropoint information for the specific filter and detector at
specified date. In the second case, the query will return the zeropoint
information for all the filters for the desired detector at the specified date.
In either case, the result will be an ``astropy.table.QTable`` where each column
is an ``astropy.units.quantity.Quantity`` object with the appropriate units attached.
Examples
--------
Retrieve the zeropoint information for all the filters on 2016-04-01 for WFC:
>>> from acstools import acszpt
>>> date = '2016-04-01'
>>> detector = 'WFC'
>>> q = acszpt.Query(date=date, detector=detector)
>>> zpt_table = q.fetch()
>>> print(zpt_table)
FILTER PHOTPLAM PHOTFLAM STmag VEGAmag ABmag
Angstrom erg / (Angstrom cm2 s) mag(ST) mag mag(AB)
str6 float64 float64 float64 float64 float64
------ -------- ---------------------- ------- ------- -------
F435W 4329.2 3.148e-19 25.155 25.763 25.665
F475W 4746.2 1.827e-19 25.746 26.149 26.056
F502N 5023.0 5.259e-18 22.098 22.365 22.285
F550M 5581.5 3.99e-19 24.898 24.825 24.856
F555W 5360.9 1.963e-19 25.667 25.713 25.713
F606W 5922.0 7.811e-20 26.668 26.405 26.498
F625W 6312.0 1.188e-19 26.213 25.735 25.904
F658N 6584.0 1.97e-18 23.164 22.381 22.763
F660N 6599.4 5.156e-18 22.119 21.428 21.714
F775W 7693.2 9.954e-20 26.405 25.272 25.667
F814W 8045.0 7.046e-20 26.78 25.517 25.944
F850LP 9033.2 1.52e-19 25.945 24.332 24.858
F892N 8914.8 1.502e-18 23.458 21.905 22.4
Retrieve the zeropoint information for the F435W filter on 2016-04-01 for WFC:
>>> from acstools import acszpt
>>> date = '2016-04-01'
>>> detector = 'WFC'
>>> filt = 'F435W'
>>> q = acszpt.Query(date=date, detector=detector, filt=filt)
>>> zpt_table = q.fetch()
>>> print(zpt_table)
FILTER PHOTPLAM PHOTFLAM STmag VEGAmag ABmag
Angstrom erg / (Angstrom cm2 s) mag(ST) mag mag(AB)
------ -------- ---------------------- ------- ------- -------
F435W 4329.2 3.148e-19 25.155 25.763 25.665
Retrieve the zeropoint information for the F435W filter for WFC at multiple dates:
>>> from acstools import acszpt
>>> dates = ['2004-10-13', '2011-04-01', '2014-01-17', '2018-05-23']
>>> queries = []
>>> for date in dates:
... q = acszpt.Query(date=date, detector='WFC', filt='F435W')
... zpt_table = q.fetch()
... # Each object has a zpt_table attribute, so we save the instance
... queries.append(q)
>>> for q in queries:
... print(q.date, q.zpt_table['PHOTFLAM'][0], q.zpt_table['STmag'][0])
2004-10-13 3.074e-19 erg / (Angstrom cm2 s) 25.181 mag(ST)
2011-04-01 3.138e-19 erg / (Angstrom cm2 s) 25.158 mag(ST)
2014-01-17 3.144e-19 erg / (Angstrom cm2 s) 25.156 mag(ST)
2018-05-23 3.152e-19 erg / (Angstrom cm2 s) 25.154 mag(ST)
>>> type(queries[0].zpt_table['PHOTFLAM'])
astropy.units.quantity.Quantity
"""
import datetime as dt
import logging
import os
from urllib.request import urlopen
from urllib.error import URLError
import astropy.units as u
from astropy.table import QTable
from bs4 import BeautifulSoup
import numpy as np
__taskname__ = "acszpt"
__author__ = "Nathan Miles"
__version__ = "1.0"
__vdate__ = "22-Jan-2019"
__all__ = ['Query']
# Initialize the logger
logging.basicConfig()
LOG = logging.getLogger(f'{__taskname__}.Query')
LOG.setLevel(logging.INFO)
class Query:
"""Class used to interface with the ACS Zeropoints Calculator API.
Parameters
----------
date : str
Input date in the following ISO format, YYYY-MM-DD.
detector : {'HRC', 'SBC', 'WFC'}
One of the three channels on ACS: HRC, SBC, or WFC.
filt : str or `None`, optional
One of valid filters for the chosen detector. If no filter is supplied,
all of the filters for the chosen detector will be used:
* HRC:
F220W, F250W, F330W,
F344N, F435W, F475W,
F502N, F550M, F555W,
F606W, F625W, F658N, F660N,
F775W, F814W, F850LP, F892N
* WFC:
F435W, F475W,
F502N, F550M, F555W,
F606W, F625W, F658N, F660N,
F775W, F814W, F850LP, F892N
* SBC:
F115LP, F122M, F125LP,
F140LP, F150LP, F165LP
"""
def __init__(self, date, detector, filt=None):
# Set the attributes
self._date = date
self._detector = detector.upper()
self._filt = filt
self.valid_filters = {
'WFC': ['F435W', 'F475W', 'F502N', 'F550M',
'F555W', 'F606W', 'F625W', 'F658N',
'F660N', 'F775W', 'F814W', 'F850LP', 'F892N'],
'HRC': ['F220W', 'F250W', 'F330W', 'F344N',
'F435W', 'F475W', 'F502N', 'F550M',
'F555W', 'F606W', 'F625W', 'F658N',
'F660N', 'F775W', 'F814W', 'F850LP', 'F892N'],
'SBC': ['F115LP', 'F122M', 'F125LP',
'F140LP', 'F150LP', 'F165LP']
}
self._zpt_table = None
# Set the private attributes
if filt is None:
self._url = ('https://acszeropoints.stsci.edu/results_all/?'
f'date={self.date}&detector={self.detector}')
else:
self._filt = filt.upper()
self._url = ('https://acszeropoints.stsci.edu/results_single/?'
f'date1={self.date}&detector={self.detector}'
f'&{self.detector}_filter={self.filt}')
# ACS Launch Date
self._acs_installation_date = dt.datetime(2002, 3, 7)
# The farthest date in future that the component and throughput files
# are valid for. If input date is larger, extrapolation is not valid.
self._extrapolation_date = dt.datetime(2021, 12, 31)
self._msg_div = '-' * 79
self._valid_detectors = ['HRC', 'SBC', 'WFC']
self._response = None
self._failed = False
self._data_units = {
'FILTER': u.dimensionless_unscaled,
'PHOTPLAM': u.angstrom,
'PHOTFLAM': u.erg / u.cm ** 2 / u.second / u.angstrom,
'STmag': u.STmag,
'VEGAmag': u.mag,
'ABmag': u.ABmag
}
self._block_size = len(self._data_units)
@property
def date(self):
"""The user supplied date. (str)"""
return self._date
@property
def detector(self):
"""The user supplied detector. (str)"""
return self._detector
@property
def filt(self):
"""The user supplied filter, if one was given. (str or `None`)"""
return self._filt
@property
def zpt_table(self):
"""The results returned by the ACS Zeropoint Calculator. (`astropy.table.QTable`)"""
return self._zpt_table
def _check_inputs(self):
"""Check the inputs to ensure they are valid.
Returns
-------
status : bool
True if all inputs are valid, False if one is not.
"""
valid_detector = True
valid_filter = True
valid_date = True
# Determine the submitted detector is valid
if self.detector not in self._valid_detectors:
msg = (f'{self.detector} is not a valid detector option.\n'
'Please choose one of the following:\n'
f
|
indictranstech/focal-erpnext
|
projects/doctype/project/project.py
|
Python
|
agpl-3.0
| 2,450
| 0.023673
|
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils import flt, getdate
from frappe import _
from erpnext.utilities.transaction_base import delete_events
from frappe.model.document import Document
class Project(Document):
def get_gross_profit(self):
pft, per_pft = 0, 0
pft = flt(self.project_value) - flt(self.est_material_cost)
#if pft > 0:
per_pft = (flt(pft) / flt(self.project_value)) * 100
ret = {'gross_margin_value': pft, 'per_gross_margin': per_pft}
return ret
def validate(self):
"""validate start date before end date"""
if self.project_start_date and self.completion_date:
if getdate(self.completion_date) < getdate(self.project_start_date):
frappe.throw(_("Expected Completion Date can not be less than Project Start Date"))
self.update_milestones_completed()
def update_milestones_completed(self):
if self.project_milestones:
completed = filter(lambda x: x.status=="Completed", self.project_milestones)
self.percent_milestones_completed = len(completed) * 100 / len(self.project_milestones)
def on_update(self):
self.add_calendar_event()
def update_percent_complete(self):
total = frappe.db.sql("""select count(*) from tabTask where project=%s""",
self.name)[0][0]
if total:
completed = frappe.db.sql("""select count(*) from tabTask where
project=%s and status in ('Closed', 'Cancelled')""", self.name)[0][0]
frappe.db.set_value("Project", self.name, "percent_complete",
int(float(completed) / total * 100))
def add_calendar_event(self):
# delete any earlier event for this project
delete_events(self.doctype, self.name)
# add events
for milestone in self.get("project_milestones"):
if milestone.milestone_date:
description = (milestone.milestone or "Milestone") + " for " + self.name
frappe.get_doc({
"doctype": "Event",
"owner": self.owner,
"subject": description,
"description": description,
"starts_on": milestone.milestone_date + " 10:00:00",
"event_type": "Private",
"ref_type": self.doctype,
"ref_name": self.name
}).insert(ignore_permissions=True)
def on_trash(self):
delete_events(self.doctype, self.name)
@frappe.whitelist()
def get_cost_center_name(project_name):
return frappe.db.get_value("Proj
|
ect", project_name, "cost_center")
|
retr0h/molecule
|
molecule/util.py
|
Python
|
mit
| 5,717
| 0.00035
|
# Copyright (c) 2015-2017 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import fnmatch
import jinja2
import os
import re
import sys
import colorama
import yaml
from molecule import logger
LOG = logger.get_logger(__name__)
colorama.init(autoreset=True)
def print_debug(title, data):
title = 'DEBUG: {}'.format(title)
title = [
colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK, title,
colorama.Fore.RESET, colorama.Back.RESET, colorama.Style.RESET_ALL
]
print(''.join(title))
data = [
colorama.Fore.BLACK, colorama.Style.BRIGHT, data,
colorama.Style.RESET_ALL, colorama.Fore.RESET
]
print(''.join(data))
def print_environment_vars(env):
"""
Print ``Ansible`` and ``Molecule`` environment variables and returns None.
:param env: A dict containing the shell's environment as collected by
``os.environ``.
:return: None
"""
ansible_env = {k: v for (k, v) in env.items() if 'ANSIBLE_' in k}
print_debug('ANSIBLE ENVIRONMENT', safe_dump(ansible_env))
molecule_env = {k: v for (k, v) in env.items() if 'MOLECULE_' in k}
print_debug('MOLECULE ENVIRONMENT', safe_dump(molecule_env))
def sysexit(code=1):
sys.exit(code)
def sysexit_with_message(msg, code=1):
LOG.critical(msg)
sysexit(code)
def run_command(cmd, debug=False):
"""
Execute the given command and returns None.
:param cmd: A `sh.Command` object to execute.
:param debug: An optional bool to toggle debug output.
:return: ``sh`` object
"""
if debug:
# WARN(retr0h): Uses an internal ``sh`` data structure to dig
# the environment out of the ``sh.command`` object.
print_environment_vars(cmd._partial_call_args.get('env', {}))
print_debug('COMMAND', str(cmd))
return cmd()
def os_walk(directory, pattern):
for root, _, files in os.walk(directory):
for basename in files:
if fnmatch.fnmatch(basename, pattern):
filename = os.path.join(root, basename)
yield filename
def render_template(template, **kwargs):
t = jinja2.Environment()
t = t.from_string(template)
return t.render(kwargs)
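# For example (standard Jinja2 semantics):
# render_template('Hello {{ name }}!', name='molecule') -> 'Hello molecule!'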
def write_file(filename, content):
"""
Writes a file with the given filename and content and returns None.
:param filename: A string containing the target filename.
:param content: A string containing the data to be written.
:return: None
"""
with open(filename, 'w') as f:
f.write(content)
file_prepender(filename)
def file_prepender(filename):
"""
Prepend an informational header on files managed by Molecule and returns
None.
:param filename: A string containing the target filename.
:return: None
"""
molecule_header = '# Molecule managed\n\n'
with open(filename, 'r+') as f:
content = f.read()
f.seek(0, 0)
f.write(molecule_header + content)
def safe_dump(data):
"""
Dump the provided data to a YAML document and returns a string.
:param data: A string containing an absolute path to the file to parse.
:return: str
"""
# TODO(retr0h): Do we need to encode?
# yaml.dump(data) produces the document as a str object in both python
# 2 and 3.
return yaml.safe_dump(data, default_flow_style=False, explicit_start=True)
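# e.g. safe_dump({'a': 1}) -> '---\na: 1\n'
# (explicit_start adds the '---' document marker; block style, not flow)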
def safe_load(string):
"""
Parse the provided string returns a dict.
:param string: A string to be parsed.
:return: dict
"""
return yaml.safe_load(string) or {}
def safe_load_file(filename):
"""
Parse the provided YAML file and returns a dict.
:param filename: A string containing an absolute path to the file to parse.
:return: dict
"""
with open(filename, 'r') as stream:
return safe_load(stream)
def instance_with_scenario_name(instance_name, scenario_name):
return '{}-{}'.format(instance_name, scenario_name)
def strip_ansi_escape(string):
return re.sub(r'\x1b[^m]*m', '', string)
def strip_ansi_color(s):
# Taken from tabulate
invisible_codes = re.compile('\x1b\[\d*m')
return re.sub(invisible_codes, '', s)
def verbose_flag(options):
verbose = 'v'
verbose_flag = []
for i in range(0, 3):
if options.get(verbose):
verbose_flag = ['-{}'.format(verbose)]
del options[verbose]
if options.get('verbose'):
del options['verbose']
break
verbose = verbose + 'v'
return verbose_flag
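# Traced examples of the loop above:
# verbose_flag({'v': True}) -> ['-v'] (and 'v' is removed from options)
# verbose_flag({'vvv': True}) -> ['-vvv']
# verbose_flag({}) -> []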
def title(word):
return ' '.join(x.capitalize() or '_' for x in word.split('_'))
|
dannywxh/mypy
|
MyPys/common.py
|
Python
|
apache-2.0
| 3,687
| 0.034014
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, re, time, sys
import hashlib, bencode
import requests
from bs4 import BeautifulSoup
reload(sys)
#print sys.getdefaultencoding()
#sys.setdefaultencoding('utf-8')
print sys.getdefaultencoding()
def parse_tor(file):
bt_path = {}
bt_file = open(file, 'rb')
bt_info = bencode.bdecode(bt_file.read()).get('info')
bt_info_hash_hex = hashlib.sha1(bencode.bencode(bt_info)).hexdigest()
bt_file_size = bt_info.get('length')
bt_file_name = bt_info.get('name')
bt_path[bt_file_name]=bt_file_size
print bt_path
bt_file.close()
# Extract IDs in the uncensored format, e.g. 082516-001
def format_rule1(s):
pattern="\d{6}-\d{3}|\d{6}-\d{2}|\d{6}_\d{3}|\d{6}_\d{2}"
rs=re.findall(pattern, s);
if len(rs)>=1:
return rs[0]
else:
return ""
def format_rule2(s):
rs=''
# If the name starts with digits, treat it as a wm-style ID
wm=re.findall(r'^\d+',s)
if len(wm)==1: # it is a wm-style ID
rs=s[0:10]
return rs
# e.g.: mide-267FHD_ok_0001.mp4
# Find all non-digit runs: ['mide-', 'FHD_ok_', '.mp']
# The first element is "mide-"
alpha_list=re.findall(r'\D+', s)
if len(alpha_list)>0:
rs+=alpha_list[0]
# Find all digit runs: ['267', '0001', '4']
# The first element is "267"
num_list=re.findall(r'\d+', s)
if len(num_list)>0:
rs+=num_list[0]
if rs=='':
rs=s
rs=rs.replace("-","")
rs=rs.replace(" ","")
rs=rs.replace("_","")
rs=rs.lower()
return rs
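# Worked example, following the inline comments above:
# format_rule2("mide-267FHD_ok_0001.mp4") -> "mide267"
# ('mide-' + '267', then '-', ' ' and '_' stripped, then lowercased)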
#for test
def format_torrent(path):
for x in os.listdir(path):
print format_rule2(x)
def walkpath(path):
#files= [(dirpath,filenames) for dirpath,dirname,filenames in os.walk(path)]
files= []
for dirpath,dirname,filenames in os.walk(path.decode('utf-8')):
for filename in filenames:
files.append((filename,dirpath))
return files
def walkfile(path):
files=[x for x in os.listdir(path) if all([os.path.splitext(x)[1]=='.txt', not os.path.isdir(path+"\\"+x)])]
# txtfile=[f for f in files if os.path.splitext(f)[1]=='.txt']
store=[]
for txtfile in files:
for line in open(path+"/"+txtfile):
p,f=os.path.split(line)
store.append((f.replace("\n",""),txtfile))
return store
# Core list-comparison helper: match names in src against (file, path) pairs in des
def comparelist(src,des):
#src: ["file"]
#des:[("file","path")]
from collections import defaultdict
dic=defaultdict(list)
for x in src:
for a,b in des:
#print x,a,b
if format_rule2(x)==format_rule2(a):
dic[x].append(os.path.join(b,a))
return dic
def download(url):
headers = {'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate, compress',
'Accept-Language': 'en-us;q=0.5,en;q=0.3',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:22.0) Gecko/20100101 Firefox/22.0'}
print "download from "+url+"\n"
try:
response = requests.get(url=url,headers=headers,timeout=5) # the most basic GET request
return response
except Exception,e:
print e
#print "status_code",response.status_code
|
filippog/pysnmp
|
pysnmp/entity/rfc3413/ntfrcv.py
|
Python
|
bsd-3-clause
| 4,010
| 0.001746
|
#
# This file is part of pysnmp software.
#
# Copyright (c) 2005-2016, Ilya Etingof <ilya@glas.net>
# License: http://pysnmp.sf.net/license.html
#
import sys
from pyasn1.compat.octets import null
from pysnmp.proto import rfc3411, error
from pysnmp.proto.api import v1, v2c # backend is always SMIv2 compliant
from pysnmp.proto.proxy import rfc2576
from pysnmp import debug
# 3.4
class NotificationReceiver:
pduTypes = (v1.TrapPDU.tagSet, v2c.SNMPv2TrapPDU.tagSet,
v2c.InformRequestPDU.tagSet)
def __init__(self, snmpEngine, cbFun, cbCtx=None):
snmpEngine.msgAndPduDsp.registerContextEngineId(
null, self.pduTypes, self.processPdu # '' is a wildcard
)
self.__cbFunVer = 0
self.__cbFun = cbFun
self.__cbCtx = cbCtx
def close(self, snmpEngine):
snmpEngine.msgAndPduDsp.unregisterContextEngineId(
null, self.pduTypes
)
self.__cbFun = self.__cbCtx = None
def processPdu(self, snmpEngine, messageProcessingModel,
securityModel, securityName, securityLevel,
contextEngineId, contextName, pduVersion, PDU,
maxSizeResponseScopedPDU, stateReference):
# Agent-side API complies with SMIv2
if messageProcessingModel == 0:
origPdu = PDU
PDU = rfc2576.v1ToV2(PDU)
else:
origPdu = None
errorStatus = 'noError'
errorIndex = 0
varBinds = v2c.apiPDU.getVarBinds(PDU)
debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, varBinds %s' % (stateReference, varBinds))
# 3.4
if PDU.tagSet in rfc3411.confirmedClassPDUs:
# 3.4.1 --> no-op
rspPDU = v2c.apiPDU.getResponse(PDU)
# 3.4.2
v2c.apiPDU.setErrorStatus(rspPDU, errorStatus)
v2c.apiPDU.setErrorIndex(rspPDU, errorIndex)
v2c.apiPDU.setVarBinds(rspPDU, varBinds)
debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, confirm PDU %s' % (stateReference, rspPDU.prettyPrint()))
# Agent-side API complies with SMIv2
if messageProcessingModel == 0:
rspPDU = rfc2576.v2ToV1(rspPDU, origPdu)
statusInformation = {}
# 3.4.3
try:
snmpEngine.msgAndPduDsp.returnResponsePdu(
snmpEngine, messageProcessingModel, securityModel,
securityName, securityLevel, contextEngineId,
contextName, pduVersion, rspPDU, maxSizeResponseScopedPDU,
stateReference, statusInformation)
except error.StatusInformation:
debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, statusInformation %s' % (stateReference, sys.exc_info()[1]))
snmpSilentDrops, = snmpEngine.msgAndPduDsp.mibInstrumController.mibBuilder.importSymbols('__SNMPv2-MIB', 'snmpSilentDrops')
snmpSilentDrops.syntax += 1
elif PDU.tagSet in rfc3411.unconfirmedClassPDUs:
pass
else:
raise error.ProtocolError('Unexpected PDU class %s' % PDU.tagSet)
debug.logger & debug.flagApp and debug.logger('processPdu: stateReference %s, user cbFun %s, cbCtx %s, varBinds %s' % (stateReference, self.__cbFun, self.__cbCtx, varBinds))
if self.__cbFunVer:
self.__cbFun(snmpEngine, stateReference, contextEngineId,
contextName, varBinds, self.__cbCtx)
else:
# Compatibility stub (handle legacy cbFun interface)
try:
self.__cbFun(snmpEngine, contextEngineId, contextName,
varBinds, self.__cbCtx)
except TypeError:
self.__cbFunVer = 1
self.__cbFun(snmpEngine, stateReference, contextEngineId,
contextName, varBinds, self.__cbCtx)
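# Typical wiring, sketched from the callback signature used above
# (the SNMP engine setup is assumed and not shown in this file):
# def cbFun(snmpEngine, stateReference, contextEngineId, contextName,
#           varBinds, cbCtx):
#     for name, val in varBinds:
#         print('%s = %s' % (name.prettyPrint(), val.prettyPrint()))
# NotificationReceiver(snmpEngine, cbFun)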
|
nmalkin/multimail
|
multimail.py
|
Python
|
gpl-3.0
| 4,444
| 0.006076
|
#!/usr/bin/env python
import csv
import getopt
import os
import sys
from ConfigParser import ConfigParser
from tempfile import NamedTemporaryFile
# Defaults
VERBOSE = True
CONFIG_SECTION = 'common'
DEFAULT_CONFIG_FILE = 'batch.cfg'
DEFAULT_RECIPIENTS_FILE = 'recipients.csv'
DEFAULT_MESSAGE_FILE = 'message.txt'
# Commands
CMD = 'mutt -s "%(subject)s" %(extras)s -- "%(recipient)s" < "%(message-file)s"'
CC = '-c "%(cc)s" '
BCC = '-b "%(bcc)s" '
ATTACH = '-a %(attachment)s '
def log(message):
""" Prints given message if VERBOSE is set to True.
"""
if VERBOSE:
print(message)
def get_message(filename):
""" Reads the given filename and returns its contents as a string.
"""
f = open(filename, 'r')
message = ''.join(f.readlines())
f.close()
return message
def personalize_message(message, personalizations):
""" Returns a copy of the message with placholders replaced with personalizations.
Placeholders take the form <0>, <1>, <2>, ...
Personalizations are given as a list of strings.
"""
for i in range(len(personalizations)):
placeholder = '<' + str(i) + '>' # e.g., <1>
message = message.replace(placeholder, personalizations[i])
return message
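# For example:
# personalize_message('Hi <0>, see <1>.', ['Alice', 'notes.pdf'])
# -> 'Hi Alice, see notes.pdf.'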
def get_options(filename):
""" Parses config file for subject, cc, bcc, attachment,
returns these as a 4-tuple, in that order.
"""
config = ConfigParser()
config.read(filename)
# Get the options. They're all in the 'common' section.
subject = config.get(CONFIG_SECTION, 'subject')
cc = config.get(CONFIG_SECTION, 'cc')
bcc = config.get(CONFIG_SECTION, 'bcc')
attachment = config.get(CONFIG_SECTION, 'attachment')
return (subject, cc, bcc, attachment)
def get_personalizations(filename):
""" Reads a CSV file with addresses and personalizations.
The first column is the recipient's address;
the remaining columns are the personalizations.
This function returns a list of tuples (recipient, [personalizations]).
"""
personalizations = []
reader = csv.reader(open(filename, 'r'))
for row in reader:
personalizations.append(row)
return personalizations
def send_message(message, subject, recipient, cc='', bcc='', attachment=''):
""" Sends message with given options.
"""
log('Sending message to: %s' % recipient)
tmp = NamedTemporaryFile('w')
tmp.write(message)
tmp.flush()
extras = ''
if cc != '':
extras += CC % {'cc': cc}
if bcc != '':
extras += BCC % {'bcc': bcc}
if attachment != '':
extras += ATTACH % {'attachment': attachment}
run = CMD % \
{'subject' : subject, \
'extras' : extras, \
'recipient' : recipient, \
'message-file' : tmp.name}
os.system(run)
tmp.close()
def multimail(config_filename, personalization_filename, message_filename):
message = get_message(message_filename)
personalizations = get_personalizations(personalization_filename)
(subject, cc, bcc, attachment) = get_options(config_filename)
for recipient in personalizations:
# The first column for each recipient is the email address.
address = recipient[0]
# Personalize message
personalized_message = personalize_message(message, recipient)
# The same pattern matching is applied to the attachment.
attachment_matched = personalize_message(attachment, recipient) if attachment != '' else ''
# Send the message!
send_message(personalized_message, subject, address, cc, bcc, attachment_matched)
def main():
global VERBOSE # without this, the --silent option below would only set a local
config_file = DEFAULT_CONFIG_FILE
recipients_file = DEFAULT_RECIPIENTS_FILE
message_file = DEFAULT_MESSAGE_FILE
# Read command-line options
try:
opts, args = getopt.getopt(sys.argv[1:], 'c:r:m:s', ['config=', 'recipients=', 'message=', 'silent'])
except getopt.GetoptError, err:
print(str(err))
for opt, val in opts:
if opt in ('-c', '--config'):
config_file = val
elif opt in ('-r', '--recipients'):
recipients_file = val
elif opt in ('-m', '--message'):
message_file = val
elif opt in ('-s', '--silent'):
VERBOSE = False
multimail(config_file, recipients_file, message_file)
if __name__=="__main__":
main()
|
midfies/django-imager
|
imagersite/imager_images/admin.py
|
Python
|
mit
| 728
| 0
|
from django.contrib import admin
from imager_images.models import Photo, Album
# class PhotoAdmin(admin.ModelAdmin):
# list_display = ("title", "description")
# fields = ('title',
# 'description',
# 'published',
# 'owner')
# class AlbumAdmin(admin.ModelAdmin):
# list_display = ("title", "description")
# filter_horizontal = ('photos',)
# fields = ('title',
# 'description',
# 'published',
# 'owner')
admin.site.register(Photo)
class AlbumInline(admin.TabularInline):
model = Album.photos.through
@admin.register(Album)
class AlbumAdmin(admin.ModelAdmin):
inlines = (AlbumInline,)
exclude = ('photos',)
|
IT-PM-OpenAdaptronik/Webapp
|
apps/calc/apps.py
|
Python
|
mit
| 88
| 0
|
from django.apps import AppConfig
class CalcConfig(AppConfig):
name = 'apps.calc'
|
Mellthas/quodlibet
|
quodlibet/qltk/renamefiles.py
|
Python
|
gpl-2.0
| 18,543
| 0
|
# Copyright 2004-2005 Joe Wreschnig, Michael Urman, Iñigo Serna
# 2020 Nick Boultbee
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
import os
import re
import unicodedata
import glob
import shutil
from gi.repository import Gtk, Gdk
from senf import fsn2text, text2fsn
import quodlibet
from quodlibet import qltk
from quodlibet import util
from quodlibet import config
from quodlibet import _
from quodlibet.plugins import PluginManager
from quodlibet.pattern import FileFromPattern
from quodlibet.pattern import ArbitraryExtensionFileFromPattern
from quodlibet.qltk._editutils import FilterPluginBox, FilterCheckButton
from quodlibet.qltk._editutils import EditingPluginHandler
from quodlibet.qltk.views import TreeViewColumn
from quodlibet.qltk.cbes import ComboBoxEntrySave
from quodlibet.qltk.ccb import ConfigCheckButton
from quodlibet.qltk.models import ObjectStore
from quodlibet.qltk import Icons, Button, Frame
from quodlibet.qltk.wlw import WritingWindow
from quodlibet.util import connect_obj
from quodlibet.util.path import strip_win32_incompat_from_path
from quodlibet.util.dprint import print_d, print_e
NBP = os.path.join(quodlibet.get_user_dir(), "lists", "renamepatterns")
NBP_EXAMPLES = """\
<tracknumber>. <title>
<tracknumber|<tracknumber>. ><title>
<tracknumber> - <title>
<tracknumber> - <artist> - <title>
/path/<artist> - <album>/<tracknumber>. <title>
~/<artist>/<album>/<tracknumber> - <title>
<albumartist|<albumartist>|<artist>>/(<~year>) <album>\
/<tracknumber|<tracknumber> - ><title>"""
class SpacesToUnderscores(FilterCheckButton):
_label = _("Replace spaces with _underscores")
_section = "rename"
_key = "spaces"
_order = 1.0
def filter(self, original, filename):
return filename.replace(" ", "_")
class ReplaceColons(FilterCheckButton):
_label = _("Replace [semi]colon delimiting with hyphens")
_tooltip = _('e.g. "iv: allegro.flac" → "iv - allegro.flac"')
_section = "rename"
_key = "colons"
_order = 1.05
def __init__(self):
super().__init__()
# If on Windows, force this to be inactive (and hidden)
if os.name == 'nt':
self.set_active(False)
self.set_sensitive(False)
self.set_no_show_all(True)
def filter(self, original, filename):
regx = re.compile(r'\s*[:;]\s+\b')
return regx.sub(" - ", filename)
class StripWindowsIncompat(FilterCheckButton):
_label = _("Strip _Windows-incompatible characters")
_section = "rename"
_key = "windows"
_order = 1.1
def __init__(self):
super().__init__()
# If on Windows, force this to be inactive (and hidden)
if os.name == 'nt':
self.set_active(False)
self.set_sensitive(False)
self.set_no_show_all(True)
def filter(self, original, filename):
return strip_win32_incompat_from_path(filename)
class StripDiacriticals(FilterCheckButton):
_label = _("Strip _diacritical marks")
_section = "rename"
_key = "diacriticals"
_order = 1.2
def filter(self, original, filename):
return u"".join(filter(lambda s: not unicodedata.combining(s),
unicodedata.normalize('NFKD', filename)))
class StripNonASCII(FilterCheckButton):
_label = _("Strip non-_ASCII characters")
_section = "rename"
_key = "ascii"
_order = 1.3
def filter(self, original, filename):
return u"".join(map(lambda s: (s <= "~" and s) or u"_", filename))
class Lowercase(FilterCheckButton):
_label = _("Use only _lowercase characters")
_section = "rename"
_key = "lowercase"
_order = 1.4
def filter(self, original, filename):
return filename.lower()
class RenameFilesPluginHandler(EditingPluginHandler):
from quodlibet.plugins.editing import RenameFilesPlugin
Kind = RenameFilesPlugin
class Entry:
def __init__(self, song):
self.song = song
new_name = None
"""new name as unicode or None if not set"""
@property
def name(self):
return fsn2text(self.song("~basename"))
class RenameFiles(Gtk.VBox):
title = _("Rename Files")
FILTERS = [SpacesToUnderscores, ReplaceColons, StripWindowsIncompat,
StripDiacriticals, StripNonASCII, Lowercase]
handler = RenameFilesPluginHandler()
IMAGE_EXTENSIONS = ['jpg', 'jpeg', 'png', 'bmp']
@classmethod
def init_plugins(cls):
PluginManager.instance.register_handler(cls.handler)
def __init__(self, parent, library):
super().__init__(spacing=6)
self.__skip_interactive = False
self.set_border_width(12)
hbox = Gtk.HBox(spacing=6)
cbes_defaults = NBP_EXAMPLES.split("\n")
self.combo = ComboBoxEntrySave(NBP, cbes_defaults,
title=_("Path Patterns"),
edit_title=_(u"Edit saved patterns…"))
self.combo.show_all()
hbox.pack_start(self.combo, True, True, 0)
self.preview = qltk.Button(_("_Preview"), Icons.VIEW_REFRESH)
self.preview.show()
hbox.pack_start(self.preview, False, True, 0)
self.pack_start(hbox, False, True, 0)
self.combo.get_child().connect('changed', self._changed)
model = ObjectStore()
self.view = Gtk.TreeView(model=model)
self.view.show()
sw = Gtk.ScrolledWindow()
sw.set_shadow_type(Gtk.ShadowType.IN)
sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
sw.add(self.view)
self.pack_start(sw, True, True, 0)
self.pack_start(Gtk.VBox(), False, True, 0)
# rename options
rename_options = Gtk.HBox()
# file name options
filter_box = FilterPluginBox(self.handler, self.FILTERS)
filter_box.connect("preview", self.__filter_preview)
filter_box.connect("changed", self.__filter_changed)
self.filter_box = filter_box
frame_filename_options = Frame(_("File names"), filter_box)
frame_filename_options.show_all()
rename_options.pack_start(frame_filename_options, False, True, 0)
# album art options
albumart_box = Gtk.VBox()
# move art
moveart_box = Gtk.VBox()
self.moveart = ConfigCheckButton(
_('_Move album art'),
"rename", "move_art", populate=True)
self.moveart.set_tooltip_text(
_("See '[albumart] filenames' config entry "
"for image search strings"))
self.moveart.show()
moveart_box.pack_start(self.moveart, False, True, 0)
self.moveart_overwrite = ConfigCheckButton(
_('_Overwrite album art at target'),
"rename", "move_art_overwrite", populate=True)
self.moveart_overwrite.show()
moveart_box.pack_start(self.moveart_overwrite, False, True, 0)
albumart_box.pack_start(moveart_box, False, True, 0)
# remove empty
removeemptydirs_box = Gtk.VBox()
self.removeemptydirs = ConfigCheckButton(
_('_Remove empty directories'),
"rename", "remove_empty_dirs", populate=True)
self.removeemptydirs.show()
removeemptydirs_box.pack_start(self.removeemptydirs, False, True, 0)
albumart_box.pack_start(removeemptydirs_box, False, True, 0)
frame_albumart_options = Frame(_("Album art"), albumart_box)
frame_albumart_options.show_all()
rename_options.pack_start(frame_albumart_options, False, True, 0)
self.pack_start(rename_options, False, True, 0)
# Save button
self.save = Button(_("_Save"), Icons.DOCUMENT_SAVE)
self.save.show()
bbox = Gtk.HButtonBox()
bbox.set_layout(Gtk.ButtonBoxStyle.END)
bbox.pack_start(self.save, True, True, 0)
self.pack_start(bbox, False, True, 0)
render = Gtk.CellRendererText()
column = TreeViewColumn(
|
stonebig/bokeh
|
bokeh/core/property/numeric.py
|
Python
|
bsd-3-clause
| 8,603
| 0.003487
|
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2019, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
''' Provide the numeric properties.
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from .bases import ParameterizedProperty
from .primitive import Int, Float
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'Angle',
'Byte',
'Interval',
'NonNegativeInt',
'Percent',
'Size',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class NonNegativeInt(Int):
""" Accept non-negative integers. """
def validate(self, value, detail=True):
super(NonNegativeInt, self).validate(value, detail)
if not (value is None or value >= 0):
raise ValueError("expected non-negative integer, got %r" % (value))
class Interval(ParameterizedProperty):
''' Accept numeric values that are contained within a given interval.
Args:
interval_type (numeric property):
numeric types for the range, e.g. ``Int``, ``Float``
start (number) :
A minimum allowable value for the range. Values less than
``start`` will result in validation errors.
end (number) :
A maximum allowable value for the range. Values greater than
``end`` will result in validation errors.
Example:
.. code-block:: python
            >>> class IntervalModel(HasProps):
            ...     prop = Interval(Float, 10, 20)
            ...
            >>> m = IntervalModel()
>>> m.prop = 10
>>> m.prop = 20
>>> m.prop = 15
>>> m.prop = 2 # ValueError !!
>>> m.prop = 22 # ValueError !!
>>> m.prop = "foo" # ValueError !!
'''
def __init__(self, interval_type, start, end, default=None, help=None):
self.interval_type = self._validate_type_param(interval_type)
# Make up a property name for validation purposes
self.interval_type.validate(start)
self.interval_type.validate(end)
self.start = start
self.end = end
super(Interval, self).__init__(default=default, help=help)
def __str__(self):
return "%s(%s, %r, %r)" % (self.__class__.__name__, self.interval_type, self.start, self.end)
@property
def type_params(self):
return [self.interval_type]
    def validate(self, value, detail=True):
super(Interval, self).validate(value, detail)
if not (value is None or self.interval_type.is_valid(value) and value >= self.start and value <= self.end):
msg = "" if not detail else "expected a value of type %s in range [%s, %s], got %r" % (self.interval_type, self.start, self.end, value)
raise ValueError(msg)
class Byte(Interval):
    ''' Accept integral byte values (0-255).
Example:
.. code-block:: python
>>> class ByteModel(HasProps):
... prop = Byte(default=0)
...
>>> m = ByteModel()
>>> m.prop = 255
>>> m.prop = 256 # ValueError !!
>>> m.prop = 10.3 # ValueError !!
'''
def __init__(self, default=0, help=None):
super(Byte, self).__init__(Int, 0, 255, default=default, help=help)
class Size(Float):
''' Accept non-negative numeric values.
Args:
default (float or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
Example:
.. code-block:: python
>>> class SizeModel(HasProps):
... prop = Size()
...
>>> m = SizeModel()
>>> m.prop = 0
>>> m.prop = 10e6
>>> m.prop = -10 # ValueError !!
>>> m.prop = "foo" # ValueError !!
'''
def validate(self, value, detail=True):
super(Size, self).validate(value, detail)
if not (value is None or 0.0 <= value):
msg = "" if not detail else "expected a non-negative number, got %r" % value
raise ValueError(msg)
class Percent(Float):
''' Accept floating point percentage values.
``Percent`` can be useful and semantically meaningful for specifying
things like alpha values and extents.
Args:
default (float or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
Example:
.. code-block:: python
>>> class PercentModel(HasProps):
... prop = Percent()
...
>>> m = PercentModel()
>>> m.prop = 0.0
>>> m.prop = 0.2
>>> m.prop = 1.0
>>> m.prop = -2 # ValueError !!
>>> m.prop = 5 # ValueError !!
'''
def validate(self, value, detail=True):
super(Percent, self).validate(value, detail)
if not (value is None or 0.0 <= value <= 1.0):
msg = "" if not detail else "expected a value in range [0, 1], got %r" % value
raise ValueError(msg)
class Angle(Float):
''' Accept floating point angle values.
``Angle`` is equivalent to :class:`~bokeh.core.properties.Float` but is
provided for cases when it is more semantically meaningful.
Args:
default (float or None, optional) :
A default value for attributes created from this property to
have (default: None)
help (str or None, optional) :
A documentation string for this property. It will be automatically
used by the :ref:`bokeh.sphinxext.bokeh_prop` extension when
            generating Sphinx documentation. (default: None)
serialized (bool, optional) :
Whether attributes created from this property should be included
in serialization (default: True)
readonly (bool, optional) :
Whether attributes created from this property are read-only.
(default: False)
'''
pass
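# Hedged usage sketch (not part of the original module): exercising the
# validators above directly, assuming bokeh is installed and that is_valid()
# -- the bool-returning wrapper around validate(), which Interval itself
# calls on its interval_type -- is available on these properties.
if __name__ == '__main__':
    interval = Interval(Float, 10, 20)
    print(interval.is_valid(15))    # True: inside [10, 20]
    print(interval.is_valid(25))    # False: above end
    print(Percent().is_valid(0.5))  # True: within [0, 1]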
#-----------------------------------------------------------------------------
# Dev
|
DataMonster/Python
|
exer/zipunzip/zip.py
|
Python
|
unlicense
| 1,230
| 0.034959
|
def zip(*arg):
Result = []
Check = 1
#check if every item in arg has the same length
for i in arg:
if len(i) != len(arg[0]):
            print 'please make sure all items have the same length'
Check = 0
break
while (Check):
for j in range(0,len(arg[0])):
result = ()
for item in arg:
result = result + (item[j],)
Result.append(result)
Check = 0
return Result
def unzip(x):
Length = len(x[0])
result = ()
LIST = []
for i in range(0,len(x[0])):
LIST.append([],)
for item in x:
        for j in range(0, len(LIST)):
LIST[j].append(item[j])
for k in LIST:
result = result + (k,)
return result
def Test():
    print '#1 test: '
print ' zip([1,1,1],[2,2,2],[3,3,3],[4,4,4]) -->', zip([1,1,1],[2,2,2],[3,3,3],[4,4,4])
print '\n'
print ' unzip([(1,2,3,4,5),(2,3,4,5,6),(3,4,5,6,7)]) -->', unzip([(1,2,3,4,5),(2,3,4,5,6),(3,4,5,6,7)])
print '\n'
print '#2 test: unzip(zip([100,200,300],[200,300,400],[0,0,0]))'
print unzip(zip([100,200,300],[200,300,400], [0,0,0]))
print '\n'
if __name__ == '__main__':
Test()
|
skybon/obozrenie
|
obozrenie/launch_gtk.py
|
Python
|
gpl-3.0
| 1,061
| 0
|
#!/usr/bin/env python3
# This source file is part of Obozrenie
# Copyright 2015 Artem Vorotnikov
# For more information, see https://github.com/obozrenie/obozrenie
# Obozrenie is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3, as
# published by the Free Software Foundation.
# Obozrenie is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Obozrenie. If not, see <http://www.gnu.org/licenses/>.
import os
from obozrenie.global_settings import *
from obozrenie.gtk import *
try:
core_instance = core.Core()
settings_instance = core.Settings(
core_instance, os.path.expanduser(PROFILE_PATH))
app_instance = App(core_instance, settings_instance)
app_instance.run(None)
except Exception as e:
print(e)
|
robocomp/learnbot
|
learnbot_dsl/Clients/Devices/JointMotor.py
|
Python
|
gpl-3.0
| 379
| 0
|
class JointMotor():
def __init__(self, _callDevice, _readDevice):
self._callDevice = _callDevice
self._readDevice = _readDevice
self._angle = 0
def sendAngle(self, _angle):
self._callDevice(_angle)
self._angle = _angle
def getAngle(self):
return self._angle
def read(self):
        _angle = self._readDevice()
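# Hedged usage sketch (not in the original file): wiring the motor to dummy
# callables standing in for whatever transport the learnbot framework injects
# as _callDevice/_readDevice.
if __name__ == '__main__':
    motor = JointMotor(_callDevice=lambda angle: None,
                       _readDevice=lambda: 42)
    motor.sendAngle(30)
    print(motor.getAngle())  # 30: the last angle sent, cached locally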
|
houssemFat/bloodOn
|
bloodon/accounts/social/providers/oauth2/client.py
|
Python
|
mit
| 2,095
| 0.000955
|
try:
from urllib.parse import parse_qsl, urlencode
except ImportError:
from urllib import urlencode
from urlparse import parse_qsl
import requests
class OAuth2Error(Exception):
pass
class OAuth2Client(object):
def __init__(self, request, consumer_key, consumer_secret,
                 access_token_url,
callback_url,
scope):
self.request = request
self.access_token_url = access_token_url
self.callback_url = callback_url
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
        self.scope = ' '.join(scope)
self.state = None
def get_redirect_url(self, authorization_url, extra_params):
params = {
'client_id': self.consumer_key,
'redirect_uri': self.callback_url,
'scope': self.scope,
'response_type': 'code'
}
if self.state:
params['state'] = self.state
params.update(extra_params)
return '%s?%s' % (authorization_url, urlencode(params))
def get_access_token(self, code):
params = {'client_id': self.consumer_key,
'redirect_uri': self.callback_url,
'grant_type': 'authorization_code',
'client_secret': self.consumer_secret,
'scope': self.scope,
'code': code}
url = self.access_token_url
# TODO: Proper exception handling
resp = requests.post(url, params)
access_token = None
if resp.status_code == 200:
# Weibo sends json via 'text/plain;charset=UTF-8'
if (resp.headers['content-type'].split(';')[0] == 'application/json'
or resp.text[:2] == '{"'):
access_token = resp.json()
else:
access_token = dict(parse_qsl(resp.text))
if not access_token or 'access_token' not in access_token:
raise OAuth2Error('Error retrieving access token: %s'
% resp.content)
return access_token
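# Hedged usage sketch (not part of the original module): building the
# authorization redirect URL with hypothetical endpoint and credential
# values; only get_redirect_url is exercised, so no HTTP request is made.
if __name__ == '__main__':
    client = OAuth2Client(request=None,
                          consumer_key='KEY',
                          consumer_secret='SECRET',
                          access_token_url='https://provider.example/token',
                          callback_url='https://app.example/callback',
                          scope=['email', 'profile'])
    print(client.get_redirect_url('https://provider.example/auth',
                                  extra_params={'state': 'xyz'}))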
|
interlegis/saap
|
saap/core/migrations/0019_auto_20180921_1540.py
|
Python
|
gpl-3.0
| 943
| 0.002123
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.13 on 2018-09-21 18:40
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import smart_selects.db_fields
class Migration(migrations.Migration):
dependencies = [
('core', '0018_auto_20180921_1539'),
]
operations = [
migrations.AddField(
model_name='distrito',
name='estado',
field=models.ForeignKey(default=21, on_delete=django.db.models.deletion.CASCADE, to='core.Estado', verbose_name='Estado'),
),
migrations.AlterField(
model_name='distrito',
name='municipio',
field=smart_selects.db_fields.ChainedForeignKey(auto_choose=True, chained_field='estado', chained_model_field='estado', default=4891, on_delete=django.db.models.deletion.CASCADE, to='core.Municipio', verbose_name='Município'),
),
]
|
albertoferna/compmech
|
compmech/conecyl/clpt/setup_clpt_bc1.py
|
Python
|
bsd-3-clause
| 472
| 0.021186
|
from distutils.core import setup
from distutils.extension import Extension
from Cython.Distutils import build_ext
args_linear = ['/openmp', '/O2', '/favor:INTEL64']
args_nonlinear = ['/openmp', '/O2', '/favor:INTEL64', '/fp:fast']
ext_modules = [
    Extension('clpt_commons_bc1', ['clpt_commons_bc1.pyx'],
extra_compile_args=args_linear,
)]
setup(
name = 'clpt_bc1',
cmdclass = {'build_ext': build_ext},
ext_modules = ext_modules
)
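# Hedged usage note (not in the original file): a script like this is
# typically invoked as
#   python setup_clpt_bc1.py build_ext --inplace
# and the /openmp, /O2, /favor:INTEL64 flags above assume MSVC on 64-bit
# Windows; GCC/Clang builds would need e.g. -fopenmp -O2 instead.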
|
kafana/ubik
|
lib/ubik/rug/cli.py
|
Python
|
gpl-3.0
| 2,912
| 0.002747
|
#!/usr/bin/python
#
# Copyright 2012 Lee Verberne <lee@blarg.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Command line front end to all these hats
"This is the command line interface to a collection of platform control scripts"
import logging
import optparse
import os
import sys
import ubik.config
import ubik.defaults
import ubik.hats
config = ubik.config.UbikConfig()
options = None
log = logging.getLogger('rug.cli')
def init_cli(args=None):
global config, options
p = optparse.OptionParser(usage='%prog [global_options] COMMAND [ARG ...]',
version='%prog ' + ubik.defaults.VERSION,
description=__doc__,
epilog='Use the help sub-command for more '
'details.')
p.add_option('--conf', '-c', metavar='FILE',
default=ubik.defaults.CONFIG_FILE,
help='Use config FILE instead of %default')
p.add_option('--debug', '-d', action='store_true',
help='Enable debug logging')
p.add_option('--workdir', metavar='DIR',
help="Use DIR as working directory, creating if necessary")
p.add_option('--verbose', '-v', action='store_true',
help='Enable verbose logging')
p.disable_interspersed_args()
(options, args) = p.parse_args(args=args)
if 'DEBUG' in os.environ:
options.debug = True
if options.debug:
log.setLevel(logging.DEBUG)
elif options.verbose:
log.setLevel(logging.INFO)
if 'RUG_GLOBAL_CONFIG' in os.environ:
global_cf = os.environ['RUG_GLOBAL_CONFIG']
else:
global_cf = ubik.defaults.GLOBAL_CONFIG_FILE
config.read(options.conf, global_cf)
if len(args) == 0:
args = ['help',]
return args
def main(args=None):
args = init_cli(args)
# Try to figure out what hat we're using here
hat = ubik.hats.hatter(args, config, options)
if hat:
try:
hat.run()
except ubik.hats.HatException as e:
print >>sys.stderr, "ERROR:", str(e)
            if options.debug:
raise e
return 1
else:
print >>sys.stderr, "ERROR: No such command"
return 2
if __name__ == '__main__':
sys.exit(main())
|
code-disaster/fips
|
mod/tools/cmake.py
|
Python
|
mit
| 4,302
| 0.009298
|
"""wrapper for cmake tool"""
import subprocess
from subprocess import PIPE
import platform
from mod import log,util
from mod.tools import ninja
name = 'cmake'
platforms = ['linux', 'osx', 'win']
optional = False
not_found = 'please install cmake 2.8 or newer'
#------------------------------------------------------------------------------
def check_exists(fips_dir, major=2, minor=8) :
"""test
|
if cmake is in the path and has the required version
:returns: True if cmake found and is the required version
"""
try:
        out = subprocess.check_output(['cmake', '--version']).decode("utf-8")
ver = out.split()[2].split('.')
if int(ver[0]) > major or (int(ver[0]) == major and int(ver[1]) >= minor):
return True
else :
log.info('{}NOTE{}: cmake must be at least version {}.{} (found: {}.{}.{})'.format(
log.RED, log.DEF, major, minor, ver[0],ver[1],ver[2]))
return False
except (OSError, subprocess.CalledProcessError):
return False
#------------------------------------------------------------------------------
def run_gen(cfg, fips_dir, project_dir, build_dir, toolchain_path, defines) :
"""run cmake tool to generate build files
:param cfg: a fips config object
:param project_dir: absolute path to project (must have root CMakeLists.txt file)
:param build_dir: absolute path to build directory (where cmake files are generated)
:param toolchain: toolchain path or None
:returns: True if cmake returned successful
"""
cmdLine = 'cmake'
if cfg['generator'] != 'Default' :
cmdLine += ' -G "{}"'.format(cfg['generator'])
if cfg['generator-platform'] :
cmdLine += ' -A "{}"'.format(cfg['generator-platform'])
if cfg['generator-toolset'] :
cmdLine += ' -T "{}"'.format(cfg['generator-toolset'])
cmdLine += ' -DCMAKE_BUILD_TYPE={}'.format(cfg['build_type'])
if toolchain_path is not None :
cmdLine += ' -DCMAKE_TOOLCHAIN_FILE={}'.format(toolchain_path)
cmdLine += ' -DFIPS_CONFIG={}'.format(cfg['name'])
if cfg['defines'] is not None :
for key in cfg['defines'] :
val = cfg['defines'][key]
if type(val) is bool :
cmdLine += ' -D{}={}'.format(key, 'ON' if val else 'OFF')
else :
cmdLine += ' -D{}="{}"'.format(key, val)
for key in defines :
cmdLine += ' -D{}={}'.format(key, defines[key])
cmdLine += ' -B' + build_dir
cmdLine += ' -H' + project_dir
print(cmdLine)
res = subprocess.call(cmdLine, cwd=build_dir, shell=True)
return res == 0
#------------------------------------------------------------------------------
def run_build(fips_dir, target, build_type, build_dir, num_jobs=1, args=None) :
"""run cmake in build mode
:param target: build target, can be None (builds all)
:param build_type: CMAKE_BUILD_TYPE string (e.g. Release, Debug)
:param build_dir: path to the build directory
:param num_jobs: number of parallel jobs (default: 1)
:param args: optional string array of cmdline args forwarded to build tool
:returns: True if cmake returns successful
"""
args_str = ''
if args is not None:
args_str = ' '.join(args)
cmdLine = 'cmake --build . --config {}'.format(build_type)
if target :
cmdLine += ' --target {}'.format(target)
if platform.system() == 'Windows' :
cmdLine += ' -- /nologo /verbosity:minimal /maxcpucount:{} {}'.format(num_jobs, args_str)
else :
cmdLine += ' -- -j{} {}'.format(num_jobs, args_str)
print(cmdLine)
res = subprocess.call(cmdLine, cwd=build_dir, shell=True)
return res == 0
#------------------------------------------------------------------------------
def run_clean(fips_dir, build_dir) :
"""run cmake in build mode
:param build_dir: path to the build directory
:returns: True if cmake returns successful
"""
try :
res = subprocess.call('cmake --build . --target clean', cwd=build_dir, shell=True)
return res == 0
except (OSError, subprocess.CalledProcessError) :
return False
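#------------------------------------------------------------------------------
# Hedged usage sketch (not part of the original wrapper): probing the local
# cmake before generating; check_exists() ignores its fips_dir argument, so
# None is a safe stand-in here.
if __name__ == '__main__':
    print('cmake >= 3.10 available:', check_exists(None, major=3, minor=10))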
|
googleapis/protoc-java-resource-names-plugin
|
plugin/templates/resource_name.py
|
Python
|
bsd-3-clause
| 13,100
| 0
|
# Copyright 2016 Google LLC
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
from collections import OrderedDict
from plugin.utils import path_template
from plugin.utils import casing_utils
from plugin.utils.symbol_table import SymbolTable
RESOURCE_NAMES_GLOBAL_PACKAGE_JAVA = 'com.google.api.resourcenames'
class ResourceNameBase(object):
def __init__(self, class_name, package):
self.class_name = class_name
self.package = package
self.resource_name_global_package_name = \
RESOURCE_NAMES_GLOBAL_PACKAGE_JAVA
self.full_class_name = self.package + '.' + self.class_name
self.var_name = casing_utils.get_lower(self.class_name)
def filename(self):
class_dir = self.package.replace('.', os.path.sep)
return os.path.join(class_dir, self.class_name + '.java')
def template_name(self):
raise NotImplementedError(
'template_name must be implemented by a child')
def template_path(self):
return os.path.join(os.path.dirname(__file__), self.template_name())
class ResourceName(ResourceNameBase):
def __init__(self, collection_config, java_package, oneof):
super(ResourceName, self).__init__(
casing_utils.get_resource_type_class_name(
collection_config.java_entity_name), java_package)
symbol_table = SymbolTable()
id_segments = get_id_segments(collection_config.name_pattern)
self.format_name_lower = casing_utils.get_resource_type_var_name(
collection_config.java_entity_name)
self.type_name_upper = casing_utils.get_resource_type_from_class_name(
self.class_name)
self.builder_parent_class = ""
if oneof:
self.parent_interface = \
casing_utils.get_parent_resource_name_class_name(
oneof.oneof_name)
self.extension_keyword = 'extends'
# TODO: Remove builder_parent_class after we delete the deprecated
# per-pattern resource name subclasses
if oneof.pattern_strings:
self.builder_parent_class = self.parent_interface
else:
self.parent_interface = 'ResourceName'
self.extension_keyword = 'implements'
self.parameter_list = [{
'parameter': symbol_table.getNewSymbol(
casing_utils.lower_underscore_to_lower_camel(lit)),
'parameter_name': lit,
'not_first': True,
'not_last': True,
} for lit in id_segments]
self.parameter_list[0]['not_first'] = False
self.parameter_list[-1]['not_last'] = False
self.format_fields = [{
'upper': casing_utils.lower_underscore_to_upper_camel(
f['parameter_name']),
'lower': f['parameter'],
'parameter_name_in_map':
casing_utils.lower_underscore_to_lower_camel(
f['parameter_name']),
        } for f in self.parameter_list]
self.format_string = collection_config.name_pattern
def template_name(self):
return "resource_name.mustache"
class ParentResourceName(ResourceNameBase):
def __init__(self, oneof, java_package, pattern_strings):
super(ParentResourceName, self).__init__(
casing_utils.get_parent_resource_name_class_name(
oneof.oneof_name),
java_package)
symbol_table = SymbolTable()
pattern_to_id_segments = OrderedDict([
(p, get_id_segments(p))
for p in pattern_strings if not is_fixed_pattern(p)])
self.has_fixed_patterns = \
len(pattern_to_id_segments) < len(pattern_strings)
self.has_formattable_patterns = len(pattern_to_id_segments) > 0
segment_to_segment_symbols = OrderedDict()
# Keep segment IDs to symbols in a dictionary, so that we
# do not re-create a new symbol every time.
for segments in pattern_to_id_segments.values():
for seg in segments:
if seg in segment_to_segment_symbols:
continue
symbol = symbol_table.getNewSymbol(
casing_utils.lower_underscore_to_lower_camel(seg))
segment_to_segment_symbols[seg] = symbol
self.format_fields = [
get_format_field(segment, segment_symbol)
for segment, segment_symbol in segment_to_segment_symbols.items()
]
if self.format_fields:
self.format_fields[0]['not_first'] = False
self.format_fields[-1]['not_last'] = False
self.patterns = [
ResourceNamePattern(pattern,
get_format_fields_for_pattern(
pattern,
pattern_to_id_segments,
segment_to_segment_symbols))
for pattern in pattern_strings]
self.has_no_single_pattern_subclasses = \
not oneof.has_deprecated_collections
if len(self.patterns) > 0:
self.first_pattern = self.patterns[0]
self.patterns[0].set_first()
self.patterns[0].set_short_builder_name()
self.patterns[-1].set_last()
def template_name(self):
return "multi_pattern_resource_name.mustache" if self.patterns \
else "deprecated_parent_resource_name.mustache"
class ResourceNamePattern:
def __init__(self, pattern_string,
format_fields):
self.is_fixed = len(format_fields) == 0
self.is_formattable = not self.is_fixed
self.pattern_string = pattern_string
pattern_id = get_pattern_name(pattern_string)
pattern_naming_styles = get_format_field(pattern_id, "")
self.lower_camel = pattern_naming_styles['lower_camel']
self.upper_camel = pattern_naming_styles['upper_camel']
self.upper_underscore = pattern_naming_styles['upper_underscore']
self.format_fields = format_fields
self.builder_name = self.upper_camel + 'Builder'
if format_fields:
self.format_fields[0]['not_first'] = False
self.format_fields[-1]['not_last'] = False
for format_field in self.format_fields:
format_field['pattern_builder_name'] = self.builder_name
self.not_first = True
self.is_first = False
self.not_last = True
def set_first(self):
self.not_first = False
self.is_first = True
def set_last(self):
self.not_last = False
def set_short_builder_name(self):
self.builder_name = 'Builder'
for format_f
|
ric2b/Vivaldi-browser
|
update_notifier/thirdparty/wxWidgets/misc/scripts/png2c.py
|
Python
|
bsd-3-clause
| 3,184
| 0.005653
|
#!/usr/bin/python
# This script is a slightly modified version of the original found at
#
# https://wiki.wxwidgets.org/Embedding_PNG_Images-Bin2c_In_Python
#
# without any copyright attribution so it is assumed it can be used under
# wxWindows licence as the rest of the wiki material.
import sys
import os
import os.path
import re
import array
USAGE = """Usage: png2c [-s] [file...]
Output input PNG files as C arrays to standard output. Used to embed PNG images
in C code (like XPM but with full alpha channel support).
    -s embed the image size in the image names in generated code."""
if len(sys.argv) < 2:
print(USAGE)
sys.exit(1)
r = re.compile("^([a-zA-Z._][a-zA-Z._0-9]*)[.][pP][nN][gG]$")
with_size = 0
size_suffix = ''
for path in sys.argv[1:]:
if path == '-s':
with_size = 1
continue
filename = os.path.basename(path).replace('-','_')
m = r.match(filename)
# Allow only filenames that make sense as C variable names
if not(m):
print("Skipped file (unsuitable filename): " + filename)
continue
# Read PNG file as character array
bytes = array.array('B', open(path, "rb").read())
count = len(bytes)
# Check that it's actually a PNG to avoid problems when loading it
# later.
#
    # Each PNG file starts with an 8 byte signature that should be followed
# by IHDR chunk which is always 13 bytes in length so the first 16
# bytes are fixed (or at least we expect them to be).
if bytes[0:16].tostring() != b'\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR':
print('"%s" doesn\'t seem to be a valid PNG file.' % filename)
continue
# Try to naively get its size if necessary
if with_size:
def getInt(start):
""" Convert 4 bytes in network byte order to an integer. """
return 16777216*bytes[start] + \
65536*bytes[start+1] + \
256*bytes[start+2] + \
bytes[start+3];
size_suffix = "_%dx%d" % (getInt(16), getInt(20))
# Create the C header
text = "/* %s - %d bytes */\n" \
"static const unsigned char %s%s_png[] = {\n" % (
filename, count, m.group(1), size_suffix)
    # Iterate the characters; we want lines like:
# 0x01, 0x02, .... (8 values per line maximum)
i = 0
count = len(bytes)
for byte in bytes:
# Every new line starts with two whitespaces
if (i % 8) == 0:
text += " "
# Then the hex data (up to 8 values per line)
text += "0x%02x" % (byte)
# Separate all but the last values
if (i % 8) == 7:
text += ',\n'
elif (i + 1) < count:
text += ", "
i += 1
# Now conclude the C source
text += "};\n\n"
print(text)
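# Hedged aside (not in the original script): struct.unpack is the idiomatic
# equivalent of the hand-rolled getInt() above for the big-endian width and
# height fields that follow the 16 fixed signature/IHDR bytes. Synthetic
# demonstration with width=320, height=200:
import struct
_demo = b'\x00' * 16 + struct.pack('>II', 320, 200)
assert struct.unpack('>II', _demo[16:24]) == (320, 200)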
|
wiki-ai/revscoring
|
tests/languages/test_hungarian.py
|
Python
|
mit
| 3,830
| 0
|
import pickle
from revscoring.datasources import revision_oriented
from revscoring.dependencies import solve
from revscoring.languages import hungarian
from .util import compare_extraction
BAD = [
"anyad",
"anyád",
"anyádat",
"anyátok",
"anyátokat",
"apád",
"asd",
"balfasz",
"baszni",
"baszott",
"bazd",
"bazdmeg",
"bazmeg",
"béna",
"birkanépet",
"birkanépünk",
"büdös",
"buktája",
"buzi",
"buzik",
"csicska",
"csá",
"fasszopó",
"fasz",
"fasza",
"faszfej",
"faszkalap",
"faszok",
"faszom",
"faszomat",
"faszság",
"faszt",
"faszát",
"fing",
"fos",
"fuck",
"geci",
"gecik",
"gecis",
"gecit",
"hulye",
"hülye",
"hülyék",
"kabbe",
"kaka",
"kaki",
"kibaszott",
"kocsog",
"kuki",
"kurva",
"kurvák",
"kurvára",
"kurvát",
"köcsög",
"köcsögök",
"lófasz",
"megbaszta",
"mocskos",
"málejku",
"mizu",
"naon",
"picsa",
"picsája",
"pina",
"punci",
"putri",
"pöcs",
"retkes",
"ribanc",
"rohadt",
"sissitek",
"szar",
"szarok",
"szaros",
"szart",
"szopd",
"sále",
"elmenyekvolgye",
"immoviva",
"infosarok",
]
INFORMAL = [
"baromság",
"dencey",
"haha",
"hahaha",
"hehe",
"hello",
"hihi",
"hülyeség",
"képviselőink",
"képviselőinket",
"képünkbe",
"lol",
"megválasszuk",
"mészárosaim",
"országunk",
"special",
"soknevű",
"szavazatunkat",
"szeretem",
"szeretlek",
"szerintem",
"szia",
"sziasztok",
"tex",
"xdd",
"xddd",
"tudjátok",
"tönkretesszük",
"ugye",
"unokáink",
"user",
"utálom",
"vagyok",
"vagytok",
]
OTHER = [
"""A Károlyi-kert közpark Budapest V. kerületében. A Belváros legrégibb
kertje, valamint a kevés magyarországi palotakert között a legjobban
dokumentált. A kertet északról a Ferenczy István utca, keletről a Magyar
utca, délről a Henszlmann Imre utca, nyugatról a Károlyi-palota
határolja. 1932 óta funkcionál közparkként, területe a 17. század
vége óta változatlan: 7625 m², vagyis 0,76 hektár."""
]
def test_badwords():
compare_extraction(hungarian.badwords.revision.datasources.matches, BAD,
OTHER)
assert hungarian.badwords == pickle.loads(pickle.dumps(hungarian.badwords))
def test_informals():
compare_extraction(hungarian.informals.revision.datasources.matches,
INFORMAL, OTHER)
assert hungarian.informals == pickle.loads(
pickle.dumps(hungarian.informals))
def test_dictionary():
cache = {
revision_oriented.revision.text:
'nyugatról között worngly.'
}
assert (solve(
hungarian.dictionary.revision.datasources.dict_words,
cache=cache) ==
["nyugatról", "között"])
assert (solve(hungarian.dictionary.revision.datasources.non_dict_words,
cache=cache) ==
["worngly"])
assert (hungarian.dictionary ==
            pickle.loads(pickle.dumps(hungarian.dictionary)))
def test_stopwords():
cache = {
revision_oriented.revision.text:
'játszótérnek még helyett park jól'
}
assert (solve(hungarian.stopwords.revision.datasources.stopwords,
cache=cache) == ['még', 'jól'])
assert (solve(hungarian.stopwords.revision.datasources.non_stopwords,
cache=cache) ==
['játszótérnek', 'helyett', 'park'])
    assert hungarian.stopwords == pickle.loads(
pickle.dumps(hungarian.stopwords))
|
indera/crossref-client
|
crossref-client.py
|
Python
|
mit
| 1,731
| 0
|
#!/usr/bin/env python
"""
crossref-client.py - a tool for interacting with the api.crossref.org service
Usage:
crossref-client.py -h | --help
crossref-client.py [-c]
Options:
    -h --help               Show the help message
-c --count-publishers Display the number of publishers available
[default:False]
"""
__author__ = "University of Florida CTS-IT Team"
import json
import requests
from docopt import docopt
from publisher import Publisher
API_URL_MEMBERS = 'http://api.crossref.org/members'
def main():
args = docopt(__doc__, help=True)
pub_count = count_publishers(API_URL_MEMBERS)
if args['--count-publishers']:
print("Total publishers @ {}: {}".format(API_URL_MEMBERS, pub_count))
else:
print("List of {} publishers: ".format(pub_count))
        groups = pub_count // 1000 + 1  # integer page count, so range() also works on Python 3
for offset in range(0, groups):
publishers = get_publishers(API_URL_MEMBERS, offset*1000)
for idx, pub in enumerate(publishers):
print("{}: {}".format(idx, pub.to_string()))
def count_publishers(url):
"""
@return the total number of publishers
"""
params = {'rows': 0}
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
return data['message']['total-results']
def get_publishers(url, offset):
"""
@return a list of publishers
"""
pubs = []
params = {'rows': 1000, 'offset': offset}
resp = requests.get(url=url, params=params)
data = json.loads(resp.text)
for pub_data in data['message']['items']:
pub = Publisher(pub_data)
pubs.append(pub)
return pubs
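# Hedged helper sketch (not in the original script): makes the paging
# arithmetic in main() explicit -- e.g. 2500 publishers yields the offsets
# 0, 1000, 2000 that get_publishers() would be called with.
def _page_offsets(total, page_size=1000):
    for group in range(total // page_size + 1):
        yield group * page_size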
if __name__ == "__main__":
main()
|
RealOrangeOne/attack-on-blocks
|
target.py
|
Python
|
mit
| 3,542
| 0.009881
|
import pygame, logging
from random import randint
from game import generate_random_level
class Target(pygame.sprite.Sprite):
def __init__(self, x, y, textures, color=(30,0,150), width=23, height=23):
super().__init__()
self.width = width
self.height = height
self.textures = textures
self.default_texture = textures.get_target_texture()
self.image = pygame.transform.scale(self.default_texture[0], (self.width, self.height))
self.rect = self.image.get_rect()
self.speed = 5
self.rect.x, self.rect.y = (x+(self.width/2)),(y+(self.width/2)) # centres co-ordinates
self.type = "NORMAL"
self.lives = 1
def move(self):
self.rect.x += self.speed
def drop(self):
self.rect.y += 13
self.speed *= -1
def set_position(self, x, y, center=False):
if center:
self.rect.x, self.rect.y = (x+(self.width/2)),(y+(self.width/2))
else:
self.rect.x, self.rect.y = x, y
def generate_targets(player, window_size, Levels):
sprite_list = []
group = pygame.sprite.Group()
if player.level > len(Levels)-1:
level = generate_random_level()
else: level = Levels[player.level]
logging.debug("Generating Level: " + str(level))
for i in range(level.rows):
i *= level.padding + 8
for j in range(75, window_size[0] - 75, level.padding + 10):
temp = Target(x=j,y=i, textures=player.options["Textures"])
sprite_list.append(temp)
del temp
if len(sprite_list) < level.firebacks:
firebacks = len(sprite_list)
else: firebacks = level.firebacks
for i in range(firebacks):
loop = 0
changed = False
while not changed:
if loop == firebacks: changed = True
index = randint(0, len(sprite_list)-1) if (len(sprite_list) - 1 != 0) else 0
if sprite_list[index].type != "SHOOTER":
sprite_list[index].type = "SHOOTER"
sprite_list[index].lives = 2
sprite_list[index].image = pygame.transform.scale(player.options["Textures"].get_texture("SHOOTER"), (sprite_list[index].width, sprite_list[index].height))
x,y = sprite_list[index].rect.x, sprite_list[index].rect.y
sprite_list[index].rect = sprite_list[index].image.get_rect()
sprite_list[index].set_position(x,y, center=False) #Already Centered!
changed = True
loop += 1
if len(sprite_list) < level.powerups:
powerups = len(sprite_list)
else: powerups = level.powerups
for i in range(powerups):
changed = False
while not changed:
index = randint(0, len(sprite_list)-1) if (len(sprite_list) - 1 != 0) else 0
if sprite_list[index].type != "POWERUP" and sprite_list[index].type != "SHOOTER" :
sprite_list[index].type = "POWERUP"
sprite_list[index].image = pygame.transform.scale(player.options["Textures"].get_texture("POWERUP"), (sprite_list[index].width, sprite_list[index].height))
x,y = sprite_list[index].rect.x, sprite_list[index].rect.y
sprite_list[index].rect = sprite_list[index].image.get_
|
rect()
|
sprite_list[index].set_position(x,y, center=False) #Already Centered!
changed = True
    for sprite in sprite_list:  # Because sprite groups don't support indexing!
group.add(sprite)
return group
|
the-zebulan/CodeWars
|
katas/kyu_8/remove_exclamation_marks.py
|
Python
|
mit
| 63
| 0
|
def remove_exclamation_marks(s):
    return s.replace('!', '')
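# Hedged usage sketch (not in the original kata file):
if __name__ == '__main__':
    assert remove_exclamation_marks('Hi! Hello!') == 'Hi Hello'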
|
tinloaf/home-assistant
|
homeassistant/components/input_boolean.py
|
Python
|
apache-2.0
| 3,609
| 0
|
"""
Component to keep track of user controlled booleans for within automation.
For more details about this component, please refer to the documentation
at https://home-assistant.io/components/input_boolean/
"""
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_ICON, CONF_NAME, SERVICE_TURN_OFF, SERVICE_TURN_ON,
SERVICE_TOGGLE, STATE_ON)
from homeassistant.loader import bind_hass
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.restore_state import RestoreEntity
DOMAIN = 'input_boolean'
ENTITY_ID_FORMAT = DOMAIN + '.{}'
_LOGGER = logging.getLogger(__name__)
CONF_INITIAL = 'initial'
SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
cv.slug: vol.Any({
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_INITIAL): cv.boolean,
vol.Optional(CONF_ICON): cv.icon,
}, None)
})
}, extra=vol.ALLOW_EXTRA)
@bind_hass
def is_on(hass, entity_id):
"""Test if input_boolean is True."""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
"""Set up an input boolean."""
component = EntityComponent(_LOGGER, DOMAIN, hass)
entities = []
for object_id, cfg in config[DOMAIN].items():
if not cfg:
cfg = {}
name = cfg.get(CONF_NAME)
initial = cfg.get(CONF_INITIAL)
icon = cfg.get(CONF_ICON)
entities.append(InputBoolean(object_id, name, initial, icon))
if not entities:
return False
component.async_register_entity_service(
SERVICE_TURN_ON, SERVICE_SCHEMA,
'async_turn_on'
)
component.async_register_entity_service(
SERVICE_TURN_OFF, SERVICE_SCHEMA,
'async_turn_off'
)
component.async_register_entity_service(
SERVICE_TOGGLE, SERVICE_SCHEMA,
'async_toggle'
)
await component.async_add_entities(entities)
return True
class InputBoolean(ToggleEntity, RestoreEntity):
"""Representation of a boolean input."""
def __init__(self, object_id, name, initial, icon):
"""Initialize a boolean input."""
self.entity_id = ENTITY_ID_FORMAT.format(object_id)
self._name = name
self._state = initial
self._icon = icon
@property
def should_poll(self):
"""If entity should be polled."""
return False
@property
    def name(self):
"""Return name of the boolean input."""
return self._name
@property
def icon(self):
"""Return the icon to be used for this entity."""
return self._icon
@property
def is_on(self):
"""Return true if entity is on."""
return self._state
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
# If not None, we got an initial value.
await super().async_added_to_hass()
if self._state is not None:
return
state = await self.async_get_last_state()
self._state = state and state.state == STATE_ON
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._state = True
await self.async_update_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._state = False
await self.async_update_ha_state()
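# Hedged usage sketch (not part of the original component): CONFIG_SCHEMA can
# be exercised on its own when homeassistant is installed, since it only
# depends on the voluptuous validators imported above.
if __name__ == '__main__':
    sample = {DOMAIN: {'kitchen_light': {'name': 'Kitchen', 'initial': True}}}
    print(CONFIG_SCHEMA(sample))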
|
wmalinowski/test-example
|
testexample/test/test_example_app.py
|
Python
|
unlicense
| 442
| 0.002262
|
import tornado.testing
from testexample import ExampleApp
class TestExampleApp(tornado.testing.AsyncHTTPTestCase,
tornado.testing.LogTrapTestCase):
def get_app(self):
return ExampleApp()
def test_home(self):
response = self.fetch('/')
self.assertEqual(response.code, 200)
def test_ticker(self):
response = self.fetch('/ticker')
self.assertEqual(response.code, 200)
|
cangencer/hazelcast-python-client
|
hazelcast/protocol/codec/map_execute_on_all_keys_codec.py
|
Python
|
apache-2.0
| 1,451
| 0.002068
|
from hazelcast.serialization.bits import *
from hazelcast.protocol.client_message import ClientMessage
from hazelcast.protocol.custom_codec import *
from hazelcast.util import ImmutableLazyDataList
from hazelcast.protocol.codec.map_message_type import *
REQUEST_TYPE = MAP_EXECUTEONALLKEYS
RESPONSE_TYPE = 117
RETRYABLE = False
def calculate_size(name, entry_processor):
""" Calculates the request payload size"""
data_size = 0
data_size += calculate_size_str(name)
data_size += calculate_size_data(entry_processor)
return data_size
def encode_request(name, entry_processor):
""" Encode request into client_message"""
client_message = ClientMessage(payload_size=calculate_size(name, entry_processor))
client_message.set_message_type(REQUEST_TYPE)
client_message.set_retryable(RETRYABLE)
client_message.append_str(name)
client_message.append_data(entry_processor)
client_message.update_frame_length()
return client_message
def decode_response(client_message, to_object=None):
""" Decode response from client message"""
parameters = dict(response=None)
response_size = client_message.read_int()
response = []
for response_index in xrange(0, response_size):
response_item = (client_message.read_data(), client_message.read_data())
response.append(response_item)
parameters['response'] = ImmutableLazyDataList(response, to_object)
return parameters
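# Hedged usage sketch (not part of the original codec): sizing a request
# payload; this assumes the calculate_size_* helpers star-imported from the
# bits module accept a str name and a bytes-like entry processor, which may
# not hold for every client version.
if __name__ == '__main__':
    print(calculate_size('my-map', b'\x00\x01\x02'))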
|
justanr/flask-allows
|
src/flask_allows/additional.py
|
Python
|
mit
| 5,469
| 0.000549
|
from contextlib import contextmanager
from functools import wraps
from werkzeug.local import LocalProxy, LocalStack
_additional_ctx_stack = LocalStack()
__all__ = ("current_additions", "Additional", "AdditionalManager")
@LocalProxy
def current_additions():
"""
Proxy to the currently added requirements
"""
rv = _additional_ctx_stack.top
if rv is None:
return None
return rv[1]
def _isinstance(f):
@wraps(f)
def check(self, other):
if not isinstance(other, Additional):
return NotImplemented
return f(self, other)
return check
class Additional(object):
"""
    Container object that allows running extra requirements on checks. These
    additional requirements will be run at most once per check and will
    occur in no guaranteed order.
    Requirements can be added by passing them into the constructor or
    by calling the ``add`` method. They can be removed from this object
    by calling the ``remove`` method. To check if a requirement has been added
    to the current context, you may call ``is_added`` or use ``in``::
        some_req in additional
        additional.is_added(some_req)
Additional objects can be iterated and length checked::
additional = Additional(some_req)
assert len(additional) == 1
assert list(additional) == [some_req]
Additional objects may be combined and compared to each other with the following
operators:
``+`` creates a new additional object by combining two others, the new
additional supplies all requirements that both parents did.
``+=`` similar to ``+`` except it is an inplace update.
``-`` creates a new additional instance by removing any requirements from
the first instance that are contained in the second instance.
``-=`` similar to ``-`` except it is an inplace update.
``==`` compares two additional instances and returns true if both have
the same added requirements.
    ``!=`` similar to ``==`` except returns true if both have different
    requirements contained in them.
"""
def __init__(self, *requirements):
        self._requirements = set(requirements)
def add(self, requirement, *requirements):
        self._requirements.update((requirement,) + requirements)
def remove(self, requirement, *requirements):
self._requirements.difference_update((requirement,) + requirements)
@_isinstance
def __add__(self, other):
requirements = self._requirements | other._requirements
return Additional(*requirements)
@_isinstance
def __iadd__(self, other):
if len(other._requirements) > 0:
            self._requirements.update(other._requirements)
return self
@_isinstance
def __sub__(self, other):
requirements = self._requirements - other._requirements
return Additional(*requirements)
@_isinstance
def __isub__(self, other):
if len(other._requirements) > 0:
self.remove(*other._requirements)
return self
@_isinstance
def __eq__(self, other):
return self._requirements == other._requirements
@_isinstance
def __ne__(self, other):
return not self == other
def __iter__(self):
return iter(self._requirements)
def is_added(self, requirement):
return requirement in self._requirements
def __contains__(self, requirement):
return self.is_added(requirement)
def __len__(self):
return len(self._requirements)
def __bool__(self):
return len(self) != 0
__nonzero__ = __bool__
def __repr__(self):
return "Additional({!r})".format(self._requirements)
class AdditionalManager(object):
"""
Used to manage the process of adding and removing additional requirements
to be run. This class shouldn't be used directly, instead use
``allows.additional`` to access these controls.
"""
def push(self, additional, use_parent=False):
"""
Binds an additional to the current context, optionally use the
current additionals in conjunction with this additional
If ``use_parent`` is true, a new additional is created from the
parent and child additionals rather than manipulating either
directly.
"""
current = self.current
if use_parent and current:
additional = current + additional
_additional_ctx_stack.push((self, additional))
def pop(self):
"""
Pops the latest additional context.
If the additional context was pushed by a different additional manager,
a ``RuntimeError`` is raised.
"""
rv = _additional_ctx_stack.pop()
if rv is None or rv[0] is not self:
raise RuntimeError(
"popped wrong additional context ({} instead of {})".format(rv, self)
)
@property
def current(self):
"""
Returns the current additional context if set otherwise None
"""
try:
return _additional_ctx_stack.top[1]
except TypeError:
return None
@contextmanager
def additional(self, additional, use_parent=False):
"""
Allows temporarily pushing an additional context, yields the new context
into the following block.
"""
self.push(additional, use_parent)
yield self.current
self.pop()
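# Hedged usage sketch (not part of the original module): pushing a temporary
# requirement set with AdditionalManager; fake_req is a stand-in for a real
# flask-allows requirement callable.
if __name__ == '__main__':
    def fake_req(identity, request):
        return True

    manager = AdditionalManager()
    with manager.additional(Additional(fake_req)) as ctx:
        assert fake_req in ctx
    assert manager.current is None  # context popped on exit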
|
openstack/networking-arista
|
networking_arista/tests/unit/ml2/security_groups/sg_test_base.py
|
Python
|
apache-2.0
| 3,145
| 0
|
# Copyright (c) 2018 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from neutron_lib.plugins import directory
from networking_arista.tests.unit.ml2 import ml2_test_base
from networking_arista.tests.unit import utils
class SecurityGroupTestBase(ml2_test_base.MechTestBase):
def get_additional_service_plugins(self):
p = super(SecurityGroupTestBase, self).get_additional_service_plugins()
p.update({'arista_security_group_plugin': 'arista_security_group'})
return p
def setUp(self):
super(SecurityGroupTestBase, self).setUp()
self.arista_sec_gp_plugin = directory.get_plugin(
'arista_security_group')
self.switch1 = utils.MockSwitch()
self.switch2 = utils.MockSwitch()
self.switches = {'TOR1': self.switch1,
'TOR2': self.switch2}
self.arista_sec_gp_plugin._switches = self.switches
self.arista_sec_gp_plugin._port_group_info['TOR1'] = {
'Ethernet1': {'interfaceMembership': ''},
'Ethernet2': {'interfaceMembership': ''}}
self.arista_sec_gp_plugin._port_group_info['TOR2'] = {
'Ethernet1': {'interfaceMembership': ''},
'Ethernet2': {'interfaceMembership': ''}}
def create_port_channel(self, switch, interface, pc_name):
intf_info = self.arista_sec_gp_plugin._port_group_info[switch]
intf_info[interface]['interfaceMembership'] = 'Member of %s' % pc_name
def create_sg_rule(self, direction, proto, cidr, range_min=None,
range_max=None, ethertype='IPv4', default=True,
sg_id=None):
if sg_id is None:
sec_group = {'security_group':
{'name': 'sg1',
'tenant_id': 't1',
'description': ''}}
grp = self.plugin.create_security_group(self.context, sec_group,
default_sg=default)
sg_id = grp['id']
for switch in self.switches.values():
switch.clear_received_commands()
rule = {'security_group_rule':
{'direction': direction,
'ethertype': ethertype,
'protocol': proto,
'remote_ip_prefix': cidr,
'port_range_min': range_min,
'port_range_max': range_max,
'security_group_id': sg_id,
'remote_group_id': None,
                 'tenant_id': 't1'}}
rule = self.plugin.create_security_group_rule(self.context, rule)
return sg_id, rule
|